Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
sbis04/blazepose-oak
[ "a991abf4d190a172d4264011520503e529fc465a" ]
[ "BlazeposeDepthai.py" ]
[ "'''\n@author: geaxgx, Soumi7, sbis04\n'''\n\n# import time\nimport argparse\nimport csv\nimport os\nfrom math import acos, atan2\nfrom pathlib import Path\nimport numpy as np\nimport cv2\nimport depthai as dai\nimport numpy as np\nimport open3d as o3d\nimport subprocess as sp\n\nimport mediapipe_utils as mpu\nfrom FPS import FPS, now\nfrom o3d_utils import create_segment, create_grid\n\n# iter_count = 0\n# start_time = time.time()\n\n'''\nThe following part is for using the RTMP.\n'''\n\n# Starting\n# ----------------------------\n\n# from rtmp_url import rtmpURL\n\n# rtmp_url = \"url_here\"\n\n# fps = 15\n# width = 300\n# height = 300\n# sizeStr = \"300x300\"\n\n# command = ['ffmpeg',\n# '-re',\n# # '-f', 'rawvideo',\n# # '-vcodec', 'rawvideo',\n# '-s', sizeStr,\n# '-r', str(fps), # rtsp fps (from input server)\n# '-i', '-',\n\n# # You can change ffmpeg parameter after this item.\n# '-pix_fmt', 'yuv420p',\n# '-r', '15', # output fps\n# '-g', '20',\n# '-c:v', 'libx264',\n# '-b:v', '2M',\n# '-bufsize', '64M',\n# '-maxrate', \"4M\",\n# '-preset', 'veryfast',\n# '-rtsp_transport', 'tcp',\n# '-segment_times', '5',\n# # '-f', 'rtsp',\n# # rtsp_server]\n# '-f', 'flv',\n# '-flvflags', 'no_duration_filesize',\n# rtmp_url]\n\n# muxStream = sp.Popen(command, stdin=sp.PIPE)\n# ----------------------------\n# Ending\n\n'''\nFor using Firebase\n'''\n# Firebase python package\n# import pyrebase\n\n# initialize Firebase\n# firebase = pyrebase.initialize_app(firebaseConfig)\n# creating an object for Realtime database\n# db = firebase.database()\n\nSCRIPT_DIR = Path(__file__).resolve().parent\nPOSE_DETECTION_MODEL = SCRIPT_DIR / \"models/pose_detection.blob\"\nFULL_BODY_LANDMARK_MODEL = SCRIPT_DIR / \"models/pose_landmark_full_body.blob\"\nUPPER_BODY_LANDMARK_MODEL = SCRIPT_DIR / \"models/pose_landmark_upper_body.blob\"\n\nPOSES = {\n \"mountain\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n \"tree\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 180,\n \"RIGHT_HAND_HIP_ANGLE\": 180,\n\n \"LEFT_LEG_ANGLE\": 135,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 0,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"boat\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 45,\n \"RIGHT_HAND_HIP_ANGLE\": 45,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"bridge\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 45,\n \"RIGHT_HAND_HIP_ANGLE\": 45,\n\n \"LEFT_LEG_ANGLE\": 90,\n \"RIGHT_LEG_ANGLE\": 90,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"butterfly\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 0,\n \"RIGHT_HAND_HIP_ANGLE\": 0,\n\n \"LEFT_LEG_ANGLE\": 20,\n \"RIGHT_LEG_ANGLE\": 20,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 180,\n },\n\n \"camel\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 90,\n \"RIGHT_LEG_ANGLE\": 90,\n\n \"LEFT_HIP_KNEE_ANGLE\": 
180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"cat_cow\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 90,\n \"RIGHT_LEG_ANGLE\": 90,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"chair\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 180,\n \"RIGHT_HAND_HIP_ANGLE\": 180,\n\n \"LEFT_LEG_ANGLE\": 100,\n \"RIGHT_LEG_ANGLE\": 100,\n\n \"LEFT_HIP_KNEE_ANGLE\": 80,\n \"RIGHT_HIP_KNEE_ANGLE\": 80,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"child\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 170,\n \"RIGHT_HAND_HIP_ANGLE\": 170,\n\n \"LEFT_LEG_ANGLE\": 10,\n \"RIGHT_LEG_ANGLE\": 10,\n\n \"LEFT_HIP_KNEE_ANGLE\": 10,\n \"RIGHT_HIP_KNEE_ANGLE\": 10,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"cobra\": {\n \"LEFT_ARM_ANGLE\": 120,\n \"RIGHT_ARM_ANGLE\": 120,\n\n \"LEFT_HAND_HIP_ANGLE\": 40,\n \"RIGHT_HAND_HIP_ANGLE\": 40,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"corpse\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 40,\n \"RIGHT_HAND_HIP_ANGLE\": 40,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"cow_face\": {\n \"LEFT_ARM_ANGLE\": 10,\n \"RIGHT_ARM_ANGLE\": 10,\n\n \"LEFT_HAND_HIP_ANGLE\": 180,\n \"RIGHT_HAND_HIP_ANGLE\": 180,\n\n \"LEFT_LEG_ANGLE\": 90,\n \"RIGHT_LEG_ANGLE\": 90,\n\n \"LEFT_HIP_KNEE_ANGLE\": 80,\n \"RIGHT_HIP_KNEE_ANGLE\": 80,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"downward_facing_dog\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 180,\n \"RIGHT_HAND_HIP_ANGLE\": 180,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 80,\n \"RIGHT_HIP_KNEE_ANGLE\": 80,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"easy\": {\n \"LEFT_ARM_ANGLE\": 100,\n \"RIGHT_ARM_ANGLE\": 100,\n\n \"LEFT_HAND_HIP_ANGLE\": 20,\n \"RIGHT_HAND_HIP_ANGLE\": 20,\n\n \"LEFT_LEG_ANGLE\": 10,\n \"RIGHT_LEG_ANGLE\": 10,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 120,\n },\n\n \"fish\": {\n \"LEFT_ARM_ANGLE\": 120,\n \"RIGHT_ARM_ANGLE\": 120,\n\n \"LEFT_HAND_HIP_ANGLE\": 40,\n \"RIGHT_HAND_HIP_ANGLE\": 40,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 120,\n \"RIGHT_HIP_KNEE_ANGLE\": 120,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"forward_bend\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 120,\n \"RIGHT_HAND_HIP_ANGLE\": 120,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 0,\n \"RIGHT_HIP_KNEE_ANGLE\": 0,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"half_moon\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 90,\n },\n\n \"half_spinal\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 80,\n\n \"LEFT_HAND_HIP_ANGLE\": 45,\n \"RIGHT_HAND_HIP_ANGLE\": 45,\n\n \"LEFT_LEG_ANGLE\": 45,\n \"RIGHT_LEG_ANGLE\": 
45,\n\n \"LEFT_HIP_KNEE_ANGLE\": 45,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"legs_up_the_wall\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"locust\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 0,\n \"RIGHT_HAND_HIP_ANGLE\": 0,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 120,\n \"RIGHT_HIP_KNEE_ANGLE\": 120,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"plank\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 70,\n \"RIGHT_HAND_HIP_ANGLE\": 70,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 180,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"ragdoll\": {\n \"LEFT_ARM_ANGLE\": 90,\n \"RIGHT_ARM_ANGLE\": 90,\n\n \"LEFT_HAND_HIP_ANGLE\": 140,\n \"RIGHT_HAND_HIP_ANGLE\": 140,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 10,\n \"RIGHT_HIP_KNEE_ANGLE\": 10,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"seated_forward_bend\": {\n \"LEFT_ARM_ANGLE\": 90,\n \"RIGHT_ARM_ANGLE\": 90,\n\n \"LEFT_HAND_HIP_ANGLE\": 120,\n \"RIGHT_HAND_HIP_ANGLE\": 120,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 0,\n \"RIGHT_HIP_KNEE_ANGLE\": 0,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"seated_wide_angle\": {\n \"LEFT_ARM_ANGLE\": 90,\n \"RIGHT_ARM_ANGLE\": 90,\n\n \"LEFT_HAND_HIP_ANGLE\": 40,\n \"RIGHT_HAND_HIP_ANGLE\": 40,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"staff\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 90,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 0,\n },\n\n \"tree\": {\n \"LEFT_ARM_ANGLE\": 45,\n \"RIGHT_ARM_ANGLE\": 45,\n\n \"LEFT_HAND_HIP_ANGLE\": 45,\n \"RIGHT_HAND_HIP_ANGLE\": 45,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 40,\n\n \"LEFT_HIP_KNEE_ANGLE\": 180,\n \"RIGHT_HIP_KNEE_ANGLE\": 120,\n\n \"ANGLE_BETWEEN_LEGS\": 45,\n },\n\n \"triangle1\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 120,\n \"RIGHT_HIP_KNEE_ANGLE\": 45,\n\n \"ANGLE_BETWEEN_LEGS\": 90,\n },\n\n \"triangle2\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 45,\n \"RIGHT_HIP_KNEE_ANGLE\": 120,\n\n \"ANGLE_BETWEEN_LEGS\": 90,\n },\n\n \"warrior\": {\n \"LEFT_ARM_ANGLE\": 180,\n \"RIGHT_ARM_ANGLE\": 180,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 90,\n\n \"LEFT_HIP_KNEE_ANGLE\": 120,\n \"RIGHT_HIP_KNEE_ANGLE\": 90,\n\n \"ANGLE_BETWEEN_LEGS\": 120,\n },\n\n \"wide-legged_forward_bend\": {\n \"LEFT_ARM_ANGLE\": 90,\n \"RIGHT_ARM_ANGLE\": 90,\n\n \"LEFT_HAND_HIP_ANGLE\": 90,\n \"RIGHT_HAND_HIP_ANGLE\": 
90,\n\n \"LEFT_LEG_ANGLE\": 180,\n \"RIGHT_LEG_ANGLE\": 180,\n\n \"LEFT_HIP_KNEE_ANGLE\": 45,\n \"RIGHT_HIP_KNEE_ANGLE\": 45,\n\n \"ANGLE_BETWEEN_LEGS\": 90,\n },\n}\n\n\nclass EMADictSmoothing(object):\n \"\"\"Smoothes pose classification.\"\"\"\n\n def __init__(self, window_size=10, alpha=0.2):\n self._window_size = window_size\n self._alpha = alpha\n\n self._data_in_window = []\n\n def __call__(self, data):\n \"\"\"Smoothes given pose classification.\n\n Smoothing is done by computing Exponential Moving Average for every pose\n class observed in the given time window. Missed pose classes arre replaced\n with 0.\n\n Args:\n data: Dictionary with pose classification. Sample:\n {\n 'pushups_down': 8,\n 'pushups_up': 2,\n }\n\n Result:\n Dictionary in the same format but with smoothed and float instead of\n integer values. Sample:\n {\n 'pushups_down': 8.3,\n 'pushups_up': 1.7,\n }\n \"\"\"\n # Add new data to the beginning of the window for simpler code.\n self._data_in_window.insert(0, data)\n self._data_in_window = self._data_in_window[:self._window_size]\n\n # Get all keys.\n keys = set(\n [key for data in self._data_in_window for key, _ in data.items()])\n\n # Get smoothed values.\n smoothed_data = dict()\n for key in keys:\n factor = 1.0\n top_sum = 0.0\n bottom_sum = 0.0\n for data in self._data_in_window:\n value = data[key] if key in data else 0.0\n\n top_sum += factor * value\n bottom_sum += factor\n\n # Update factor.\n factor *= (1.0 - self._alpha)\n\n smoothed_data[key] = top_sum / bottom_sum\n\n return smoothed_data\n\n\nclass PoseSample(object):\n\n def __init__(self, name, landmarks, class_name, embedding):\n self.name = name\n self.landmarks = landmarks\n self.class_name = class_name\n\n self.embedding = embedding\n\n\nclass PoseSampleOutlier(object):\n\n def __init__(self, sample, detected_class, all_classes):\n self.sample = sample\n self.detected_class = detected_class\n self.all_classes = all_classes\n\n\nclass FullBodyPoseEmbedder(object):\n \"\"\"Converts 3D pose landmarks into 3D embedding.\"\"\"\n\n def __init__(self, torso_size_multiplier=2.5):\n # Multiplier to apply to the torso to get minimal body size.\n self._torso_size_multiplier = torso_size_multiplier\n\n # Names of the landmarks as they appear in the prediction.\n self._landmark_names = [\n 'nose',\n 'left_eye_inner', 'left_eye', 'left_eye_outer',\n 'right_eye_inner', 'right_eye', 'right_eye_outer',\n 'left_ear', 'right_ear',\n 'mouth_left', 'mouth_right',\n 'left_shoulder', 'right_shoulder',\n 'left_elbow', 'right_elbow',\n 'left_wrist', 'right_wrist',\n 'left_pinky_1', 'right_pinky_1',\n 'left_index_1', 'right_index_1',\n 'left_thumb_2', 'right_thumb_2',\n 'left_hip', 'right_hip',\n 'left_knee', 'right_knee',\n 'left_ankle', 'right_ankle',\n 'left_heel', 'right_heel',\n 'left_foot_index', 'right_foot_index',\n ]\n\n def __call__(self, landmarks):\n \"\"\"Normalizes pose landmarks and converts to embedding\n\n Args:\n landmarks - NumPy array with 3D landmarks of shape (N, 3).\n\n Result:\n Numpy array with pose embedding of shape (M, 3) where `M` is the number of\n pairwise distances defined in `_get_pose_distance_embedding`.\n \"\"\"\n assert landmarks.shape[0] == len(self._landmark_names), 'Unexpected number of landmarks: {}'.format(\n landmarks.shape[0])\n\n # Get pose landmarks.\n landmarks = np.copy(landmarks)\n\n # Normalize landmarks.\n landmarks = self._normalize_pose_landmarks(landmarks)\n\n # Get embedding.\n embedding = self._get_pose_distance_embedding(landmarks)\n\n return embedding\n\n def 
_normalize_pose_landmarks(self, landmarks):\n        \"\"\"Normalizes landmarks translation and scale.\"\"\"\n        landmarks = np.copy(landmarks)\n\n        # Normalize translation.\n        pose_center = self._get_pose_center(landmarks)\n        landmarks -= pose_center\n\n        # Normalize scale.\n        pose_size = self._get_pose_size(landmarks, self._torso_size_multiplier)\n        landmarks /= pose_size\n        # Multiplication by 100 is not required, but makes it easier to debug.\n        landmarks *= 100\n\n        return landmarks\n\n    def _get_pose_center(self, landmarks):\n        \"\"\"Calculates pose center as point between hips.\"\"\"\n        left_hip = landmarks[self._landmark_names.index('left_hip')]\n        right_hip = landmarks[self._landmark_names.index('right_hip')]\n        center = (left_hip + right_hip) * 0.5\n        return center\n\n    def _get_pose_size(self, landmarks, torso_size_multiplier):\n        \"\"\"Calculates pose size.\n\n        It is the maximum of two values:\n          * Torso size multiplied by `torso_size_multiplier`\n          * Maximum distance from pose center to any pose landmark\n        \"\"\"\n        # This approach uses only 2D landmarks to compute pose size.\n        landmarks = landmarks[:, :2]\n\n        # Hips center.\n        left_hip = landmarks[self._landmark_names.index('left_hip')]\n        right_hip = landmarks[self._landmark_names.index('right_hip')]\n        hips = (left_hip + right_hip) * 0.5\n\n        # Shoulders center.\n        left_shoulder = landmarks[self._landmark_names.index('left_shoulder')]\n        right_shoulder = landmarks[self._landmark_names.index(\n            'right_shoulder')]\n        shoulders = (left_shoulder + right_shoulder) * 0.5\n\n        # Torso size as the minimum body size.\n        torso_size = np.linalg.norm(shoulders - hips)\n\n        # Max dist to pose center.\n        pose_center = self._get_pose_center(landmarks)\n        max_dist = np.max(np.linalg.norm(landmarks - pose_center, axis=1))\n\n        return max(torso_size * torso_size_multiplier, max_dist)\n\n    def _get_pose_distance_embedding(self, landmarks):\n        \"\"\"Converts pose landmarks into 3D embedding.\n\n        We use several pairwise 3D distances to form pose embedding. All distances\n        include X and Y components with sign. We use different types of pairs to cover\n        different pose classes. 
Feel free to remove some or add new.\n\n Args:\n landmarks - NumPy array with 3D landmarks of shape (N, 3).\n\n Result:\n Numpy array with pose embedding of shape (M, 3) where `M` is the number of\n pairwise distances.\n \"\"\"\n embedding = np.array([\n # One joint.\n\n self._get_distance(\n self._get_average_by_names(landmarks, 'left_hip', 'right_hip'),\n self._get_average_by_names(landmarks, 'left_shoulder', 'right_shoulder')),\n\n self._get_distance_by_names(\n landmarks, 'left_shoulder', 'left_elbow'),\n self._get_distance_by_names(\n landmarks, 'right_shoulder', 'right_elbow'),\n\n self._get_distance_by_names(landmarks, 'left_elbow', 'left_wrist'),\n self._get_distance_by_names(\n landmarks, 'right_elbow', 'right_wrist'),\n\n self._get_distance_by_names(landmarks, 'left_hip', 'left_knee'),\n self._get_distance_by_names(landmarks, 'right_hip', 'right_knee'),\n\n self._get_distance_by_names(landmarks, 'left_knee', 'left_ankle'),\n self._get_distance_by_names(\n landmarks, 'right_knee', 'right_ankle'),\n\n # Two joints.\n\n self._get_distance_by_names(\n landmarks, 'left_shoulder', 'left_wrist'),\n self._get_distance_by_names(\n landmarks, 'right_shoulder', 'right_wrist'),\n\n self._get_distance_by_names(landmarks, 'left_hip', 'left_ankle'),\n self._get_distance_by_names(landmarks, 'right_hip', 'right_ankle'),\n\n # Four joints.\n\n self._get_distance_by_names(landmarks, 'left_hip', 'left_wrist'),\n self._get_distance_by_names(landmarks, 'right_hip', 'right_wrist'),\n\n # Five joints.\n\n self._get_distance_by_names(\n landmarks, 'left_shoulder', 'left_ankle'),\n self._get_distance_by_names(\n landmarks, 'right_shoulder', 'right_ankle'),\n\n self._get_distance_by_names(landmarks, 'left_hip', 'left_wrist'),\n self._get_distance_by_names(landmarks, 'right_hip', 'right_wrist'),\n\n # Cross body.\n\n self._get_distance_by_names(\n landmarks, 'left_elbow', 'right_elbow'),\n self._get_distance_by_names(landmarks, 'left_knee', 'right_knee'),\n\n self._get_distance_by_names(\n landmarks, 'left_wrist', 'right_wrist'),\n self._get_distance_by_names(\n landmarks, 'left_ankle', 'right_ankle'),\n\n # Body bent direction.\n\n # self._get_distance(\n # self._get_average_by_names(landmarks, 'left_wrist', 'left_ankle'),\n # landmarks[self._landmark_names.index('left_hip')]),\n # self._get_distance(\n # self._get_average_by_names(landmarks, 'right_wrist', 'right_ankle'),\n # landmarks[self._landmark_names.index('right_hip')]),\n ])\n\n return embedding\n\n def _get_average_by_names(self, landmarks, name_from, name_to):\n lmk_from = landmarks[self._landmark_names.index(name_from)]\n lmk_to = landmarks[self._landmark_names.index(name_to)]\n return (lmk_from + lmk_to) * 0.5\n\n def _get_distance_by_names(self, landmarks, name_from, name_to):\n lmk_from = landmarks[self._landmark_names.index(name_from)]\n lmk_to = landmarks[self._landmark_names.index(name_to)]\n return self._get_distance(lmk_from, lmk_to)\n\n def _get_distance(self, lmk_from, lmk_to):\n return lmk_to - lmk_from\n\n\nclass PoseClassifier(object):\n \"\"\"Classifies pose landmarks.\"\"\"\n\n def __init__(self,\n pose_samples_folder,\n pose_embedder,\n file_extension='csv',\n file_separator=',',\n n_landmarks=33,\n n_dimensions=3,\n top_n_by_max_distance=30,\n top_n_by_mean_distance=10,\n axes_weights=(1., 1., 0.2)):\n self._pose_embedder = pose_embedder\n self._n_landmarks = n_landmarks\n self._n_dimensions = n_dimensions\n self._top_n_by_max_distance = top_n_by_max_distance\n self._top_n_by_mean_distance = top_n_by_mean_distance\n 
self._axes_weights = axes_weights\n\n        self._pose_samples = self._load_pose_samples(pose_samples_folder,\n                                                      file_extension,\n                                                      file_separator,\n                                                      n_landmarks,\n                                                      n_dimensions,\n                                                      pose_embedder)\n\n    def _load_pose_samples(self,\n                           pose_samples_folder,\n                           file_extension,\n                           file_separator,\n                           n_landmarks,\n                           n_dimensions,\n                           pose_embedder):\n        \"\"\"Loads pose samples from a given folder.\n\n        Required folder structure:\n          neutral_standing.csv\n          pushups_down.csv\n          pushups_up.csv\n          squats_down.csv\n          ...\n\n        Required CSV structure:\n          sample_00001,x1,y1,z1,x2,y2,z2,....\n          sample_00002,x1,y1,z1,x2,y2,z2,....\n          ...\n        \"\"\"\n        # Each file in the folder represents one pose class.\n        file_names = [name for name in os.listdir(\n            pose_samples_folder) if name.endswith(file_extension)]\n\n        pose_samples = []\n        for file_name in file_names:\n            # Use file name as pose class name.\n            class_name = file_name[:-(len(file_extension) + 1)]\n\n            # Parse CSV.\n            with open(os.path.join(pose_samples_folder, file_name)) as csv_file:\n                csv_reader = csv.reader(csv_file, delimiter=file_separator)\n                for row in csv_reader:\n                    assert len(row) == n_landmarks * n_dimensions + \\\n                        1, 'Wrong number of values: {}'.format(len(row))\n                    landmarks = np.array(row[1:], np.float32).reshape(\n                        [n_landmarks, n_dimensions])\n                    pose_samples.append(PoseSample(\n                        name=row[0],\n                        landmarks=landmarks,\n                        class_name=class_name,\n                        embedding=pose_embedder(landmarks),\n                    ))\n\n        return pose_samples\n\n    def find_pose_sample_outliers(self):\n        \"\"\"Classifies each sample against the entire database.\"\"\"\n        # Find outliers in target poses\n        outliers = []\n        for sample in self._pose_samples:\n            # Find nearest poses for the target one.\n            pose_landmarks = sample.landmarks.copy()\n            pose_classification = self.__call__(pose_landmarks)\n            class_names = [class_name for class_name, count in pose_classification.items() if\n                           count == max(pose_classification.values())]\n\n            # Sample is an outlier if nearest poses have different class or more than\n            # one pose class is detected as nearest.\n            if sample.class_name not in class_names or len(class_names) != 1:\n                outliers.append(PoseSampleOutlier(\n                    sample, class_names, pose_classification))\n\n        return outliers\n\n    def __call__(self, pose_landmarks):\n        \"\"\"Classifies given pose.\n\n        Classification is done in two stages:\n          * First we pick top-N samples by MAX distance. It allows us to remove samples\n            that are almost the same as the given pose but have a few joints bent in the\n            other direction.\n          * Then we pick top-N samples by MEAN distance. After outliers are removed\n            on the previous step, we can pick samples that are closest on average.\n\n        Args:\n          pose_landmarks: NumPy array with 3D landmarks of shape (N, 3).\n\n        Returns:\n          Dictionary with count of nearest pose samples from the database. 
Sample:\n {\n 'pushups_down': 8,\n 'pushups_up': 2,\n }\n \"\"\"\n # Check that provided and target poses have the same shape.\n assert pose_landmarks.shape == (self._n_landmarks, self._n_dimensions), 'Unexpected shape: {}'.format(\n pose_landmarks.shape)\n\n # Get given pose embedding.\n pose_embedding = self._pose_embedder(pose_landmarks)\n flipped_pose_embedding = self._pose_embedder(\n pose_landmarks * np.array([-1, 1, 1]))\n\n # Filter by max distance.\n #\n # That helps to remove outliers - poses that are almost the same as the\n # given one, but has one joint bent into another direction and actually\n # represnt a different pose class.\n max_dist_heap = []\n for sample_idx, sample in enumerate(self._pose_samples):\n max_dist = min(\n np.max(np.abs(sample.embedding - pose_embedding)\n * self._axes_weights),\n np.max(np.abs(sample.embedding - flipped_pose_embedding)\n * self._axes_weights),\n )\n max_dist_heap.append([max_dist, sample_idx])\n\n max_dist_heap = sorted(max_dist_heap, key=lambda x: x[0])\n max_dist_heap = max_dist_heap[:self._top_n_by_max_distance]\n\n # Filter by mean distance.\n #\n # After removing outliers we can find the nearest pose by mean distance.\n\n # HERE2\n mean_dist_heap = []\n for _, sample_idx in max_dist_heap:\n sample = self._pose_samples[sample_idx]\n mean_dist = min(\n np.mean(np.abs(sample.embedding - pose_embedding)\n * self._axes_weights),\n np.mean(np.abs(sample.embedding - flipped_pose_embedding)\n * self._axes_weights),\n )\n mean_dist_heap.append([mean_dist, sample_idx])\n\n mean_dist_heap = sorted(mean_dist_heap, key=lambda x: x[0])\n mean_dist_heap = mean_dist_heap[:self._top_n_by_mean_distance]\n\n # print(mean_dist_heap[0])\n # print(mean_dist_heap)\n # for _, k in mean_dist_heap:\n # print(self._pose_samples[k].class_name)\n\n # Collect results into map: (class_name -> n_samples)\n class_names = [\n self._pose_samples[sample_idx].class_name for _, sample_idx in mean_dist_heap]\n result = {class_name: class_names.count(\n class_name) for class_name in set(class_names)}\n\n # print(result)\n\n return result\n\n\n# LINES_*_BODY are used when drawing the skeleton onto the source image.\n# Each variable is a list of continuous lines.\n# Each line is a list of keypoints as defined at https://google.github.io/mediapipe/solutions/pose.html#pose-landmark-model-blazepose-ghum-3d\nLINES_FULL_BODY = [[28, 30, 32, 28, 26, 24, 12, 11, 23, 25, 27, 29, 31, 27],\n [23, 24],\n [22, 16, 18, 20, 16, 14, 12],\n [21, 15, 17, 19, 15, 13, 11],\n [8, 6, 5, 4, 0, 1, 2, 3, 7],\n [10, 9],\n ]\nLINES_UPPER_BODY = [[12, 11, 23, 24, 12],\n [22, 16, 18, 20, 16, 14, 12],\n [21, 15, 17, 19, 15, 13, 11],\n [8, 6, 5, 4, 0, 1, 2, 3, 7],\n [10, 9],\n ]\n# LINE_MESH_*_BODY are used when drawing the skeleton in 3D.\nrgb = {\"right\": (0, 1, 0), \"left\": (1, 0, 0), \"middle\": (1, 1, 0)}\nLINE_MESH_FULL_BODY = [[9, 10], [4, 6], [1, 3],\n [12, 14], [14, 16], [16, 20], [20, 18], [18, 16],\n [12, 11], [11, 23], [23, 24], [24, 12],\n [11, 13], [13, 15], [15, 19], [19, 17], [17, 15],\n [24, 26], [26, 28], [32, 30],\n [23, 25], [25, 27], [29, 31]]\nLINE_TEST = [[12, 11], [11, 23], [23, 24], [24, 12]]\n\nCOLORS_FULL_BODY = [\"middle\", \"right\", \"left\",\n \"right\", \"right\", \"right\", \"right\", \"right\",\n \"middle\", \"middle\", \"middle\", \"middle\",\n \"left\", \"left\", \"left\", \"left\", \"left\",\n \"right\", \"right\", \"right\", \"left\", \"left\", \"left\"]\nCOLORS_FULL_BODY = [rgb[x] for x in COLORS_FULL_BODY]\nLINE_MESH_UPPER_BODY = [[9, 10], [4, 6], [1, 3],\n [12, 
14], [14, 16], [16, 20], [20, 18], [18, 16],\n [12, 11], [11, 23], [23, 24], [24, 12],\n [11, 13], [13, 15], [15, 19], [19, 17], [17, 15]\n ]\n\n# For gesture demo\nsemaphore_flag = {\n (3, 4): 'A', (2, 4): 'B', (1, 4): 'C', (0, 4): 'D',\n (4, 7): 'E', (4, 6): 'F', (4, 5): 'G', (2, 3): 'H',\n (0, 3): 'I', (0, 6): 'J', (3, 0): 'K', (3, 7): 'L',\n (3, 6): 'M', (3, 5): 'N', (2, 1): 'O', (2, 0): 'P',\n (2, 7): 'Q', (2, 6): 'R', (2, 5): 'S', (1, 0): 'T',\n (1, 7): 'U', (0, 5): 'V', (7, 6): 'W', (7, 5): 'X',\n (1, 6): 'Y', (5, 6): 'Z'\n}\n\n\n# def to_planar(arr: np.ndarray, shape: tuple) -> list:\ndef to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:\n resized = cv2.resize(arr, shape)\n return resized.transpose(2, 0, 1)\n\n\nclass BlazeposeDepthai:\n def __init__(self, input_src=None,\n pd_path=POSE_DETECTION_MODEL,\n pd_score_thresh=0.5, pd_nms_thresh=0.3,\n lm_path=FULL_BODY_LANDMARK_MODEL,\n lm_score_threshold=0.7,\n full_body=True,\n use_gesture=False,\n use_pose=\"mountain\",\n track=\"beginners\",\n get_angles=False,\n smoothing=True,\n filter_window_size=5,\n filter_velocity_scale=10,\n show_3d=False,\n crop=False,\n multi_detection=False,\n output=None,\n internal_fps=15):\n\n self.pd_path = pd_path\n self.pd_score_thresh = pd_score_thresh\n self.pd_nms_thresh = pd_nms_thresh\n self.lm_path = lm_path\n self.lm_score_threshold = lm_score_threshold\n self.full_body = full_body\n self.use_gesture = use_gesture\n self.use_pose = use_pose\n self.track = track\n self.get_angles = get_angles\n self.smoothing = smoothing\n self.show_3d = show_3d\n self.crop = crop\n self.multi_detection = multi_detection\n if self.multi_detection:\n print(\"With multi-detection, smoothing filter is disabled.\")\n self.smoothing = False\n self.internal_fps = internal_fps\n\n if input_src == None:\n self.input_type = \"internal\" # OAK* internal color camera\n # Used when saving the output in a video file. 
Should be close to the real fps\n self.video_fps = internal_fps\n # Depends on cam.setResolution() in create_pipeline()\n video_height = video_width = 1080\n\n elif input_src.endswith('.jpg') or input_src.endswith('.png'):\n self.input_type = \"image\"\n self.img = cv2.imread(input_src)\n self.video_fps = 25\n video_height, video_width = self.img.shape[:2]\n else:\n self.input_type = \"video\"\n if input_src.isdigit():\n input_type = \"webcam\"\n input_src = int(input_src)\n self.cap = cv2.VideoCapture(input_src)\n self.video_fps = int(self.cap.get(cv2.CAP_PROP_FPS))\n video_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n video_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(\"Video FPS:\", self.video_fps)\n\n self.nb_kps = 33 if self.full_body else 25\n\n if self.smoothing:\n self.filter = mpu.LandmarksSmoothingFilter(\n filter_window_size, filter_velocity_scale, (self.nb_kps, 3))\n\n # Create SSD anchors\n # https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt\n anchor_options = mpu.SSDAnchorOptions(num_layers=4,\n min_scale=0.1484375,\n max_scale=0.75,\n input_size_height=128,\n input_size_width=128,\n anchor_offset_x=0.5,\n anchor_offset_y=0.5,\n strides=[8, 16, 16, 16],\n aspect_ratios=[1.0],\n reduce_boxes_in_lowest_layer=False,\n interpolated_scale_aspect_ratio=1.0,\n fixed_anchor_size=True)\n self.anchors = mpu.generate_anchors(anchor_options)\n self.nb_anchors = self.anchors.shape[0]\n print(f\"{self.nb_anchors} anchors have been created\")\n\n # Rendering flags\n self.show_pd_box = False\n self.show_pd_kps = False\n self.show_rot_rect = False\n self.show_landmarks = True\n self.show_scores = False\n self.show_gesture = self.use_gesture\n self.show_pose = self.use_pose\n self.show_angles = self.get_angles\n self.show_fps = True\n\n if self.show_3d:\n self.vis3d = o3d.visualization.Visualizer()\n self.vis3d.create_window()\n opt = self.vis3d.get_render_option()\n opt.background_color = np.asarray([0, 0, 0])\n z = min(video_height, video_width) / 3\n self.grid_floor = create_grid([0, video_height, -z], [video_width, video_height, -z],\n [video_width, video_height, z], [0, video_height, z], 5, 2, color=(1, 1, 1))\n self.grid_wall = create_grid([0, 0, z], [video_width, 0, z], [video_width, video_height, z],\n [0, video_height, z], 5, 2, color=(1, 1, 1))\n self.vis3d.add_geometry(self.grid_floor)\n self.vis3d.add_geometry(self.grid_wall)\n view_control = self.vis3d.get_view_control()\n view_control.set_up(np.array([0, -1, 0]))\n view_control.set_front(np.array([0, 0, -1]))\n\n if output is None:\n self.output = None\n else:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n self.output = cv2.VideoWriter(\n output, fourcc, self.video_fps, (video_width, video_height))\n\n def create_pipeline(self):\n print(\"INFO: Creating pipeline...\")\n # Start defining a pipeline\n pipeline = dai.Pipeline()\n pipeline.setOpenVINOVersion(\n version=dai.OpenVINO.Version.VERSION_2021_2)\n self.pd_input_length = 128\n\n if self.input_type == \"internal\":\n # ColorCamera\n print(\"INFO: Initializing Camera...\")\n cam = pipeline.createColorCamera()\n cam.setPreviewSize(self.pd_input_length, self.pd_input_length)\n cam.setResolution(\n dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n # Crop video to square shape (palm detection takes square image as input)\n self.video_size = min(cam.getVideoSize())\n cam.setVideoSize(self.video_size, self.video_size)\n #\n cam.setFps(self.internal_fps)\n cam.setInterleaved(False)\n 
cam.setBoardSocket(dai.CameraBoardSocket.RGB)\n cam_out = pipeline.createXLinkOut()\n cam_out.setStreamName(\"cam_out\")\n # Link video output to host for higher resolution\n cam.video.link(cam_out.input)\n\n # ffmpeg_streaming.input(\"internal\").output(\n # \"rtmps://global-live.mux.com:443/app/6299b472-c3a5-a473-2f48-4078a1718b29\", format='mp4').run()\n\n # Define pose detection model\n print(\"INFO: Creating Pose Detection Neural Network...\")\n pd_nn = pipeline.createNeuralNetwork()\n pd_nn.setBlobPath(str(Path(self.pd_path).resolve().absolute()))\n # Increase threads for detection\n # pd_nn.setNumInferenceThreads(2)\n # Specify that network takes latest arriving frame in non-blocking manner\n # Pose detection input\n if self.input_type == \"internal\":\n pd_nn.input.setQueueSize(1)\n pd_nn.input.setBlocking(False)\n cam.preview.link(pd_nn.input)\n else:\n pd_in = pipeline.createXLinkIn()\n pd_in.setStreamName(\"pd_in\")\n pd_in.out.link(pd_nn.input)\n # Pose detection output\n pd_out = pipeline.createXLinkOut()\n pd_out.setStreamName(\"pd_out\")\n pd_nn.out.link(pd_out.input)\n\n # Define landmark model\n print(\"INFO: Creating Landmark Neural Network...\")\n lm_nn = pipeline.createNeuralNetwork()\n lm_nn.setBlobPath(str(Path(self.lm_path).resolve().absolute()))\n lm_nn.setNumInferenceThreads(1)\n # Landmark input\n self.lm_input_length = 256\n lm_in = pipeline.createXLinkIn()\n lm_in.setStreamName(\"lm_in\")\n lm_in.out.link(lm_nn.input)\n # Landmark output\n lm_out = pipeline.createXLinkOut()\n lm_out.setStreamName(\"lm_out\")\n lm_nn.out.link(lm_out.input)\n\n print(\"INFO: Pipeline created\")\n print(\"INFO: Ready\")\n return pipeline\n\n def pd_postprocess(self, inference):\n scores = np.array(inference.getLayerFp16(\n \"classificators\"), dtype=np.float16) # 896\n bboxes = np.array(inference.getLayerFp16(\"regressors\"), dtype=np.float16).reshape(\n (self.nb_anchors, 12)) # 896x12\n\n # Decode bboxes\n self.regions = mpu.decode_bboxes(self.pd_score_thresh, scores, bboxes, self.anchors,\n best_only=not self.multi_detection)\n # Non maximum suppression (not needed if best_only is True)\n if self.multi_detection:\n self.regions = mpu.non_max_suppression(\n self.regions, self.pd_nms_thresh)\n\n mpu.detections_to_rect(self.regions, kp_pair=[\n 0, 1] if self.full_body else [2, 3])\n mpu.rect_transformation(self.regions, self.frame_size, self.frame_size)\n\n def pd_render(self, frame):\n for r in self.regions:\n if self.show_pd_box:\n box = (np.array(r.pd_box) * self.frame_size).astype(int)\n cv2.rectangle(\n frame, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 2)\n if self.show_pd_kps:\n # Key point 0 - mid hip center\n # Key point 1 - point that encodes size & rotation (for full body)\n # Key point 2 - mid shoulder center\n # Key point 3 - point that encodes size & rotation (for upper body)\n if self.full_body:\n # Only kp 0 and 1 used\n list_kps = [0, 1]\n else:\n # Only kp 2 and 3 used for upper body\n list_kps = [2, 3]\n for kp in list_kps:\n x = int(r.pd_kps[kp][0] * self.frame_size)\n y = int(r.pd_kps[kp][1] * self.frame_size)\n cv2.circle(frame, (x, y), 3, (0, 0, 255), -1)\n cv2.putText(frame, str(kp), (x, y + 12),\n cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\n if self.show_scores:\n cv2.putText(frame, f\"Pose score: {r.pd_score:.2f}\",\n (int(r.pd_box[0] * self.frame_size + 10),\n int((r.pd_box[1] + r.pd_box[3]) * self.frame_size + 60)),\n cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)\n\n def lm_postprocess(self, region, inference):\n region.lm_score = 
inference.getLayerFp16(\"output_poseflag\")[0]\n if region.lm_score > self.lm_score_threshold:\n self.nb_active_regions += 1\n\n lm_raw = np.array(inference.getLayerFp16(\"ld_3d\")).reshape(-1, 5)\n # Each keypoint have 5 information:\n # - X,Y coordinates are local to the region of\n # interest and range from [0.0, 255.0].\n # - Z coordinate is measured in \"image pixels\" like\n # the X and Y coordinates and represents the\n # distance relative to the plane of the subject's\n # hips, which is the origin of the Z axis. Negative\n # values are between the hips and the camera;\n # positive values are behind the hips. Z coordinate\n # scale is similar with X, Y scales but has different\n # nature as obtained not via human annotation, by\n # fitting synthetic data (GHUM model) to the 2D\n # annotation.\n # - Visibility, after user-applied sigmoid denotes the\n # probability that a keypoint is located within the\n # frame and not occluded by another bigger body\n # part or another object.\n # - Presence, after user-applied sigmoid denotes the\n # probability that a keypoint is located within the\n # frame.\n\n # Normalize x,y,z. Scaling in z = scaling in x = 1/self.lm_input_length\n lm_raw[:, :3] /= self.lm_input_length\n # Apply sigmoid on visibility and presence (if used later)\n # lm_raw[:,3:5] = 1 / (1 + np.exp(-lm_raw[:,3:5]))\n\n # region.landmarks contains the landmarks normalized 3D coordinates in the relative oriented body bounding box\n region.landmarks = lm_raw[:, :3]\n # Calculate the landmark coordinate in square padded image (region.landmarks_padded)\n src = np.array([(0, 0), (1, 0), (1, 1)], dtype=np.float32)\n dst = np.array([(x, y) for x, y in region.rect_points[1:]],\n dtype=np.float32) # region.rect_points[0] is left bottom point and points going clockwise!\n mat = cv2.getAffineTransform(src, dst)\n lm_xy = np.expand_dims(region.landmarks[:self.nb_kps, :2], axis=0)\n lm_xy = np.squeeze(cv2.transform(lm_xy, mat))\n # A segment of length 1 in the coordinates system of body bounding box takes region.rect_w_a pixels in the\n # original image. 
Then we arbitrarily divide by 4 for a more realistic appearance.\n            lm_z = region.landmarks[:self.nb_kps, 2:3] * region.rect_w_a / 4\n            lm_xyz = np.hstack((lm_xy, lm_z))\n            if self.smoothing:\n                lm_xyz = self.filter.apply(lm_xyz)\n            # Builtin int: np.int was deprecated and later removed from NumPy.\n            region.landmarks_padded = lm_xyz.astype(int)\n            # If we added padding to make the image square, we need to remove this padding from landmark coordinates\n            # region.landmarks_abs contains absolute landmark coordinates in the original image (padding removed)\n            region.landmarks_abs = region.landmarks_padded.copy()\n            if self.pad_h > 0:\n                region.landmarks_abs[:, 1] -= self.pad_h\n            if self.pad_w > 0:\n                region.landmarks_abs[:, 0] -= self.pad_w\n\n            if self.use_gesture:\n                self.recognize_gesture(region)\n\n            if self.use_pose:\n                self.recognize_pose(\n                    region, expected_pose=self.use_pose, track=self.track)\n\n    def lm_render(self, frame, region):\n        if region.lm_score > self.lm_score_threshold:\n            if self.show_rot_rect:\n                cv2.polylines(\n                    frame, [np.array(region.rect_points)], True, (0, 255, 255), 2, cv2.LINE_AA)\n            if self.show_landmarks:\n\n                list_connections = LINES_FULL_BODY if self.full_body else LINES_UPPER_BODY\n                lines = [np.array([region.landmarks_padded[point, :2]\n                                   for point in line]) for line in list_connections]\n                cv2.polylines(frame, lines, False,\n                              (255, 180, 90), 2, cv2.LINE_AA)\n\n                for i, x_y in enumerate(region.landmarks_padded[:, :2]):\n                    if i > 10:\n                        color = (0, 255, 0) if i % 2 == 0 else (0, 0, 255)\n                    elif i == 0:\n                        color = (0, 255, 255)\n                    elif i in [4, 5, 6, 8, 10]:\n                        color = (0, 255, 0)\n                    else:\n                        color = (0, 0, 255)\n                    # Thickness -1 draws a filled circle.\n                    cv2.circle(frame, (x_y[0], x_y[1]), 4, color, -1)\n\n                if self.show_3d:\n                    points = region.landmarks_abs\n                    lines = LINE_MESH_FULL_BODY if self.full_body else LINE_MESH_UPPER_BODY\n                    colors = COLORS_FULL_BODY\n                    for i, a_b in enumerate(lines):\n                        a, b = a_b\n                        line = create_segment(\n                            points[a], points[b], radius=5, color=colors[i])\n                        if line:\n                            self.vis3d.add_geometry(\n                                line, reset_bounding_box=False)\n\n            if self.show_scores:\n                cv2.putText(frame, f\"Landmark score: {region.lm_score:.2f}\",\n                            (int(region.pd_box[0] * self.frame_size + 10),\n                             int((region.pd_box[1] + region.pd_box[3]) * self.frame_size + 90)),\n                            cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)\n            if self.use_gesture and self.show_gesture:\n                cv2.putText(frame, region.gesture, (\n                    int(region.pd_box[0] * self.frame_size + 10), int(region.pd_box[1] * self.frame_size - 50)),\n                    cv2.FONT_HERSHEY_PLAIN, 5, (0, 1190, 255), 3)\n            if self.use_pose and self.show_pose:\n                cv2.putText(frame, region.pose, (\n                    int(region.pd_box[0] * self.frame_size + 10), int(region.pd_box[1] * self.frame_size - 50)),\n                    cv2.FONT_HERSHEY_PLAIN, 5, (0, 1190, 255), 3)\n\n    def recognize_gesture(self, r):\n\n        def angle_with_y(v):\n            # v: 2d vector (x,y)\n            # Returns angle in degrees of v with y-axis of image plane\n            if v[1] == 0:\n                return 90\n            angle = atan2(v[0], v[1])\n            return np.degrees(angle)\n\n        # For the demo, we want to recognize the flag semaphore alphabet\n        # For this task, we just need to measure the angles of both arms with vertical\n\n        right_arm_angle = angle_with_y(\n            r.landmarks_abs[14, :2] - r.landmarks_abs[12, :2])\n        left_arm_angle = angle_with_y(\n            r.landmarks_abs[13, :2] - r.landmarks_abs[11, :2])\n        right_pose = int((right_arm_angle + 202.5) / 45)\n        left_pose = int((left_arm_angle + 202.5) / 45)\n        r.gesture = semaphore_flag.get((right_pose, left_pose), None)\n\n    def recognize_pose(self, r, expected_pose, track):\n\n        r.pose = \"Pose not detected\"\n\n        
#################################################################################\n\n pose_embedder = FullBodyPoseEmbedder()\n\n if track == \"beginners\":\n pose_folder = \"./pose_csvs/beginners_poses_csvs_out\"\n elif track == \"asthma\":\n pose_folder = \"./pose_csvs/asthma_poses_csvs_out\"\n elif track == \"power\":\n pose_folder = \"./pose_csvs/power_poses_csvs_out\"\n elif track == \"immunity\":\n pose_folder = \"./pose_csvs/immunity_poses_csvs_out\"\n elif track == \"insomnia\":\n pose_folder = \"./pose_csvs/insomnia_poses_csvs_out\"\n elif track == \"cardiovascular\":\n pose_folder = \"./pose_csvs/cardiovascular_poses_csvs_out\"\n elif track == \"migraine\":\n pose_folder = \"./pose_csvs/migraine_poses_csvs_out\"\n elif track == \"pregnancy\":\n pose_folder = \"./pose_csvs/pregnancy_poses_csvs_out\"\n\n pose_classifier = PoseClassifier(\n pose_samples_folder=pose_folder,\n pose_embedder=pose_embedder,\n top_n_by_max_distance=30,\n top_n_by_mean_distance=10)\n\n assert r.landmarks_abs.shape == (\n 33, 3), 'Unexpected landmarks shape: {}'.format(r.landmarks_abs.shape)\n\n # print(r.landmarks_abs)\n # print(type(r.landmarks_abs))\n\n r.landmarks_abs = r.landmarks_abs.astype('float32')\n\n pose_classification = pose_classifier(r.landmarks_abs)\n\n pose_classification_filter = EMADictSmoothing(\n window_size=10,\n alpha=0.2)\n\n # Smooth classification using EMA.\n pose_classification_filtered = pose_classification_filter(\n pose_classification)\n\n max_sample = 0\n pose = 0\n\n # print(pose_classification_filtered)\n\n for i in pose_classification_filtered.keys():\n if pose_classification_filtered[i] > max_sample:\n pose = i\n max_sample = pose_classification_filtered[i]\n\n r.pose = pose\n\n accuracy = max_sample/10\n\n # data = {\"pose\": pose, \"accuracy\": rounded_accuracy}\n\n # value = db.child(\"123\").get()\n # if value.val() is None:\n # db.child(\"123\").set(data)\n # else:\n # db.child(\"123\").update(data)\n\n # def getAngle(firstPoint, midPoint, lastPoint):\n # result = np.degrees(atan2(lastPoint[1] - midPoint[1],lastPoint[0] - midPoint[0])\n # - atan2(firstPoint[1] - midPoint[1], firstPoint[0] - midPoint[0]))\n # result = abs(result) # Angle should never be negative\n # if (result > 180) :\n # result = 360.0 - result # Always get the acute representation of the angle\n\n # result = 360.0 - result # Always get the acute representation of the angle\n # return result\n # print(r.landmarks_abs[14,:2])\n # print(r.landmarks_abs[14])\n # print(r.landmarks_abs[14,:3])\n\n def get3DAngle(A, B, C):\n # v1 = {A[0] - B[0], A[1] - B[1], A[2] - B[2]}\n # v2 = {C[0] - B[0], C[1] - B[1], C[2] - B[2]}\n # v1mag = (A[0] * A[0] + A[1] * A[1] + A[2] * A[2])**(1/2)\n # v1norm = {A[0] / v1mag, A[1] / v1mag, A[2] / v1mag}\n # v2mag = (B[0] * B[0] + B[1] * B[1] + B[2] * B[2])**(1/2)\n # v2norm = {B[0] / v2mag, B[1] / v2mag, B[2] / v2mag}\n # res = v1norm[0] * v2norm[0] + v1norm[1] * v2norm[1] + v1norm[2] * v2norm[2]\n # angle = acos(res)\n a = np.array(A)\n b = np.array(B)\n c = np.array(C)\n\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / \\\n (np.linalg.norm(ba) * np.linalg.norm(bc))\n angle = np.arccos(cosine_angle)\n return np.degrees(angle)\n\n LEFT_ARM_ANGLE = get3DAngle(\n r.landmarks_abs[12, :3], r.landmarks_abs[14, :3], r.landmarks_abs[16, :3])\n RIGHT_ARM_ANGLE = get3DAngle(\n r.landmarks_abs[11, :3], r.landmarks_abs[13, :3], r.landmarks_abs[15, :3])\n\n LEFT_HAND_HIP_ANGLE = get3DAngle(\n r.landmarks_abs[14, :3], r.landmarks_abs[12, :3], r.landmarks_abs[24, :3])\n 
RIGHT_HAND_HIP_ANGLE = get3DAngle(\n            r.landmarks_abs[13, :3], r.landmarks_abs[11, :3], r.landmarks_abs[23, :3])  # elbow (13), shoulder (11), hip (23), mirroring the left side\n\n        LEFT_LEG_ANGLE = get3DAngle(\n            r.landmarks_abs[24, :3], r.landmarks_abs[26, :3], r.landmarks_abs[28, :3])\n        RIGHT_LEG_ANGLE = get3DAngle(\n            r.landmarks_abs[23, :3], r.landmarks_abs[25, :3], r.landmarks_abs[27, :3])\n\n        LEFT_HIP_KNEE_ANGLE = get3DAngle(\n            r.landmarks_abs[12, :3], r.landmarks_abs[24, :3], r.landmarks_abs[26, :3])\n        RIGHT_HIP_KNEE_ANGLE = get3DAngle(\n            r.landmarks_abs[11, :3], r.landmarks_abs[23, :3], r.landmarks_abs[25, :3])\n\n        ANGLE_BETWEEN_LEGS = get3DAngle(\n            r.landmarks_abs[26, :3], r.landmarks_abs[0, :3], r.landmarks_abs[25, :3])\n\n        # print(\"LEFT_ARM_ANGLE\",LEFT_ARM_ANGLE)\n        # print(\"RIGHT_ARM_ANGLE\",RIGHT_ARM_ANGLE)\n\n        # print(\"LEFT_HAND_HIP_ANGLE\",LEFT_HAND_HIP_ANGLE)\n        # print(\"RIGHT_HAND_HIP_ANGLE\",RIGHT_HAND_HIP_ANGLE)\n\n        # print(\"LEFT_LEG_ANGLE\",LEFT_LEG_ANGLE)\n        # print(\"RIGHT_LEG_ANGLE\",RIGHT_LEG_ANGLE)\n\n        # print(\"LEFT_HIP_KNEE_ANGLE\", LEFT_HIP_KNEE_ANGLE)\n        # print(\"RIGHT_HIP_KNEE_ANGLE\", RIGHT_HIP_KNEE_ANGLE)\n\n        # print(\"ANGLE_BETWEEN_LEGS\", ANGLE_BETWEEN_LEGS)\n        from collections import OrderedDict\n        diff_dict = OrderedDict()\n        feedback = \"\"\n        # Reference angles for the expected pose (keys are the POSES entries above).\n        pose_angles = POSES[expected_pose]\n\n        # Signed difference between the reference angle and the measured angle, per joint.\n        diff_dict[\"LEFT_ARM_ANGLE\"] = pose_angles[\"LEFT_ARM_ANGLE\"] - \\\n            LEFT_ARM_ANGLE\n        diff_dict[\"RIGHT_ARM_ANGLE\"] = pose_angles[\"RIGHT_ARM_ANGLE\"] - \\\n            RIGHT_ARM_ANGLE\n        diff_dict[\"LEFT_HAND_HIP_ANGLE\"] = pose_angles[\"LEFT_HAND_HIP_ANGLE\"] - \\\n            LEFT_HAND_HIP_ANGLE\n        diff_dict[\"RIGHT_HAND_HIP_ANGLE\"] = pose_angles[\"RIGHT_HAND_HIP_ANGLE\"] - \\\n            RIGHT_HAND_HIP_ANGLE\n        diff_dict[\"LEFT_LEG_ANGLE\"] = pose_angles[\"LEFT_LEG_ANGLE\"] - \\\n            LEFT_LEG_ANGLE\n        diff_dict[\"RIGHT_LEG_ANGLE\"] = pose_angles[\"RIGHT_LEG_ANGLE\"] - \\\n            RIGHT_LEG_ANGLE\n        diff_dict[\"LEFT_HIP_KNEE_ANGLE\"] = pose_angles[\"LEFT_HIP_KNEE_ANGLE\"] - \\\n            LEFT_HIP_KNEE_ANGLE\n        diff_dict[\"RIGHT_HIP_KNEE_ANGLE\"] = pose_angles[\"RIGHT_HIP_KNEE_ANGLE\"] - \\\n            RIGHT_HIP_KNEE_ANGLE\n        diff_dict[\"ANGLE_BETWEEN_LEGS\"] = pose_angles[\"ANGLE_BETWEEN_LEGS\"] - \\\n            ANGLE_BETWEEN_LEGS\n\n        diff_dict = sorted(diff_dict.items(),\n                           key=lambda item: abs(item[1]), reverse=True)\n        # print(diff_dict)\n\n        # global iter_count\n\n        # iter_count += 1\n\n        # cur_time = round(time.time() - start_time, 2)\n\n        # print(f'{cur_time} seconds --> {iter_count}')\n\n        new_accuracy = 0\n        accuracy_threshold = 180\n\n        feedback = \"{\"\n\n        # jointname1 _positive:jointname_name#\n        for key in diff_dict[0:2]:\n            # feedback += key[0]+\":\"+str(key[1])+\"#\"\n            # feedback += \"\\'\" + key[0] + \"\\':\" + key[1] + \",\"\n            value = key[1]\n            feedback += f'\\'{key[0]}\\':{value:.2f},'\n\n        feedback = feedback[:-1] + \"}\"\n\n        if pose == expected_pose:\n            for key in diff_dict:\n                calculated_accuracy = 1 - (abs(key[1]) / accuracy_threshold)\n                new_accuracy += calculated_accuracy\n\n            new_accuracy /= len(diff_dict)\n\n        # calculating weighted average\n        # giving more weightage to classes\n        # less weightage to angles\n        weighted_accuracy = accuracy * 0.6 + new_accuracy * 0.4\n        rounded_accuracy = round(weighted_accuracy, 2)\n\n        # if pose == \"triangle1\" or pose == \"triangle2\":\n        #     pose = \"triangle\"\n\n        data = {\"pose\": pose, \"accuracy\": rounded_accuracy, \"feedback\": feedback}\n        print(f\"RECOGNIZED: {data}\")\n\n        # print(\"----------------------\")\n        # print(f'POSE: {pose}')\n        # print(f'ACCURACY: classes: {accuracy}, angles: {new_accuracy}')\n        # print(f'WEIGHTED: {weighted_accuracy}')\n        # print(f'FEEDBACK: {feedback}')\n        # print(\"----------------------\\n\")\n\n\n    def run(self):\n\n        device = dai.Device(self.create_pipeline())\n        device.startPipeline()\n\n        # Define data queues\n        if self.input_type == \"internal\":\n            q_video = device.getOutputQueue(\n                name=\"cam_out\", maxSize=1, blocking=False)\n            q_pd_out = device.getOutputQueue(\n                name=\"pd_out\", maxSize=1, blocking=False)\n            q_lm_out = device.getOutputQueue(\n                name=\"lm_out\", maxSize=2, blocking=False)\n            q_lm_in = device.getInputQueue(name=\"lm_in\")\n        else:\n            q_pd_in = device.getInputQueue(name=\"pd_in\")\n            q_pd_out = device.getOutputQueue(\n                name=\"pd_out\", maxSize=4, blocking=True)\n            q_lm_out = device.getOutputQueue(\n                name=\"lm_out\", maxSize=4, blocking=True)\n            q_lm_in = device.getInputQueue(name=\"lm_in\")\n\n        self.fps = FPS(mean_nb_frames=20)\n\n        seq_num = 0\n        nb_pd_inferences = 0\n        nb_lm_inferences = 0\n        glob_pd_rtrip_time = 0\n        glob_lm_rtrip_time = 0\n        while True:\n            self.fps.update()\n\n            if self.input_type == \"internal\":\n                in_video = q_video.get()\n                video_frame = in_video.getCvFrame()\n                # The image is square cropped on the device\n                self.frame_size = video_frame.shape[0]\n                
self.pad_w = self.pad_h = 0\n else:\n if self.input_type == \"image\":\n vid_frame = self.img\n else:\n ok, vid_frame = self.cap.read()\n if not ok:\n break\n\n h, w = vid_frame.shape[:2]\n if self.crop:\n # Cropping the long side to get a square shape\n self.frame_size = min(h, w)\n dx = (w - self.frame_size) // 2\n dy = (h - self.frame_size) // 2\n video_frame = vid_frame[dy:dy +\n self.frame_size, dx:dx + self.frame_size]\n else:\n # Padding on the small side to get a square shape\n self.frame_size = max(h, w)\n self.pad_h = int((self.frame_size - h) / 2)\n self.pad_w = int((self.frame_size - w) / 2)\n video_frame = cv2.copyMakeBorder(vid_frame, self.pad_h, self.pad_h, self.pad_w, self.pad_w,\n cv2.BORDER_CONSTANT)\n\n frame_nn = dai.ImgFrame()\n frame_nn.setSequenceNum(seq_num)\n frame_nn.setWidth(self.pd_input_length)\n frame_nn.setHeight(self.pd_input_length)\n frame_nn.setData(\n to_planar(video_frame, (self.pd_input_length, self.pd_input_length)))\n pd_rtrip_time = now()\n q_pd_in.send(frame_nn)\n\n seq_num += 1\n\n annotated_frame = video_frame.copy()\n\n # Get pose detection\n inference = q_pd_out.get()\n if self.input_type != \"internal\":\n pd_rtrip_time = now() - pd_rtrip_time\n glob_pd_rtrip_time += pd_rtrip_time\n self.pd_postprocess(inference)\n self.pd_render(annotated_frame)\n nb_pd_inferences += 1\n\n # Landmarks\n self.nb_active_regions = 0\n if self.show_3d:\n self.vis3d.clear_geometries()\n self.vis3d.add_geometry(\n self.grid_floor, reset_bounding_box=False)\n self.vis3d.add_geometry(\n self.grid_wall, reset_bounding_box=False)\n for i, r in enumerate(self.regions):\n frame_nn = mpu.warp_rect_img(\n r.rect_points, video_frame, self.lm_input_length, self.lm_input_length)\n nn_data = dai.NNData()\n nn_data.setLayer(\"input_1\", to_planar(\n frame_nn, (self.lm_input_length, self.lm_input_length)))\n if i == 0:\n lm_rtrip_time = now() # We measure only for the first region\n q_lm_in.send(nn_data)\n\n # Get landmarks\n inference = q_lm_out.get()\n if i == 0:\n lm_rtrip_time = now() - lm_rtrip_time\n glob_lm_rtrip_time += lm_rtrip_time\n nb_lm_inferences += 1\n self.lm_postprocess(r, inference)\n self.lm_render(annotated_frame, r)\n if self.show_3d:\n self.vis3d.poll_events()\n self.vis3d.update_renderer()\n if self.smoothing and self.nb_active_regions == 0:\n self.filter.reset()\n\n if self.input_type != \"internal\" and not self.crop:\n annotated_frame = annotated_frame[self.pad_h:self.pad_h +\n h, self.pad_w:self.pad_w + w]\n\n if self.show_fps:\n self.fps.display(annotated_frame, orig=(50, 50), size=1, color=(240, 180, 100))\n\n # For displaying the camera view on this system\n # cv2.imshow(\"Blazepose\", annotated_frame)\n\n # HERE:\n # For streaming to RTMP URL\n # ret2, frame2 = cv2.imencode('.png', annotated_frame)\n # muxStream.stdin.write(frame2.tobytes())\n\n if self.output:\n self.output.write(annotated_frame)\n\n key = cv2.waitKey(1)\n if key == ord('q') or key == 27:\n break\n elif key == 32:\n # Pause on space bar\n cv2.waitKey(0)\n elif key == ord('1'):\n self.show_pd_box = not self.show_pd_box\n elif key == ord('2'):\n self.show_pd_kps = not self.show_pd_kps\n elif key == ord('3'):\n self.show_rot_rect = not self.show_rot_rect\n elif key == ord('4'):\n self.show_landmarks = not self.show_landmarks\n elif key == ord('5'):\n self.show_scores = not self.show_scores\n elif key == ord('6'):\n self.show_gesture = not self.show_gesture\n elif key == ord('f'):\n self.show_fps = not self.show_fps\n\n # Print some stats\n print(f\"# pose detection inferences : 
{nb_pd_inferences}\")\n        print(f\"# landmark inferences : {nb_lm_inferences}\")\n        if self.input_type != \"internal\" and nb_pd_inferences != 0:\n            print(\n                f\"Pose detection round trip : {glob_pd_rtrip_time / nb_pd_inferences * 1000:.1f} ms\")\n            if nb_lm_inferences != 0:\n                print(\n                    f\"Landmark round trip : {glob_lm_rtrip_time / nb_lm_inferences * 1000:.1f} ms\")\n\n        if self.output:\n            self.output.release()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-i', '--input', type=str,\n                        help=\"Path to video or image file to use as input (default: internal camera)\")\n    parser.add_argument('-g', '--gesture', action=\"store_true\",\n                        help=\"Enable gesture recognition\")\n    parser.add_argument('-ps', '--pose', type=str,\n                        help=\"Enable pose recognition\")\n    parser.add_argument('-tr', '--track', type=str,\n                        help=\"Select a specific track\")\n    parser.add_argument(\"--pd_m\", type=str,\n                        help=\"Path to a .blob file for the pose detection model\")\n    parser.add_argument(\"--lm_m\", type=str,\n                        help=\"Path to a .blob file for the landmark model\")\n    parser.add_argument('-c', '--crop', action=\"store_true\",\n                        help=\"Center crop frames to a square shape before feeding pose detection model\")\n    parser.add_argument('-u', '--upper_body', action=\"store_true\",\n                        help=\"Use an upper body model\")\n    parser.add_argument('--no_smoothing', action=\"store_true\",\n                        help=\"Disable smoothing filter\")\n    parser.add_argument('--filter_window_size', type=int, default=5,\n                        help=\"Smoothing filter window size. Higher values add lag but improve stability (default=%(default)i)\")\n    parser.add_argument('--filter_velocity_scale', type=float, default=10,\n                        help=\"Smoothing filter velocity scale. Lower values add lag but improve stability (default=%(default)s)\")\n    parser.add_argument('-3', '--show_3d', action=\"store_true\",\n                        help=\"Display skeleton in 3d in a separate window (valid only for full body landmark model)\")\n    parser.add_argument(\"-o\", \"--output\",\n                        help=\"Path to output video file\")\n    parser.add_argument('--multi_detection', action=\"store_true\",\n                        help=\"Force multiple person detection (at your own risk)\")\n    parser.add_argument('--internal_fps', type=int, default=15,\n                        help=\"FPS of the internal color camera. Too high a value lowers NN FPS (default=%(default)i)\")\n    parser.add_argument('-a', '--angles', type=str,\n                        help=\"Get body angles\")\n\n    args = parser.parse_args()\n\n    if not args.pd_m:\n        args.pd_m = POSE_DETECTION_MODEL\n    if not args.lm_m:\n        if args.upper_body:\n            args.lm_m = UPPER_BODY_LANDMARK_MODEL\n        else:\n            args.lm_m = FULL_BODY_LANDMARK_MODEL\n    ht = BlazeposeDepthai(input_src=args.input,\n                          pd_path=args.pd_m,\n                          lm_path=args.lm_m,\n                          full_body=not args.upper_body,\n                          smoothing=not args.no_smoothing,\n                          filter_window_size=args.filter_window_size,\n                          filter_velocity_scale=args.filter_velocity_scale,\n                          use_gesture=args.gesture,\n                          use_pose=args.pose,\n                          track=args.track,\n                          get_angles=args.angles,\n                          show_3d=args.show_3d,\n                          crop=args.crop,\n                          multi_detection=args.multi_detection,\n                          output=args.output,\n                          internal_fps=args.internal_fps)\n    # python3 BlazeposeDepthai.py --input \"\n\n    ht.run()\n" ]
[ [ "numpy.hstack", "numpy.dot", "numpy.expand_dims", "numpy.abs", "numpy.asarray", "numpy.degrees", "numpy.linalg.norm", "numpy.arccos", "numpy.copy", "numpy.array" ] ]
beetrootpaul/corrscope
[ "ca3d4ae61e66de2907fd01004afe23af8f5d374a", "ca3d4ae61e66de2907fd01004afe23af8f5d374a" ]
[ "corrscope/generate/__init__.py", "corrscope/utils/scipy/signal.py" ]
[ "from typing import List\n\nimport attr\nimport matplotlib as mpl\nimport matplotlib.colors\nimport numpy as np\n\nTWOPI = 2 * np.pi\n\n\[email protected]\nclass Palette:\n unclipped: np.ndarray\n clipped: List[str]\n\n\ndef gen_circular_palette() -> Palette:\n \"\"\"Return a palette of 12 *distinct* colors in #rrggbb format.\n The last color is *not* the same as the first.\n \"\"\"\n from colorspacious import cspace_convert\n\n # Based on https://youtu.be/xAoljeRJ3lU?t=887\n N = 12\n\n VALUE = 85\n SATURATION = 30\n\n # Phase of color, chosen so output[0] is red\n HUE = 0.2\n DIRECTION = -1\n\n # constant lightness\n Jp = VALUE * np.ones(N)\n\n # constant saturation, varying hue\n t = DIRECTION * np.linspace(0, 1, N, endpoint=False) + HUE\n\n ap = SATURATION * np.sin(TWOPI * t)\n bp = SATURATION * np.cos(TWOPI * t)\n\n # [N](Jp, ap, bp) real\n Jp_ap_bp = np.column_stack((Jp, ap, bp))\n\n rgb_raw = cspace_convert(Jp_ap_bp, \"CAM02-UCS\", \"sRGB1\")\n\n rgb = np.clip(rgb_raw, 0, None)\n rgb_max = np.max(rgb, axis=1).reshape(-1, 1)\n rgb /= rgb_max\n assert ((0 <= rgb) * (rgb <= 1)).all()\n\n print(f\"Peak overflow = {np.max(rgb_max - 1)}\")\n print(f\"Peak underflow = {np.min(rgb_raw - rgb)}\")\n\n rgb = [mpl.colors.to_hex(c) for c in rgb]\n print(repr(rgb))\n return Palette(rgb_raw, rgb)\n\n\nif False:\n spectral_colors = gen_circular_palette().clipped\nelse:\n spectral_colors = [\n \"#ff8189\",\n \"#ff9155\",\n \"#ffba37\",\n \"#f7ff52\",\n \"#95ff85\",\n \"#16ffc1\",\n \"#00ffff\",\n \"#4dccff\",\n \"#86acff\",\n \"#b599ff\",\n \"#ed96ff\",\n \"#ff87ca\",\n ]\n", "from bisect import bisect_left\n\nimport numpy as np\n\ndef correlate_valid(buffer: np.ndarray, kernel: np.ndarray) -> np.ndarray:\n \"\"\"\n Based on scipy.correlate. buffer must be longer (or equal) to kernel. 
Returns an\n array of length (buffer - kernel + 1) without edge effects, much like mode=\"valid\".\n \"\"\"\n buffer = np.asarray(buffer)\n kernel = np.asarray(kernel)\n\n assert buffer.ndim == kernel.ndim == 1\n kernel = _reverse_and_conj(kernel)\n\n # Taken from scipy fftconvolve()\n\n kernel_support = len(kernel) - 1\n out_nsamp = len(buffer) - kernel_support\n\n # fft_nsamp = 1 << (out_nsamp - 1).bit_length()\n fft_nsamp = next_fast_len(len(buffer))\n assert fft_nsamp >= out_nsamp\n\n # return convolve(in1, _reverse_and_conj(in2), mode, method)\n sp1 = np.fft.rfft(buffer, fft_nsamp)\n # Already reversed above.\n sp2 = np.fft.rfft(kernel, fft_nsamp)\n\n corr = np.fft.irfft(sp1 * sp2, fft_nsamp)\n # Slice the returned data to the valid region, for complex math reasons.\n ret = corr[kernel_support : kernel_support + out_nsamp].copy()\n return ret\n\n\n\ndef correlate(in1: np.ndarray, in2: np.ndarray) -> np.ndarray:\n \"\"\"\n Based on scipy.correlate.\n Assumed: mode='full', method='fft'\n \"\"\"\n in1 = np.asarray(in1)\n in2 = np.asarray(in2)\n\n assert in1.ndim == in2.ndim == 1\n in2 = _reverse_and_conj(in2)\n\n # Taken from scipy fftconvolve()\n\n out_nsamp = len(in1) + len(in2) - 1\n\n # fft_nsamp = 1 << (out_nsamp - 1).bit_length()\n fft_nsamp = next_fast_len(out_nsamp)\n assert fft_nsamp >= out_nsamp\n\n # return convolve(in1, _reverse_and_conj(in2), mode, method)\n sp1 = np.fft.rfft(in1, fft_nsamp)\n sp2 = np.fft.rfft(in2, fft_nsamp)\n ret = np.fft.irfft(sp1 * sp2, fft_nsamp)[:out_nsamp].copy()\n\n return ret\n\n\ndef _reverse_and_conj(x: np.ndarray) -> np.ndarray:\n return x[::-1].conj()\n\n\ndef next_fast_len(target: int) -> int:\n \"\"\"\n Find the next fast size of input data to `fft`, for zero-padding, etc.\n\n SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this\n returns the next composite of the prime factors 2, 3, and 5 which is\n greater than or equal to `target`. (These are also known as 5-smooth\n numbers, regular numbers, or Hamming numbers.)\n\n Parameters\n ----------\n target : int\n Length to start searching from. Must be a positive integer.\n\n Returns\n -------\n out : int\n The first 5-smooth number greater than or equal to `target`.\n\n Notes\n -----\n .. 
versionadded:: 0.18.0\n\n Examples\n --------\n On a particular machine, an FFT of prime length takes 133 ms:\n\n >>> from scipy import fftpack\n >>> min_len = 10007 # prime length is worst case for speed\n >>> a = np.random.randn(min_len)\n >>> b = fftpack.fft(a)\n\n Zero-padding to the next 5-smooth length reduces computation time to\n 211 us, a speedup of 630 times:\n\n >>> fftpack.helper.next_fast_len(min_len)\n 10125\n >>> b = fftpack.fft(a, 10125)\n\n Rounding up to the next power of 2 is not optimal, taking 367 us to\n compute, 1.7 times as long as the 5-smooth size:\n\n >>> b = fftpack.fft(a, 16384)\n\n \"\"\"\n hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48,\n 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128,\n 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250,\n 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450,\n 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729,\n 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125,\n 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536,\n 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160,\n 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916,\n 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840,\n 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000,\n 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400,\n 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100,\n 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000)\n\n target = int(target)\n\n if target <= 6:\n return target\n\n # Quickly check if it's already a power of 2\n if not (target & (target-1)):\n return target\n\n # Get result quickly for small sizes, since FFT itself is similarly fast.\n if target <= hams[-1]:\n return hams[bisect_left(hams, target)]\n\n match = float('inf') # Anything found will be smaller\n p5 = 1\n while p5 < target:\n p35 = p5\n while p35 < target:\n # Ceiling integer division, avoiding conversion to float\n # (quotient = ceil(target / p35))\n quotient = -(-target // p35)\n\n # Quickly find next power of 2 >= quotient\n p2 = 2**((quotient - 1).bit_length())\n\n N = p2 * p35\n if N == target:\n return N\n elif N < match:\n match = N\n p35 *= 3\n if p35 == target:\n return p35\n if p35 < match:\n match = p35\n p5 *= 5\n if p5 == target:\n return p5\n if p5 < match:\n match = p5\n return match\n" ]
[ [ "numpy.linspace", "numpy.clip", "numpy.min", "numpy.cos", "matplotlib.colors.to_hex", "numpy.sin", "numpy.ones", "numpy.max", "numpy.column_stack" ], [ "numpy.asarray", "numpy.fft.irfft", "numpy.fft.rfft" ] ]
ss892714028/3d_search
[ "e8ff3d5790d5fa072ade04f4fbb71688f980f2e4" ]
[ "preprocess_npy.py" ]
[ "import glob as glob\nimport numpy as np\nimport os\nimport pymesh\nimport traceback\nfrom logging import Logger\nfrom config import SEARCH_FEATURE_PATH, LOAD_FEATURE_PATH\nimport getopt\nimport sys\n\n\ndef find_neighbor(faces, faces_contain_this_vertex, vf1, vf2, except_face):\n for i in (faces_contain_this_vertex[vf1] & faces_contain_this_vertex[vf2]):\n if i != except_face:\n face = faces[i].tolist()\n try:\n face.remove(vf1)\n face.remove(vf2)\n except:\n continue\n return i\n\n return except_face\n\ndef extract_features(in_path, file_name):\n \"\"\"\n Extract feature for a single compressed 3d model\n Args:\n data_root ([str]): path of where the preprocessed 3d models are stored\n out_root ([str]): path of where the extracted features (npy file) will be stored\n file_name ([str]): file name of the current 3d model\n \"\"\"\n \n try:\n path = os.path.join(in_path, file_name)\n mesh = pymesh.load_mesh(path)\n\n # delete the file after load\n os.remove(path)\n\n # clean up\n mesh, _ = pymesh.remove_isolated_vertices(mesh)\n mesh, _ = pymesh.remove_duplicated_vertices(mesh)\n\n # get elements\n vertices = mesh.vertices.copy()\n faces = mesh.faces.copy()\n\n # move to center\n center = (np.max(vertices, 0) + np.min(vertices, 0)) / 2\n vertices -= center\n\n # normalize\n max_len = np.max(vertices[:, 0]**2 + vertices[:, 1]**2 + vertices[:, 2]**2)\n vertices /= np.sqrt(max_len)\n\n # get normal vector\n mesh = pymesh.form_mesh(vertices, faces)\n mesh.add_attribute('face_normal')\n face_normal = mesh.get_face_attribute('face_normal')\n\n # get neighbors\n faces_contain_this_vertex = []\n for i in range(len(vertices)):\n faces_contain_this_vertex.append(set([]))\n centers = []\n corners = []\n for i in range(len(faces)):\n [v1, v2, v3] = faces[i]\n x1, y1, z1 = vertices[v1]\n x2, y2, z2 = vertices[v2]\n x3, y3, z3 = vertices[v3]\n centers.append([(x1 + x2 + x3) / 3, (y1 + y2 + y3) / 3, (z1 + z2 + z3) / 3])\n corners.append([x1, y1, z1, x2, y2, z2, x3, y3, z3])\n faces_contain_this_vertex[v1].add(i)\n faces_contain_this_vertex[v2].add(i)\n faces_contain_this_vertex[v3].add(i)\n\n neighbors = []\n for i in range(len(faces)):\n [v1, v2, v3] = faces[i]\n n1 = find_neighbor(faces, faces_contain_this_vertex, v1, v2, i)\n n2 = find_neighbor(faces, faces_contain_this_vertex, v2, v3, i)\n n3 = find_neighbor(faces, faces_contain_this_vertex, v3, v1, i)\n neighbors.append([n1, n2, n3])\n\n centers = np.array(centers)\n corners = np.array(corners)\n faces = np.concatenate([centers, corners, face_normal], axis=1)\n neighbors = np.array(neighbors)\n _, filename = os.path.split(file_name)\n filename = in_path + '/' + filename[:-4] + '.npz'\n\n \n # save as npy\n np.savez(filename,\n faces=faces, neighbors=neighbors)\n print(f\"saved {filename}\")\n # return the path of the saved npy file\n return filename\n \n except Exception as exc:\n print(f\"{file_name} broken\")\n traceback.print_exc()\n\n\ndef run_batch(in_path):\n\n for file_name in os.listdir(in_path):\n if file_name.endswith(\"off\"):\n extract_features(in_path, file_name)\n print(\"success!\")\n\n\ndef run_single(in_path, filename):\n extract_features(in_path, filename)\n print(\"success!\")\n\n\ndef main(search_path, load_path):\n opts, args = getopt.getopt(sys.argv[1:], \"h\", [\"help\", \"batch=\", \"filename=\"])\n for opt_name, opt_value in opts:\n if opt_name in (\"-h\", \"--help\"):\n print(\"Help yourself please :)\")\n elif opt_name == \"--batch\":\n run_type = opt_value\n elif opt_name == \"--filename\":\n filename = opt_value\n if 
run_type == \"T\":\n run_batch(load_path)\n elif run_type == \"F\":\n if filename:\n run_single(search_path, filename)\n else:\n print(\"where is the file name?\")\n else:\n print(\"T or F\")\n\n\n \nif __name__ == '__main__':\n main(SEARCH_FEATURE_PATH, LOAD_FEATURE_PATH)\n" ]
[ [ "numpy.savez", "numpy.sqrt", "numpy.min", "numpy.concatenate", "numpy.max", "numpy.array" ] ]
robinvanemden/TotemBobbiScripts
[ "675774ab407dce762d1d4dd20a68fc41a9676cf8" ]
[ "import_bobbi.py" ]
[ "#######################################################################\n#\n# import_bobbi.py\n#\n# Code: Robin van Emden - robin at pavlov.tech - 2017\n#\n# Totem Bobbi: http://www.totemopenhealth.com/\n#\n#######################################################################\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os \nfrom pathlib import Path\n\n###################### Set csv filename ###############################\n\ncsv_file = \"data/8-sdataHR.csv\"\n\n#######################################################################\n\n# Retrieve some file info\nf = Path(csv_file) # path object\nf_no_ext = os.path.join(str(f.parent), f.stem) # remove extension\n\n# Save Pickled DF - faster to save and load later\nis_cached = Path(f_no_ext +\".df\") # check for cache file\nif is_cached.is_file(): # cache exists?\n dataset = pd.read_pickle(f_no_ext +\".df\") # yes - load cache\n \nelse:\n dataset = pd.read_csv( f_no_ext +\".csv\", # no - load csv\n index_col=False,\n sep=';',\n dtype={'ms': np.int32,\n 'heartrate': np.int16, \n 'BPM': np.int16 } ) \n\n dataset['hart'] = dataset['heartrate'] # set hart\n # dataset['hart'] = -dataset['hart']+1000 # if needed, flip\n dataset.to_pickle(f_no_ext +\".df\") # save to cache\n \n# Save small subset of data as a sample\ndataset = dataset[(dataset['ms'] >= 104000) \n & (dataset['ms'] <= 115000)]\n\ndataset = dataset.reset_index(drop=True) # Start index at 0\ndataset.to_csv(f_no_ext + \"_sample.csv\"\n ,sep=';',index=False) # Save CSV sample\ndataset.to_pickle(f_no_ext +\"_sample.df\") # Save DF sample\n\n# Lets plot our sample, see if its ok\nplt.title(\"Heart Rate Signal\") #The title of our plot\nplt.plot(dataset.ms,dataset.hart) #Draw the plot object\nplt.show() #Display the plot \n" ]
[ [ "pandas.read_csv", "pandas.read_pickle", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
EnriqueL8/qiskit-terra
[ "08b801f1f8598c4e44680b4a75c232ed92db0262", "08b801f1f8598c4e44680b4a75c232ed92db0262" ]
[ "qiskit/providers/basicaer/unitary_simulator.py", "qiskit/quantum_info/operators/scalar_op.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=arguments-differ\n\n\"\"\"Contains a Python simulator that returns the unitary of the circuit.\n\nIt simulates a unitary of a quantum circuit that has been compiled to run on\nthe simulator. It is exponential in the number of qubits.\n\n.. code-block:: python\n\n UnitarySimulator().run(qobj)\n\nWhere the input is a Qobj object and the output is a BasicAerJob object, which can\nlater be queried for the Result object. The result will contain a 'unitary'\ndata field, which is a 2**n x 2**n complex numpy array representing the\ncircuit's unitary matrix.\n\"\"\"\nimport logging\nimport uuid\nimport time\nfrom math import log2, sqrt\nimport numpy as np\nfrom qiskit.util import local_hardware_info\nfrom qiskit.providers.models import QasmBackendConfiguration\nfrom qiskit.providers import BaseBackend\nfrom qiskit.providers.basicaer.basicaerjob import BasicAerJob\nfrom qiskit.result import Result\nfrom .exceptions import BasicAerError\nfrom .basicaertools import single_gate_matrix\nfrom .basicaertools import cx_gate_matrix\nfrom .basicaertools import einsum_matmul_index\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO add [\"status\"] = 'DONE', 'ERROR' especially for empty circuit error\n# does not show up\n\n\nclass UnitarySimulatorPy(BaseBackend):\n \"\"\"Python implementation of a unitary simulator.\"\"\"\n\n MAX_QUBITS_MEMORY = int(log2(sqrt(local_hardware_info()['memory'] * (1024 ** 3) / 16)))\n\n DEFAULT_CONFIGURATION = {\n 'backend_name': 'unitary_simulator',\n 'backend_version': '1.0.0',\n 'n_qubits': min(24, MAX_QUBITS_MEMORY),\n 'url': 'https://github.com/Qiskit/qiskit-terra',\n 'simulator': True,\n 'local': True,\n 'conditional': False,\n 'open_pulse': False,\n 'memory': False,\n 'max_shots': 65536,\n 'coupling_map': None,\n 'description': 'A python simulator for unitary matrix corresponding to a circuit',\n 'basis_gates': ['u1', 'u2', 'u3', 'cx', 'id', 'unitary'],\n 'gates': [\n {\n 'name': 'u1',\n 'parameters': ['lambda'],\n 'qasm_def': 'gate u1(lambda) q { U(0,0,lambda) q; }'\n },\n {\n 'name': 'u2',\n 'parameters': ['phi', 'lambda'],\n 'qasm_def': 'gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }'\n },\n {\n 'name': 'u3',\n 'parameters': ['theta', 'phi', 'lambda'],\n 'qasm_def': 'gate u3(theta,phi,lambda) q { U(theta,phi,lambda) q; }'\n },\n {\n 'name': 'cx',\n 'parameters': ['c', 't'],\n 'qasm_def': 'gate cx c,t { CX c,t; }'\n },\n {\n 'name': 'id',\n 'parameters': ['a'],\n 'qasm_def': 'gate id a { U(0,0,0) a; }'\n },\n {\n 'name': 'unitary',\n 'parameters': ['matrix'],\n 'qasm_def': 'unitary(matrix) q1, q2,...'\n }\n ]\n }\n\n DEFAULT_OPTIONS = {\n \"initial_unitary\": None,\n \"chop_threshold\": 1e-15\n }\n\n def __init__(self, configuration=None, provider=None):\n super().__init__(configuration=(\n configuration or QasmBackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)),\n provider=provider)\n\n # Define attributes inside __init__.\n self._unitary = None\n self._number_of_qubits = 0\n self._initial_unitary = None\n self._chop_threshold = 
1e-15\n\n def _add_unitary(self, gate, qubits):\n \"\"\"Apply an N-qubit unitary matrix.\n\n Args:\n gate (matrix_like): an N-qubit unitary matrix\n qubits (list): the list of N-qubits.\n \"\"\"\n # Get the number of qubits\n num_qubits = len(qubits)\n # Compute einsum index string for 1-qubit matrix multiplication\n indexes = einsum_matmul_index(qubits, self._number_of_qubits)\n # Convert to complex rank-2N tensor\n gate_tensor = np.reshape(np.array(gate, dtype=complex),\n num_qubits * [2, 2])\n # Apply matrix multiplication\n self._unitary = np.einsum(indexes, gate_tensor, self._unitary,\n dtype=complex, casting='no')\n\n def _validate_initial_unitary(self):\n \"\"\"Validate an initial unitary matrix\"\"\"\n # If initial unitary isn't set we don't need to validate\n if self._initial_unitary is None:\n return\n # Check unitary is correct length for number of qubits\n shape = np.shape(self._initial_unitary)\n required_shape = (2 ** self._number_of_qubits,\n 2 ** self._number_of_qubits)\n if shape != required_shape:\n raise BasicAerError('initial unitary is incorrect shape: ' +\n '{} != 2 ** {}'.format(shape, required_shape))\n\n def _set_options(self, qobj_config=None, backend_options=None):\n \"\"\"Set the backend options for all experiments in a qobj\"\"\"\n # Reset default options\n self._initial_unitary = self.DEFAULT_OPTIONS[\"initial_unitary\"]\n self._chop_threshold = self.DEFAULT_OPTIONS[\"chop_threshold\"]\n if backend_options is None:\n backend_options = {}\n\n # Check for custom initial statevector in backend_options first,\n # then config second\n if 'initial_unitary' in backend_options:\n self._initial_unitary = np.array(backend_options['initial_unitary'],\n dtype=complex)\n elif hasattr(qobj_config, 'initial_unitary'):\n self._initial_unitary = np.array(qobj_config.initial_unitary,\n dtype=complex)\n if self._initial_unitary is not None:\n # Check the initial unitary is actually unitary\n shape = np.shape(self._initial_unitary)\n if len(shape) != 2 or shape[0] != shape[1]:\n raise BasicAerError(\"initial unitary is not a square matrix\")\n iden = np.eye(len(self._initial_unitary))\n u_dagger_u = np.dot(self._initial_unitary.T.conj(),\n self._initial_unitary)\n norm = np.linalg.norm(u_dagger_u - iden)\n if round(norm, 10) != 0:\n raise BasicAerError(\"initial unitary is not unitary\")\n # Check the initial statevector is normalized\n\n # Check for custom chop threshold\n # Replace with custom options\n if 'chop_threshold' in backend_options:\n self._chop_threshold = backend_options['chop_threshold']\n elif hasattr(qobj_config, 'chop_threshold'):\n self._chop_threshold = qobj_config.chop_threshold\n\n def _initialize_unitary(self):\n \"\"\"Set the initial unitary for simulation\"\"\"\n self._validate_initial_unitary()\n if self._initial_unitary is None:\n # Set to identity matrix\n self._unitary = np.eye(2 ** self._number_of_qubits,\n dtype=complex)\n else:\n self._unitary = self._initial_unitary.copy()\n # Reshape to rank-N tensor\n self._unitary = np.reshape(self._unitary,\n self._number_of_qubits * [2, 2])\n\n def _get_unitary(self):\n \"\"\"Return the current unitary\"\"\"\n unitary = np.reshape(self._unitary, 2 * [2 ** self._number_of_qubits])\n unitary[abs(unitary) < self._chop_threshold] = 0.0\n return unitary\n\n def run(self, qobj, backend_options=None):\n \"\"\"Run qobj asynchronously.\n\n Args:\n qobj (Qobj): payload of the experiment\n backend_options (dict): backend options\n\n Returns:\n BasicAerJob: derived from BaseJob\n\n Additional Information::\n\n 
backend_options: is a dict of options for the backend. It may contain\n                * \"initial_unitary\": matrix_like\n                * \"chop_threshold\": double\n\n            The \"initial_unitary\" option specifies a custom initial unitary\n            matrix for the simulator to be used instead of the identity\n            matrix. The size of this matrix must be correct for the number\n            of qubits in all experiments in the qobj.\n\n            The \"chop_threshold\" option specifies a truncation value for\n            setting small values to zero in the output unitary. The default\n            value is 1e-15.\n\n            Example::\n\n                backend_options = {\n                    \"initial_unitary\": np.array([[1, 0, 0, 0],\n                                                 [0, 0, 0, 1],\n                                                 [0, 0, 1, 0],\n                                                 [0, 1, 0, 0]]),\n                    \"chop_threshold\": 1e-15\n                }\n        \"\"\"\n        self._set_options(qobj_config=qobj.config,\n                          backend_options=backend_options)\n        job_id = str(uuid.uuid4())\n        job = BasicAerJob(self, job_id, self._run_job, qobj)\n        job.submit()\n        return job\n\n    def _run_job(self, job_id, qobj):\n        \"\"\"Run experiments in qobj.\n\n        Args:\n            job_id (str): unique id for the job.\n            qobj (Qobj): job description\n\n        Returns:\n            Result: Result object\n        \"\"\"\n        self._validate(qobj)\n        result_list = []\n        start = time.time()\n        for experiment in qobj.experiments:\n            result_list.append(self.run_experiment(experiment))\n        end = time.time()\n        result = {'backend_name': self.name(),\n                  'backend_version': self._configuration.backend_version,\n                  'qobj_id': qobj.qobj_id,\n                  'job_id': job_id,\n                  'results': result_list,\n                  'status': 'COMPLETED',\n                  'success': True,\n                  'time_taken': (end - start),\n                  'header': qobj.header.to_dict()}\n\n        return Result.from_dict(result)\n\n    def run_experiment(self, experiment):\n        \"\"\"Run an experiment (circuit) and return a single experiment result.\n\n        Args:\n            experiment (QobjExperiment): experiment from qobj experiments list\n\n        Returns:\n            dict: A result dictionary which looks something like::\n\n                {\n                \"name\": name of this experiment (obtained from qobj.experiment header)\n                \"seed\": random seed used for simulation\n                \"shots\": number of shots used in the simulation\n                \"data\":\n                    {\n                    \"unitary\": [[[0.0, 0.0], [1.0, 0.0]],\n                                [[1.0, 0.0], [0.0, 0.0]]]\n                    },\n                \"status\": status string for the simulation\n                \"success\": boolean\n                \"time taken\": simulation time of this single experiment\n                }\n\n        Raises:\n            BasicAerError: if the number of qubits in the circuit is greater than 24.\n            Note that the practical qubit limit is much lower than 24.\n        \"\"\"\n        start = time.time()\n        self._number_of_qubits = experiment.header.n_qubits\n\n        # Validate the dimension of initial unitary if set\n        self._validate_initial_unitary()\n        self._initialize_unitary()\n\n        for operation in experiment.instructions:\n            if operation.name == 'unitary':\n                qubits = operation.qubits\n                gate = operation.params[0]\n                self._add_unitary(gate, qubits)\n            # Check if single gate\n            elif operation.name in ('U', 'u1', 'u2', 'u3'):\n                params = getattr(operation, 'params', None)\n                qubit = operation.qubits[0]\n                gate = single_gate_matrix(operation.name, params)\n                self._add_unitary(gate, [qubit])\n            elif operation.name in ('id', 'u0'):\n                pass\n            # Check if CX gate\n            elif operation.name in ('CX', 'cx'):\n                qubit0 = operation.qubits[0]\n                qubit1 = operation.qubits[1]\n                gate = cx_gate_matrix()\n                self._add_unitary(gate, [qubit0, qubit1])\n            # Check if barrier\n            elif operation.name == 'barrier':\n                pass\n            else:\n                backend = self.name()\n                err_msg = '{0} encountered unrecognized operation \"{1}\"'\n                raise BasicAerError(err_msg.format(backend, operation.name))\n        # Add final state to data\n        data = {'unitary': self._get_unitary()}\n        end = 
time.time()\n return {'name': experiment.header.name,\n 'shots': 1,\n 'data': data,\n 'status': 'DONE',\n 'success': True,\n 'time_taken': (end - start),\n 'header': experiment.header.to_dict()}\n\n def _validate(self, qobj):\n \"\"\"Semantic validations of the qobj which cannot be done via schemas.\n Some of these may later move to backend schemas.\n 1. No shots\n 2. No measurements in the middle\n \"\"\"\n n_qubits = qobj.config.n_qubits\n max_qubits = self.configuration().n_qubits\n if n_qubits > max_qubits:\n raise BasicAerError('Number of qubits {} '.format(n_qubits) +\n 'is greater than maximum ({}) '.format(max_qubits) +\n 'for \"{}\".'.format(self.name()))\n if hasattr(qobj.config, 'shots') and qobj.config.shots != 1:\n logger.info('\"%s\" only supports 1 shot. Setting shots=1.',\n self.name())\n qobj.config.shots = 1\n for experiment in qobj.experiments:\n name = experiment.header.name\n if getattr(experiment.config, 'shots', 1) != 1:\n logger.info('\"%s\" only supports 1 shot. '\n 'Setting shots=1 for circuit \"%s\".',\n self.name(), name)\n experiment.config.shots = 1\n for operation in experiment.instructions:\n if operation.name in ['measure', 'reset']:\n raise BasicAerError('Unsupported \"%s\" instruction \"%s\" ' +\n 'in circuit \"%s\" ', self.name(),\n operation.name, name)\n", "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nScalarOp class\n\"\"\"\n\nfrom numbers import Number\nimport numpy as np\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.operators.base_operator import BaseOperator\nfrom qiskit.quantum_info.operators.operator import Operator\n\n\nclass ScalarOp(BaseOperator):\n \"\"\"Scalar identity operator class.\n\n This is a symbolic representation of an scalar identity operator on\n multiple subsystems. 
It may be used to initialize a symbolic scalar\n    multiplication of an identity and then be implicitly converted to other\n    kinds of operator subclasses by using the :meth:`compose`, :meth:`dot`,\n    :meth:`tensor`, :meth:`expand` methods.\n    \"\"\"\n\n    def __init__(self, dims, coeff=1):\n        \"\"\"Initialize an operator object.\n\n        Args:\n            dims (int or tuple): subsystem dimensions.\n            coeff (Number): scalar coefficient for the identity\n                            operator (Default: 1).\n\n        Raises:\n            QiskitError: If the optional coefficient is invalid.\n        \"\"\"\n        if not isinstance(coeff, Number):\n            raise QiskitError(\"coeff {} must be a number.\".format(coeff))\n        self._coeff = coeff\n        input_dims = self._automatic_dims(dims, np.product(dims))\n        super().__init__(input_dims, input_dims)\n\n    def __repr__(self):\n        return 'ScalarOp({}, coeff={})'.format(\n            self._input_dims, self.coeff)\n\n    @property\n    def coeff(self):\n        \"\"\"Return the coefficient\"\"\"\n        return self._coeff\n\n    def conjugate(self):\n        \"\"\"Return the conjugate of the operator.\"\"\"\n        ret = self.copy()\n        ret._coeff = np.conjugate(self.coeff)\n        return ret\n\n    def transpose(self):\n        \"\"\"Return the transpose of the operator.\"\"\"\n        return self.copy()\n\n    def is_unitary(self, atol=None, rtol=None):\n        \"\"\"Return True if operator is a unitary matrix.\"\"\"\n        if atol is None:\n            atol = self._atol\n        if rtol is None:\n            rtol = self._rtol\n        return np.isclose(np.abs(self.coeff), 1, atol=atol, rtol=rtol)\n\n    def to_matrix(self):\n        \"\"\"Convert to a Numpy matrix.\"\"\"\n        dim, _ = self.dim\n        iden = np.eye(dim, dtype=complex)\n        return self.coeff * iden\n\n    def to_operator(self):\n        \"\"\"Convert to an Operator object.\"\"\"\n        return Operator(self.to_matrix(),\n                        input_dims=self._input_dims,\n                        output_dims=self._output_dims)\n\n    def tensor(self, other):\n        \"\"\"Return the tensor product operator self ⊗ other.\n\n        Args:\n            other (BaseOperator): an operator object.\n\n        Returns:\n            ScalarOp: if other is a ScalarOp.\n            BaseOperator: if other is not a ScalarOp.\n        \"\"\"\n        if not isinstance(other, BaseOperator):\n            other = Operator(other)\n        if isinstance(other, ScalarOp):\n            coeff = self.coeff * other.coeff\n            dims = other._input_dims + self._input_dims\n            return ScalarOp(dims, coeff=coeff)\n        return other.expand(self)\n\n    def expand(self, other):\n        \"\"\"Return the tensor product operator other ⊗ self.\n\n        Args:\n            other (BaseOperator): an operator object.\n\n        Returns:\n            ScalarOp: if other is a ScalarOp.\n            BaseOperator: if other is not a ScalarOp.\n        \"\"\"\n        if not isinstance(other, BaseOperator):\n            other = Operator(other)\n        if isinstance(other, ScalarOp):\n            coeff = self.coeff * other.coeff\n            dims = self._input_dims + other._input_dims\n            return ScalarOp(dims, coeff=coeff)\n        return other.tensor(self)\n\n    def compose(self, other, qargs=None, front=False):\n        \"\"\"Return the composed operator.\n\n        Args:\n            other (BaseOperator): an operator object.\n            qargs (list or None): a list of subsystem positions to apply\n                                  other on. If None apply on all\n                                  subsystems [default: None].\n            front (bool): If True compose using right operator multiplication,\n                          instead of left multiplication [default: False].\n\n        Returns:\n            BaseOperator: The operator self @ other.\n\n        Raises:\n            QiskitError: if other has incompatible dimensions for specified\n                         subsystems.\n\n        Additional Information:\n            Composition (``@``) is defined as `left` matrix multiplication for\n            matrix operators. 
That is that ``A @ B`` is equal to ``B * A``.\n Setting ``front=True`` returns `right` matrix multiplication\n ``A * B`` and is equivalent to the :meth:`dot` method.\n \"\"\"\n if qargs is None:\n qargs = getattr(other, 'qargs', None)\n\n if not isinstance(other, BaseOperator):\n other = Operator(other)\n\n input_dims, output_dims = self._get_compose_dims(other, qargs, front)\n\n # If other is also an ScalarOp we only need to\n # update the coefficient and dimensions\n if isinstance(other, ScalarOp):\n coeff = self.coeff * other.coeff\n return ScalarOp(input_dims, coeff=coeff)\n\n # If we are composing on the full system we return the\n # other operator with reshaped dimensions\n if qargs is None:\n ret = other.reshape(input_dims, output_dims)\n # Other operator might not support scalar multiplication\n # so we treat the identity as a special case to avoid a\n # possible error\n if self.coeff == 1:\n return ret\n return self.coeff * ret\n\n # For qargs composition we initialize the scalar operator\n # as an instance of the other BaseOperators subclass. We then\n # perform subsystem qargs composition using the BaseOperator\n # subclasses compose method.\n # Note that this will raise an error if the other operator does\n # not support initialization from a ScalarOp or the ScalarOps\n # `to_operator` method).\n return other.__class__(self).compose(\n other, qargs=qargs, front=front)\n\n def power(self, n):\n \"\"\"Return the power of the ScalarOp.\n\n Args:\n n (Number): the exponent for the scalar op.\n\n Returns:\n ScalarOp: the ``coeff ** n`` ScalarOp.\n\n Raises:\n QiskitError: if the input and output dimensions of the operator\n are not equal, or the power is not a positive integer.\n \"\"\"\n ret = self.copy()\n ret._coeff = self.coeff ** n\n return ret\n\n def _add(self, other):\n \"\"\"Return the operator self + other.\n\n Args:\n other (BaseOperator): an operator object.\n\n Returns:\n ScalarOp: if other is an ScalarOp.\n BaseOperator: if other is not an ScalarOp.\n\n Raises:\n QiskitError: if other has incompatible dimensions.\n \"\"\"\n if not isinstance(other, BaseOperator):\n other = Operator(other)\n\n self._validate_add_dims(other)\n\n # First we check the special case where coeff=0. In this case\n # we simply return the other operator reshaped so that its\n # subsystem dimensions are equal to the current operator for the\n # case where total dimensions agree but subsystem dimensions differ.\n if self.coeff == 0:\n return other.reshape(self._input_dims, self._output_dims)\n\n # Next if we are adding two ScalarOps we return a ScalarOp\n if isinstance(other, ScalarOp):\n coeff1 = 1 if self.coeff is None else self.coeff\n coeff2 = 1 if other.coeff is None else other.coeff\n return ScalarOp(self._input_dims, coeff=coeff1+coeff2)\n\n # Finally if we are adding another BaseOperator subclass\n # we use that subclasses `_add` method and reshape the\n # final dimensions.\n return other._add(self).reshape(self._input_dims, self._output_dims)\n\n def _multiply(self, other):\n \"\"\"Return the ScalarOp other * self.\n\n Args:\n other (Number): a complex number.\n\n Returns:\n ScalarOp: the scaled identity operator other * self.\n\n Raises:\n QiskitError: if other is not a valid complex number.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other ({}) is not a number\".format(other))\n ret = self.copy()\n ret._coeff = other * self.coeff\n return ret\n" ]
[ [ "numpy.einsum", "numpy.reshape", "numpy.eye", "numpy.linalg.norm", "numpy.shape", "numpy.array" ], [ "numpy.eye", "numpy.product", "numpy.abs", "numpy.conjugate" ] ]
M155K4R4/TabR
[ "907a4d10589f29ab6a3774ef4c3fdeca98becfbc" ]
[ "Chapter08/ch02_predict.py" ]
[ "import matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# initialization\nnp.random.seed(100)\nalpha, sigma = 0.5, 0.5\nbeta = [1, 2.5]\nsize = 100\n\n# Predictor variable\nX1 = np.random.randn(size)\nX2 = np.random.randn(size) * 0.37\n\n# Simulate outcome variable\nY = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma\n\nfig, ax = plt.subplots(1, 2, sharex=True, figsize=(10, 4))\nfig.subplots_adjust(bottom=0.15, left=0.1)\n\nax[0].scatter(X1, Y)\nax[1].scatter(X2, Y)\nax[0].set_ylabel('Y')\nax[0].set_xlabel('X1')\nax[1].set_xlabel('X2')\n\n\nplt.grid(True)\nfig.savefig('predict.png', dpi=100)\nprint(\"finish\")\n\n" ]
[ [ "numpy.random.seed", "matplotlib.use", "matplotlib.pyplot.subplots", "numpy.random.randn", "matplotlib.pyplot.grid" ] ]
SNSerHello/detectron2
[ "3cc9908ba3301785ec946ae7e37d7091fa1d5045" ]
[ "projects/DensePose/densepose/modeling/hrfpn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nMIT License\nCopyright (c) 2019 Microsoft\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.modeling.backbone import BACKBONE_REGISTRY\nfrom detectron2.modeling.backbone.backbone import Backbone\n\nfrom .hrnet import build_pose_hrnet_backbone\n\n\nclass HRFPN(Backbone):\n \"\"\"HRFPN (High Resolution Feature Pyramids)\n Transforms outputs of HRNet backbone so they are suitable for the ROI_heads\n arXiv: https://arxiv.org/abs/1904.04514\n Adapted from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/hrfpn.py\n Args:\n bottom_up: (list) output of HRNet\n in_features (list): names of the input features (output of HRNet)\n in_channels (list): number of channels for each branch\n out_channels (int): output channels of feature pyramids\n n_out_features (int): number of output stages\n pooling (str): pooling for generating feature pyramids (from {MAX, AVG})\n share_conv (bool): Have one conv per output, or share one with all the outputs\n \"\"\"\n\n def __init__(\n self,\n bottom_up,\n in_features,\n n_out_features,\n in_channels,\n out_channels,\n pooling=\"AVG\",\n share_conv=False,\n ):\n super(HRFPN, self).__init__()\n assert isinstance(in_channels, list)\n self.bottom_up = bottom_up\n self.in_features = in_features\n self.n_out_features = n_out_features\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.share_conv = share_conv\n\n if self.share_conv:\n self.fpn_conv = nn.Conv2d(\n in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1\n )\n else:\n self.fpn_conv = nn.ModuleList()\n for _ in range(self.n_out_features):\n self.fpn_conv.append(\n nn.Conv2d(\n in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=3,\n padding=1,\n )\n )\n\n # Custom change: Replaces a simple bilinear interpolation\n self.interp_conv = nn.ModuleList()\n for i in range(len(self.in_features)):\n self.interp_conv.append(\n nn.Sequential(\n nn.ConvTranspose2d(\n in_channels=in_channels[i],\n out_channels=in_channels[i],\n kernel_size=4,\n stride=2**i,\n padding=0,\n output_padding=0,\n bias=False,\n ),\n nn.BatchNorm2d(in_channels[i], momentum=0.1),\n nn.ReLU(inplace=True),\n )\n )\n\n # Custom change: Replaces a couple (reduction conv + pooling) by one conv\n self.reduction_pooling_conv = nn.ModuleList()\n for i in 
range(self.n_out_features):\n self.reduction_pooling_conv.append(\n nn.Sequential(\n nn.Conv2d(sum(in_channels), out_channels, kernel_size=2**i, stride=2**i),\n nn.BatchNorm2d(out_channels, momentum=0.1),\n nn.ReLU(inplace=True),\n )\n )\n\n if pooling == \"MAX\":\n self.pooling = F.max_pool2d\n else:\n self.pooling = F.avg_pool2d\n\n self._out_features = []\n self._out_feature_channels = {}\n self._out_feature_strides = {}\n\n for i in range(self.n_out_features):\n self._out_features.append(\"p%d\" % (i + 1))\n self._out_feature_channels.update({self._out_features[-1]: self.out_channels})\n self._out_feature_strides.update({self._out_features[-1]: 2 ** (i + 2)})\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, a=1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, inputs):\n bottom_up_features = self.bottom_up(inputs)\n assert len(bottom_up_features) == len(self.in_features)\n inputs = [bottom_up_features[f] for f in self.in_features]\n\n outs = []\n for i in range(len(inputs)):\n outs.append(self.interp_conv[i](inputs[i]))\n shape_2 = min(o.shape[2] for o in outs)\n shape_3 = min(o.shape[3] for o in outs)\n out = torch.cat([o[:, :, :shape_2, :shape_3] for o in outs], dim=1)\n outs = []\n for i in range(self.n_out_features):\n outs.append(self.reduction_pooling_conv[i](out))\n for i in range(len(outs)): # Make shapes consistent\n outs[-1 - i] = outs[-1 - i][\n :, :, : outs[-1].shape[2] * 2**i, : outs[-1].shape[3] * 2**i\n ]\n outputs = []\n for i in range(len(outs)):\n if self.share_conv:\n outputs.append(self.fpn_conv(outs[i]))\n else:\n outputs.append(self.fpn_conv[i](outs[i]))\n\n assert len(self._out_features) == len(outputs)\n return dict(zip(self._out_features, outputs))\n\n\n@BACKBONE_REGISTRY.register()\ndef build_hrfpn_backbone(cfg, input_shape: ShapeSpec) -> HRFPN:\n\n in_channels = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS\n in_features = [\"p%d\" % (i + 1) for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES)]\n n_out_features = len(cfg.MODEL.ROI_HEADS.IN_FEATURES)\n out_channels = cfg.MODEL.HRNET.HRFPN.OUT_CHANNELS\n hrnet = build_pose_hrnet_backbone(cfg, input_shape)\n hrfpn = HRFPN(\n hrnet,\n in_features,\n n_out_features,\n in_channels,\n out_channels,\n pooling=\"AVG\",\n share_conv=False,\n )\n\n return hrfpn\n" ]
[ [ "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
Fuinn/mos-examples
[ "e7badef779f0918c6d09a52db00eb8f890234b57" ]
[ "examples/cvxpy/maxflow/maxflow_model.py" ]
[ "#@ Model: Max flow\n#@ Description: Model to find maximum flow across a network $$\\max x_{50}$$$$\\sum_k x_{ki}=\\sum_j x_{ij},\\quad\\forall i$$$$0 \\leq x_{ij} \\leq k_{ij},\\quad\\forall i,j$$\n\nimport cvxpy as cp\nimport numpy as np\nimport json\n\n\n#@ Helper Object: k\n#@ Description: Capacity of edges\nk = [[0,6,11,0,0,0],[0,0,0,12,5,0],[0,0,0,0,10,0],[0,0,0,0,0,13],[0,5,0,0,0,4],[1e9,0,0,0,0,0]]\nk = np.array(k)\n\n#@ Variable: x\n#@ Description: flow along each edge $$x\\in\\mathbb{r}^{6 \\times 6}$$\n#@ Labels: edges\nx = cp.Variable((6,6))\n\nedges = dict([((i,j), 'Edge %d %d' %(i, j)) for i in range (6) for j in range(6)])\n\n\n#@ Function: objectivefn\n#@ Description: $$x_{50}$$\nobjectivefn = x[5][0]\n\n#@ Constraint: nodalbalance\n#@ Description: Nodal balance, flows entering equal flows exiting at each node $$\\sum_k x_{ki} = \\sum_j x_{ij},\\quad\\forall i$$\n#@ Labels: nodes\nnodalbalance = cp.sum(x,axis=0) == cp.sum(x,axis=1)\n\nnodes = dict([(i, 'Node %d' %i) for i in range(6)])\n\n#@ Constraint: x_upperbound\n#@ Description: Capacity limit on each edge $$x \\leq k$$\n#@ Labels: edges\nx_upperbound = x <= k\n\n#@ Constraint: x_lowerbound\n#@ Description: $$x \\geq 0$$\n#@ Labels: edges\nx_lowerbound = x >= 0\n\n\n#@ Problem: allocation\nconstraints = []\nconstraints.append(nodalbalance)\nconstraints.append(x_upperbound)\nconstraints.append(x_lowerbound)\n\n\nallocation = cp.Problem(cp.Maximize(objectivefn), constraints)\n\n#@ Solver: solver\nsolver = \"ECOS\"\n\n\nallocation.solve(solver=solver, verbose=True, abstol=2e-3)\n\n" ]
[ [ "numpy.array" ] ]
helgeerbe/pi3d
[ "4ddc3cd6aab139602b4283015049e372a0c3530a" ]
[ "pi3d/util/Utility.py" ]
[ "import copy\nimport bisect\n\nfrom ctypes import c_float\nfrom numpy import subtract, dot, divide, sqrt as npsqrt\nfrom math import sqrt, sin, cos, tan, radians, pi, acos\n\nfrom pi3d.util.Ctypes import c_bytes\n\ndef normalize_v3(arr):\n ''' Normalize a numpy array of 3 component vectors shape=(n,3) '''\n lens = npsqrt( arr[:,0]**2 + arr[:,1]**2 + arr[:,2]**2) + 0.000001\n return divide(arr.T, lens).T\n\ndef magnitude(*args):\n \"\"\"Return the magnitude (root mean square) of the vector.\"\"\"\n return sqrt(dot(args, args))\n\ndef distance(v1, v2):\n \"\"\"Return the distance between two points.\"\"\"\n return magnitude(*subtract(v2, v1))\n\ndef from_polar(direction=0.0, magnitude=1.0):\n \"\"\"\n Convert polar coordinates into Cartesian (x, y) coordinates.\n\n Arguments:\n\n *direction*\n Vector angle in degrees.\n *magnitude*\n Vector length.\n \"\"\"\n return from_polar_rad(radians(direction), magnitude)\n\ndef from_polar_rad(direction=0.0, magnitude=1.0):\n \"\"\"\n Convert polar coordinates into Cartesian (x, y) coordinates.\n\n Arguments:\n\n *direction*\n Vector angle in radians.\n *magnitude*\n Vector length.\n \"\"\"\n return magnitude * cos(direction), magnitude * sin(direction)\n\n\ndef vec_sub(x, y):\n \"\"\"Return the difference between two vectors.\"\"\"\n return [a - b for a, b in zip(x, y)]\n\ndef vec_dot(x, y):\n \"\"\"Return the dot product of two vectors.\"\"\"\n return sum(a * b for a, b in zip(x, y))\n\ndef vec_cross(a,b):\n \"\"\"Return the cross product of two vectors.\"\"\"\n return [a[1] * b[2] - a[2] * b[1],\n a[2] * b[0] - a[0] * b[2],\n a[0] * b[1] - a[1] * b[0]]\n\ndef vec_normal(vec):\n \"\"\"Return a vector normalized to unit length for a vector of non-zero length,\n otherwise returns the original vector.\"\"\"\n n = sqrt(sum(x ** 2 for x in vec)) or 1\n return [x / n for x in vec]\n\n\ndef draw_level_of_detail(here, there, mlist):\n \"\"\"\n Level Of Detail checking and rendering. The shader and texture information\n must be set for all the buf objects in each model before draw_level_of_detail\n is called.\n\n Arguments:\n *here*\n An (x, y, z) tuple or array of view point.\n *there*\n An (x, y, z) tuple or array of model position.\n *mlist*\n A list of (distance, model) pairs with increasing distance, e.g.::\n\n [[20, model1], [100, model2], [250, None]]\n\n draw_level_of_detail() selects the first model that is more distant than\n the distance between the two points *here* and *there*, falling back to\n the last model otherwise. 
The model None is not rendered and is a good\n way to make sure that nothing is drawn past a certain distance.\n \"\"\"\n dist = distance(here, there)\n\n index = bisect.bisect_left(mlist, [dist, None])\n model = mlist[min(index, len(mlist) - 1)][1]\n model.position(there[0], there[1], there[2])\n model.draw()\n\n\"\"\"\n# TODO: None of these functions is actually called in the codebase.\n\ndef ctype_resize(array, new_size):\n resize(array, sizeof(array._type_) * new_size)\n return (array._type_ * new_size).from_address(addressof(array))\n\ndef showerror():\n return opengles.glGetError()\n\ndef limit(x, below, above):\n return max(min(x, above), below)\n\ndef angle_vecs(x1, y1, x2, y2, x3, y3):\n a = x2 - x1\n b = y2 - y1\n c = x2 - x3\n d = y2 - y3\n\n sqab = magnitude(a, b)\n sqcd = magnitude(c, d)\n l = sqab * sqcd\n if l == 0.0: # TODO: comparison between floats.\n l = 0.0001\n aa = ((a * c) + (b * d)) / l\n if aa == -1.0: # TODO: comparison between floats.\n return pi\n if aa == 0.0: # TODO: comparison between floats.\n return 0.0\n dist = (a * y3 - b * x3 + x1 * b - y1 * a) / sqab\n angle = acos(aa)\n\n if dist > 0.0:\n return pi * 2 - angle\n else:\n return angle\n\ndef calc_normal(x1, y1, z1, x2, y2, z2):\n dx = x2 - x1\n dy = y2 - y1\n dz = z2 - z1\n mag = magnitude(dx, dy, dz)\n return (dx / mag, dy / mag, dz / mag)\n\ndef rotate(rotx, roty, rotz):\n # TODO: why the reverse order?\n rotatef(rotz, 0, 0, 1)\n rotatef(roty, 0, 1, 0)\n rotatef(rotx, 1, 0, 0)\n\ndef angle_between(x1, y1, x2, y2, x3, y3):\n #Return the angle between two 3-vectors, or 0.0 if one or the other vector is\n #empty.\n\n #Arguments:\n # *x1, y1, z1*\n # The coordinates of the first vector.\n # *x2, y2, z2*\n # The coordinates of the second vector.\n \n a = x2 - x1\n b = y2 - y1\n c = x2 - x3\n d = y2 - y3\n\n sqab = sqrt(a * a + b * b)\n sqcd = sqrt(c * c + d * d)\n l = sqab * sqcd\n if l == 0.0:\n return 0.0\n\n aa = (a * c + b * d) / l\n if aa == -1.0:\n return pi\n if aa == 0.0:\n return pi / 2\n # TODO: this was originally 0! But if two vectors have a dot product\n # of zero, they are surely at right angles?\n\n dist = (a * y3 - b * x3 + x1 * b - y1 * a) / sqab\n angle = acos(aa)\n\n if dist > 0.0:\n return pi / 2.0 - angle\n else:\n return angle\n\ndef translate(matrix, vec):\n \n #Translate a 4x4 matrix by a 3-vector\n\n #Arguments:\n # *matrix*\n # The 4x4 matrix to translate.\n # *vec*\n # A 3-vector translation in x, y, z axes.\n \n return mat_mult([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [vec[0], vec[1], vec[2], 1]], matrix)\n\ndef transform(matrix, x, y, z, rx, ry, rz, sx, sy, sz, cx, cy, cz):\n #\"\"\n Rotate, scale and translate a 4x4 matrix.\n\n Arguments:\n *matrix*\n A 4x4 matrix to transform.\n *x, y, z*\n Translation in x, y and z axes.\n *rx, ry, rx*\n Rotations in x, y, and x axes.\n *sx, sy, sz*\n Scale factor in x, y, z axes.\n *cx, cy, cz*\n Center of the rotation.\n #\"\"\n # TODO: do we really need this? Wouldn't the separate parts suffice?\n #\n # TODO: the idea of translating then scaling then performing an inverse\n # translation seems like it wouldn't work?\n #\n\n matrix = copy.deepcopy(matrix)\n # TODO: is a copy really needed? 
Surely translate returns a new matrix?\n\n matrix = translate(matrix, (x - cx, y - cy, z - cz))\n matrix = rotate(matrix, rx, ry, rz)\n if sx != 1.0 or sy != 1.0 or sz != 1.0:\n matrix = scale(matrix, sx, sy, sz)\n return translate(matrix, (cx, cy, cz))\n\ndef scale(matrix, sx, sy, sz):\n #\"\"\n Scale a 4x4 matrix.\n\n Arguments:\n *sx, sy, sz*\n Scale factor in x, y, z axes.\n #\"\"\n return mat_mult([[sx, 0, 0, 0],\n [0, sy, 0, 0],\n [0, 0, sz, 0],\n [0, 0, 0, 1]], matrix)\n\ndef rotate(matrix, rx, ry, rz):\n #\"\"\n Rotate a 4x4 matrix.\n\n Arguments:\n *matrix*\n A 4x4 matrix.\n *rx, ry, rx*\n Rotations in x, y, and x axes.\n #\"\"\n if rz:\n matrix = rotateZ(matrix, rz)\n if rx:\n matrix = rotateX(matrix, rx)\n if ry:\n matrix = rotateY(matrix, ry)\n return matrix\n\ndef rotateX(matrix, angle):\n #\"\"\n Rotate a 4x4 matrix around the x axis.\n\n Arguments:\n *matrix*\n A 4x4 matrix.\n *angle*\n Angle of rotation around the x axis.\n #\"\"\n angle = radians(angle)\n c = cos(angle)\n s = sin(angle)\n return mat_mult([[1, 0, 0, 0],\n [0, c, s, 0],\n [0, -s, c, 0],\n [0, 0, 0, 1]],\n matrix)\n\ndef rotateY(matrix, angle):\n #\"\"\n #Rotate a 4x4 matrix around the y axis.#\n\n #Arguments:\n # *matrix*\n # A 4x4 matrix.\n # *angle*\n # Angle of rotation around the y axis.\n #\"\"\n angle = radians(angle)\n c = cos(angle)\n s = sin(angle)\n return mat_mult([[c, 0, -s, 0],\n [0, 1, 0, 0],\n [s, 0, c, 0],\n [0, 0, 0, 1]],\n matrix)\n\ndef rotateZ(matrix, angle):\n \n #Rotate a 4x4 matrix around the z axis.\n\n #Arguments:\n # *matrix*\n # A 4x4 matrix.\n # *angle*\n # Angle of rotation around the z axis.\n \n angle = radians(angle)\n c = cos(angle)\n s = sin(angle)\n return mat_mult([[c, s, 0, 0],\n [-s, c, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]],\n matrix)\n\ndef billboard_matrix():\n \"\"Return a matrix that copies x, y and sets z to 0.9.\"\"\n return [[1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.9, 1.0]]\n\n# TODO: We should use numpy for all of these.\ndef mat_mult(x, y):\n \"\"Return the product of two 4x4 matrices.\"\"\n return [[sum(x[i][j] * y[j][k] for j in range(4))\n for k in range(4)]\n for i in range(4)]\n\ndef mat_transpose(x):\n \"\"Return the transposition of a 4x4 matrix.\"\"\n return [[x[k][i] for k in range(4)] for i in range(4)]\n\ndef vec_mat_mult(vec, mat):\n \"\"Return the product of a 4-d vector and a 4x4 matrix.\n\n Arguments:\n *vec*\n A vector of length 4.\n *mat*\n A 4x4 matrix.\n\n \"\"\n return [sum(vec[j] * mat[j][k] for j in range(4)) for k in range(4)]\n\ndef translate_matrix(vec):\n \"\"Return a matrix that translates by the given vector.\"\"\n m = [[0] * 4] * 4\n for i in range(4):\n m[i][i] = 1.0\n for i in range(3):\n m[3][i] = vec[i]\n return m\n\nRECT_NORMALS = c_bytes((0, 0, -1,\n 0, 0, -1,\n 0, 0, -1,\n 0, 0, -1))\n\nRECT_TRIANGLES = c_bytes((3, 0, 1,\n 3, 1, 2))\n\ndef rect_triangles():\n opengles.glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, RECT_TRIANGLES)\n\ndef sqsum(*args):\n \"\"Return the sum of the squares of its arguments.\n\n DEPRECATED: use dot(x, x).\n \"\"\n return dot(args, args)\n\ndef load_identity():\n opengles.glLoadIdentity()\n\ndef dotproduct(x1, y1, z1, x2, y2, z2):\n \"\"Return the dot product of two 3-dimensional vectors given by coordinates.\n \"\"\n return x1 * x2 + y1 * y2 + z1 * z2\n\ndef crossproduct(x1, y1, z1, x2, y2, z2):\n \"\"Return the cross product of two 3-dimensional vectors given by coordinates.\n \"\"\n return y1 * z2 - z1 * y2, z1 * x2 - x1 * z2, x1 * y2 - y1 * 
x2\n\n\"\"\"\n" ]
[ [ "numpy.dot", "numpy.subtract", "numpy.sqrt", "numpy.divide" ] ]
larsoner/statsmodels
[ "898ddfc483c45bb0f8e5156dd8506abda84c9b63" ]
[ "statsmodels/regression/recursive_ls.py" ]
[ "\"\"\"\nRecursive least squares model\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom warnings import warn\nfrom statsmodels.compat.collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.tsa.statespace.mlemodel import (\n MLEModel, MLEResults, MLEResultsWrapper)\nfrom statsmodels.tools.tools import Bunch\nfrom statsmodels.tools.decorators import cache_readonly, resettable_cache\nimport statsmodels.base.wrapper as wrap\n\n# Columns are alpha = 0.1, 0.05, 0.025, 0.01, 0.005\n_cusum_squares_scalars = np.array([\n [1.0729830, 1.2238734, 1.3581015, 1.5174271, 1.6276236],\n [-0.6698868, -0.6700069, -0.6701218, -0.6702672, -0.6703724],\n [-0.5816458, -0.7351697, -0.8858694, -1.0847745, -1.2365861]\n])\n\n\nclass RecursiveLS(MLEModel):\n r\"\"\"\n Recursive least squares\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n exog : array_like\n Array of exogenous regressors, shaped nobs x k.\n\n Notes\n -----\n Recursive least squares (RLS) corresponds to expanding window ordinary\n least squares (OLS).\n\n This model applies the Kalman filter to compute recursive estimates of the\n coefficients and recursive residuals.\n\n References\n ----------\n .. [1] Durbin, James, and Siem Jan Koopman. 2012.\n Time Series Analysis by State Space Methods: Second Edition.\n Oxford University Press.\n\n \"\"\"\n def __init__(self, endog, exog, **kwargs):\n # Standardize data\n if not _is_using_pandas(endog, None):\n endog = np.asanyarray(endog)\n\n exog_is_using_pandas = _is_using_pandas(exog, None)\n if not exog_is_using_pandas:\n exog = np.asarray(exog)\n\n # Make sure we have 2-dimensional array\n if exog.ndim == 1:\n if not exog_is_using_pandas:\n exog = exog[:, None]\n else:\n exog = pd.DataFrame(exog)\n\n self.k_exog = exog.shape[1]\n\n # Handle coefficient initialization\n # By default, do not calculate likelihood while it is controlled by\n # diffuse initial conditions.\n kwargs.setdefault('loglikelihood_burn', self.k_exog)\n kwargs.setdefault('initialization', 'approximate_diffuse')\n kwargs.setdefault('initial_variance', 1e9)\n\n # Initialize the state space representation\n super(RecursiveLS, self).__init__(\n endog, k_states=self.k_exog, exog=exog, **kwargs\n )\n\n # Setup the state space representation\n self['design'] = self.exog[:, :, None].T\n self['transition'] = np.eye(self.k_states)\n\n # Notice that the filter output does not depend on the measurement\n # variance, so we set it here to 1\n self['obs_cov', 0, 0] = 1.\n\n @classmethod\n def from_formula(cls, formula, data, subset=None):\n \"\"\"\n Not implemented for state space models\n \"\"\"\n return super(MLEModel, cls).from_formula(formula, data, subset)\n\n def fit(self):\n \"\"\"\n Fits the model by application of the Kalman filter\n\n Returns\n -------\n RecursiveLSResults\n \"\"\"\n # Get the smoother results with an arbitrary measurement variance\n smoother_results = self.smooth(return_ssm=True)\n # Compute the MLE of sigma2 (see Harvey, 1989 equation 4.2.5)\n resid = smoother_results.standardized_forecasts_error[0]\n sigma2 = (np.inner(resid, resid) /\n (self.nobs - self.loglikelihood_burn))\n\n # Now construct a results class, where the params are the final\n # estimates of the regression coefficients\n self['obs_cov', 0, 0] = sigma2\n return self.smooth()\n\n def 
filter(self, return_ssm=False, **kwargs):\n # Get the state space output\n result = super(RecursiveLS, self).filter([], transformed=True,\n cov_type='none',\n return_ssm=True, **kwargs)\n\n # Wrap in a results object\n if not return_ssm:\n params = result.filtered_state[:, -1]\n cov_kwds = {\n 'custom_cov_type': 'nonrobust',\n 'custom_cov_params': result.filtered_state_cov[:, :, -1],\n 'custom_description': ('Parameters and covariance matrix'\n ' estimates are RLS estimates'\n ' conditional on the entire sample.')\n }\n result = RecursiveLSResultsWrapper(\n RecursiveLSResults(self, params, result, cov_type='custom',\n cov_kwds=cov_kwds)\n )\n\n return result\n\n def smooth(self, return_ssm=False, **kwargs):\n # Get the state space output\n result = super(RecursiveLS, self).smooth([], transformed=True,\n cov_type='none',\n return_ssm=True, **kwargs)\n\n # Wrap in a results object\n if not return_ssm:\n params = result.filtered_state[:, -1]\n cov_kwds = {\n 'custom_cov_type': 'nonrobust',\n 'custom_cov_params': result.filtered_state_cov[:, :, -1],\n 'custom_description': ('Parameters and covariance matrix'\n ' estimates are RLS estimates'\n ' conditional on the entire sample.')\n }\n result = RecursiveLSResultsWrapper(\n RecursiveLSResults(self, params, result, cov_type='custom',\n cov_kwds=cov_kwds)\n )\n\n return result\n\n @property\n def param_names(self):\n return self.exog_names\n\n @property\n def start_params(self):\n # Only parameter is the measurement disturbance standard deviation\n return np.zeros(0)\n\n def update(self, params, **kwargs):\n \"\"\"\n Update the parameters of the model\n\n Updates the representation matrices to fill in the new parameter\n values.\n\n Parameters\n ----------\n params : array_like\n Array of new parameters.\n transformed : boolean, optional\n Whether or not `params` is already transformed. If set to False,\n `transform_params` is called. 
Default is True.\n\n Returns\n -------\n params : array_like\n Array of parameters.\n \"\"\"\n pass\n\n\nclass RecursiveLSResults(MLEResults):\n \"\"\"\n Class to hold results from fitting a recursive least squares model.\n\n Parameters\n ----------\n model : RecursiveLS instance\n The fitted model instance\n\n Attributes\n ----------\n specification : dictionary\n Dictionary including all attributes from the recursive least squares\n model instance.\n\n See Also\n --------\n statsmodels.tsa.statespace.kalman_filter.FilterResults\n statsmodels.tsa.statespace.mlemodel.MLEResults\n \"\"\"\n\n def __init__(self, model, params, filter_results, cov_type='opg',\n **kwargs):\n super(RecursiveLSResults, self).__init__(\n model, params, filter_results, cov_type, **kwargs)\n\n self.df_resid = np.inf # attribute required for wald tests\n\n # Save _init_kwds\n self._init_kwds = self.model._get_init_kwds()\n\n # Save the model specification\n self.specification = Bunch(**{\n 'k_exog': self.model.k_exog})\n\n @property\n def recursive_coefficients(self):\n \"\"\"\n Estimates of regression coefficients, recursively estimated\n\n Returns\n -------\n out: Bunch\n Has the following attributes:\n\n - `filtered`: a time series array with the filtered estimate of\n the component\n - `filtered_cov`: a time series array with the filtered estimate of\n the variance/covariance of the component\n - `smoothed`: a time series array with the smoothed estimate of\n the component\n - `smoothed_cov`: a time series array with the smoothed estimate of\n the variance/covariance of the component\n - `offset`: an integer giving the offset in the state vector where\n this component begins\n \"\"\"\n out = None\n spec = self.specification\n start = offset = 0\n end = offset + spec.k_exog\n out = Bunch(\n filtered=self.filtered_state[start:end],\n filtered_cov=self.filtered_state_cov[start:end, start:end],\n smoothed=None, smoothed_cov=None,\n offset=offset\n )\n if self.smoothed_state is not None:\n out.smoothed = self.smoothed_state[start:end]\n if self.smoothed_state_cov is not None:\n out.smoothed_cov = (\n self.smoothed_state_cov[start:end, start:end])\n return out\n\n @cache_readonly\n def resid_recursive(self):\n \"\"\"\n Recursive residuals\n\n Returns\n -------\n resid_recursive : array_like\n An array of length `nobs` holding the recursive\n residuals.\n\n Notes\n -----\n The first `k_exog` residuals are typically unreliable due to\n initialization.\n\n \"\"\"\n # See Harvey (1989) section 5.4; he defines the standardized\n # innovations in 5.4.1, but they have non-unit variance, whereas\n # the standardized forecast errors assume unit variance. To convert\n # to Harvey's definition, we need to multiply by the standard\n # deviation.\n return (self.filter_results.standardized_forecasts_error.squeeze() *\n self.filter_results.obs_cov[0, 0]**0.5)\n\n @cache_readonly\n def cusum(self):\n r\"\"\"\n Cumulative sum of standardized recursive residuals statistics\n\n Returns\n -------\n cusum : array_like\n An array of length `nobs - k_exog` holding the\n CUSUM statistics.\n\n\n Notes\n -----\n The CUSUM statistic takes the form:\n\n .. 
math::\n\n W_t = \\frac{1}{\\hat \\sigma} \\sum_{j=k+1}^t w_j\n\n where :math:`w_j` is the recursive residual at time :math:`j` and\n :math:`\\hat \\sigma` is the estimate of the standard deviation\n from the full sample.\n\n Excludes the first `k_exog` datapoints.\n\n Due to differences in the way :math:`\\hat \\sigma` is calculated, the\n output of this function differs slightly from the output in the\n R package strucchange and the Stata contributed .ado file cusum6. The\n calculation in this package is consistent with the description of\n Brown et al. (1975)\n\n References\n ----------\n .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.\n \"Techniques for Testing the Constancy of\n Regression Relationships over Time.\"\n Journal of the Royal Statistical Society.\n Series B (Methodological) 37 (2): 149-92.\n\n \"\"\"\n llb = self.loglikelihood_burn\n return (np.cumsum(self.resid_recursive[self.loglikelihood_burn:]) /\n np.std(self.resid_recursive[llb:], ddof=1))\n\n @cache_readonly\n def cusum_squares(self):\n r\"\"\"\n Cumulative sum of squares of standardized recursive residuals\n statistics\n\n Returns\n -------\n cusum_squares : array_like\n An array of length `nobs - k_exog` holding the\n CUSUM of squares statistics.\n\n Notes\n -----\n The CUSUM of squares statistic takes the form:\n\n .. math::\n\n s_t = \\left ( \\sum_{j=k+1}^t w_j^2 \\right ) \\Bigg /\n \\left ( \\sum_{j=k+1}^T w_j^2 \\right )\n\n where :math:`w_j` is the recursive residual at time :math:`j`.\n\n Excludes the first `k_exog` datapoints.\n\n References\n ----------\n .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.\n \"Techniques for Testing the Constancy of\n Regression Relationships over Time.\"\n Journal of the Royal Statistical Society.\n Series B (Methodological) 37 (2): 149-92.\n\n \"\"\"\n numer = np.cumsum(self.resid_recursive[self.loglikelihood_burn:]**2)\n denom = numer[-1]\n return numer / denom\n\n def plot_recursive_coefficient(self, variables=0, alpha=0.05,\n legend_loc='upper left', fig=None,\n figsize=None):\n r\"\"\"\n Plot the recursively estimated coefficients on a given variable\n\n Parameters\n ----------\n variables : int or str or iterable of int or string, optional\n Integer index or string name of the variable whose coefficient will\n be plotted. Can also be an iterable of integers or strings. Default\n is the first variable.\n alpha : float, optional\n The confidence intervals for the coefficient are (1 - alpha) %\n legend_loc : string, optional\n The location of the legend in the plot. Default is upper left.\n fig : Matplotlib Figure instance, optional\n If given, subplots are created in this figure instead of in a new\n figure. 
Note that the grid will be created in the provided\n figure using `fig.add_subplot()`.\n figsize : tuple, optional\n If a figure is created, this argument allows specifying a size.\n The tuple is (width, height).\n\n Notes\n -----\n All plots contain (1 - `alpha`) % confidence intervals.\n \"\"\"\n # Get variables\n if isinstance(variables, (int, str)):\n variables = [variables]\n k_variables = len(variables)\n\n # If a string was given for `variable`, try to get it from exog names\n exog_names = self.model.exog_names\n for i in range(k_variables):\n variable = variables[i]\n if isinstance(variable, str):\n variables[i] = exog_names.index(variable)\n\n # Create the plot\n from scipy.stats import norm\n from statsmodels.graphics.utils import _import_mpl, create_mpl_fig\n plt = _import_mpl()\n fig = create_mpl_fig(fig, figsize)\n\n for i in range(k_variables):\n variable = variables[i]\n ax = fig.add_subplot(k_variables, 1, i + 1)\n\n # Get dates, if applicable\n if hasattr(self.data, 'dates') and self.data.dates is not None:\n dates = self.data.dates._mpl_repr()\n else:\n dates = np.arange(self.nobs)\n llb = self.loglikelihood_burn\n\n # Plot the coefficient\n coef = self.recursive_coefficients\n ax.plot(dates[llb:], coef.filtered[variable, llb:],\n label='Recursive estimates: %s' % exog_names[variable])\n\n # Legend\n handles, labels = ax.get_legend_handles_labels()\n\n # Get the critical value for confidence intervals\n if alpha is not None:\n critical_value = norm.ppf(1 - alpha / 2.)\n\n # Plot confidence intervals\n std_errors = np.sqrt(coef.filtered_cov[variable, variable, :])\n ci_lower = (\n coef.filtered[variable] - critical_value * std_errors)\n ci_upper = (\n coef.filtered[variable] + critical_value * std_errors)\n ci_poly = ax.fill_between(\n dates[llb:], ci_lower[llb:], ci_upper[llb:], alpha=0.2\n )\n ci_label = ('$%.3g \\\\%%$ confidence interval'\n % ((1 - alpha)*100))\n\n # Only add CI to legend for the first plot\n if i == 0:\n # Proxy artist for fill_between legend entry\n # See http://matplotlib.org/1.3.1/users/legend_guide.html\n p = plt.Rectangle((0, 0), 1, 1,\n fc=ci_poly.get_facecolor()[0])\n\n handles.append(p)\n labels.append(ci_label)\n\n ax.legend(handles, labels, loc=legend_loc)\n\n # Remove xticks for all but the last plot\n if i < k_variables - 1:\n ax.xaxis.set_ticklabels([])\n\n fig.tight_layout()\n\n return fig\n\n def _cusum_significance_bounds(self, alpha, ddof=0, points=None):\n \"\"\"\n Parameters\n ----------\n alpha : float, optional\n The significance bound is alpha %.\n ddof : int, optional\n The number of periods additional to `k_exog` to exclude in\n constructing the bounds. Default is zero. This is usually used\n only for testing purposes.\n points : iterable, optional\n The points at which to evaluate the significance bounds. Default is\n two points, beginning and end of the sample.\n\n Notes\n -----\n Comparing against the cusum6 package for Stata, this does not produce\n exactly the same confidence bands (which are produced in cusum6 by\n lw, uw) because they burn the first k_exog + 1 periods instead of the\n first k_exog. If this change is performed\n (so that `tmp = (self.nobs - llb - 1)**0.5`), then the output here\n matches cusum6.\n\n The cusum6 behavior does not seem to be consistent with\n Brown et al. 
(1975); it is likely they did that because they needed\n three initial observations to get the initial OLS estimates, whereas\n we do not need to do that.\n \"\"\"\n # Get the constant associated with the significance level\n # (critical values a from Brown et al., 1975)\n if alpha == 0.01:\n scalar = 1.143\n elif alpha == 0.05:\n scalar = 0.948\n elif alpha == 0.10:\n scalar = 0.850\n else:\n raise ValueError('Invalid significance level.')\n\n # Get the points for the significance bound lines\n llb = self.loglikelihood_burn\n tmp = (self.nobs - llb - ddof)**0.5\n upper_line = lambda x: scalar * tmp + 2 * scalar * (x - llb) / tmp\n\n if points is None:\n points = np.array([llb, self.nobs])\n return -upper_line(points), upper_line(points)\n\n def plot_cusum(self, alpha=0.05, legend_loc='upper left',\n fig=None, figsize=None):\n r\"\"\"\n Plot the CUSUM statistic and significance bounds.\n\n Parameters\n ----------\n alpha : float, optional\n The plotted significance bounds are alpha %.\n legend_loc : string, optional\n The location of the legend in the plot. Default is upper left.\n fig : Matplotlib Figure instance, optional\n If given, subplots are created in this figure instead of in a new\n figure. Note that the grid will be created in the provided\n figure using `fig.add_subplot()`.\n figsize : tuple, optional\n If a figure is created, this argument allows specifying a size.\n The tuple is (width, height).\n\n Notes\n -----\n Evidence of parameter instability may be found if the CUSUM statistic\n moves out of the significance bounds.\n\n References\n ----------\n .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.\n \"Techniques for Testing the Constancy of\n Regression Relationships over Time.\"\n Journal of the Royal Statistical Society.\n Series B (Methodological) 37 (2): 149-92.\n\n \"\"\"\n # Create the plot\n from statsmodels.graphics.utils import _import_mpl, create_mpl_fig\n plt = _import_mpl()\n fig = create_mpl_fig(fig, figsize)\n ax = fig.add_subplot(1, 1, 1)\n\n # Get dates, if applicable\n if hasattr(self.data, 'dates') and self.data.dates is not None:\n dates = self.data.dates._mpl_repr()\n else:\n dates = np.arange(self.nobs)\n llb = self.loglikelihood_burn\n\n # Plot cusum series and reference line\n ax.plot(dates[llb:], self.cusum, label='CUSUM')\n ax.hlines(0, dates[llb], dates[-1], color='k', alpha=0.3)\n\n # Plot significance bounds\n lower_line, upper_line = self._cusum_significance_bounds(alpha)\n ax.plot([dates[llb], dates[-1]], upper_line, 'k--',\n label='%d%% significance' % (alpha * 100))\n ax.plot([dates[llb], dates[-1]], lower_line, 'k--')\n\n ax.legend(loc=legend_loc)\n\n return fig\n\n def _cusum_squares_significance_bounds(self, alpha, points=None):\n \"\"\"\n Notes\n -----\n Comparing against the cusum6 package for Stata, this does not produce\n exactly the same confidence bands (which are produced in cusum6 by\n lww, uww) because they use a different method for computing the\n critical value; in particular, they use tabled values from\n Table C, pp. 364-365 of \"The Econometric Analysis of Time Series\"\n Harvey, (1990), and use the value given to 99 observations for any\n larger number of observations. 
In contrast, we use the approximating\n critical values suggested in Edgerton and Wells (1994) which allows\n computing relatively good approximations for any number of\n observations.\n \"\"\"\n # Get the approximate critical value associated with the significance\n # level\n llb = self.loglikelihood_burn\n n = 0.5 * (self.nobs - llb) - 1\n try:\n ix = [0.1, 0.05, 0.025, 0.01, 0.005].index(alpha / 2)\n except ValueError:\n raise ValueError('Invalid significance level.')\n scalars = _cusum_squares_scalars[:, ix]\n crit = scalars[0] / n**0.5 + scalars[1] / n + scalars[2] / n**1.5\n\n # Get the points for the significance bound lines\n if points is None:\n points = np.array([llb, self.nobs])\n line = (points - llb) / (self.nobs - llb)\n\n return line - crit, line + crit\n\n def plot_cusum_squares(self, alpha=0.05, legend_loc='upper left',\n fig=None, figsize=None):\n r\"\"\"\n Plot the CUSUM of squares statistic and significance bounds.\n\n Parameters\n ----------\n alpha : float, optional\n The plotted significance bounds are alpha %.\n legend_loc : string, optional\n The location of the legend in the plot. Default is upper left.\n fig : Matplotlib Figure instance, optional\n If given, subplots are created in this figure instead of in a new\n figure. Note that the grid will be created in the provided\n figure using `fig.add_subplot()`.\n figsize : tuple, optional\n If a figure is created, this argument allows specifying a size.\n The tuple is (width, height).\n\n Notes\n -----\n Evidence of parameter instability may be found if the CUSUM of squares\n statistic moves out of the significance bounds.\n\n Critical values used in creating the significance bounds are computed\n using the approximate formula of [2]_.\n\n References\n ----------\n .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.\n \"Techniques for Testing the Constancy of\n Regression Relationships over Time.\"\n Journal of the Royal Statistical Society.\n Series B (Methodological) 37 (2): 149-92.\n .. [2] Edgerton, David, and Curt Wells. 1994.\n \"Critical Values for the Cusumsq Statistic\n in Medium and Large Sized Samples.\"\n Oxford Bulletin of Economics and Statistics 56 (3): 355-65.\n\n \"\"\"\n # Create the plot\n from statsmodels.graphics.utils import _import_mpl, create_mpl_fig\n plt = _import_mpl()\n fig = create_mpl_fig(fig, figsize)\n ax = fig.add_subplot(1, 1, 1)\n\n # Get dates, if applicable\n if hasattr(self.data, 'dates') and self.data.dates is not None:\n dates = self.data.dates._mpl_repr()\n else:\n dates = np.arange(self.nobs)\n llb = self.loglikelihood_burn\n\n # Plot cusum series and reference line\n ax.plot(dates[llb:], self.cusum_squares, label='CUSUM of squares')\n ref_line = (np.arange(llb, self.nobs) - llb) / (self.nobs - llb)\n ax.plot(dates[llb:], ref_line, 'k', alpha=0.3)\n\n # Plot significance bounds\n lower_line, upper_line = self._cusum_squares_significance_bounds(alpha)\n ax.plot([dates[llb], dates[-1]], upper_line, 'k--',\n label='%d%% significance' % (alpha * 100))\n ax.plot([dates[llb], dates[-1]], lower_line, 'k--')\n\n ax.legend(loc=legend_loc)\n\n return fig\n\n\nclass RecursiveLSResultsWrapper(MLEResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(RecursiveLSResultsWrapper, RecursiveLSResults)\n" ]
[ [ "scipy.stats.norm.ppf", "numpy.inner", "numpy.sqrt", "numpy.asarray", "numpy.arange", "numpy.eye", "numpy.cumsum", "pandas.DataFrame", "numpy.std", "numpy.asanyarray", "numpy.array", "numpy.zeros" ] ]
marioguenzel/arr_curve
[ "af3b59cb4f5eaab52d12232c7ac1d63cd8e978f9" ]
[ "schedTest/OurAnalysis.py" ]
[ "# Schedulability test from our paper.\n\nimport itertools\nimport math\nimport numpy as np\n\n\ndef _WCRT_bound(\n ind, # index of task under analysis\n tasks, # set of tasks\n arr, # arrival curves\n xvec, # vector of x\n wcrtvec, # vector of WCRT bounds for higher priority tasks\n inda # a-th job in suspension-aware busy interval\n):\n '''Schedulability test from the paper, Theorem 17, Corollary 18.\n Assume that tasks are ordered by priority.\n\n Note that if the result is bigger than the task deadline, this \n is not a real response time upper bound.'''\n\n task_an = tasks[ind] # task under analysis\n HPTasks = tasks[:ind] # higher priority tasks\n\n # Compute Qvec\n Qvec = _compute_qvec(HPTasks, xvec)\n\n # Compute lower bound on first time that arr curve is at least a:\n # This is the infimum from Eq (12)\n arrival_bound = arr[ind].arrival_time(inda-1)\n\n # kind of TDA\n theta = inda * (task_an['execution'] + task_an['sslength']) # start\n\n while True:\n # compute left hand side of Eq (11)\n lhs = inda * (task_an['execution'] + task_an['sslength'])\n for idx, hptsk in enumerate(HPTasks):\n lhs += xvec[idx] * \\\n _compute_A1(theta+Qvec[idx], hptsk, arr[idx], wcrtvec[idx])\n\n lhs += (1-xvec[idx]) * \\\n _compute_A0(theta+Qvec[idx], hptsk, arr[idx], wcrtvec[idx])\n\n # check\n\n resp = theta - arrival_bound\n\n if resp > task_an['deadline']:\n break\n\n if lhs <= theta: # wcrt upper bound found\n break\n\n theta = lhs # increase theta\n\n return resp\n\n\ndef sched_test(\n taskset, # the taskset under analysis\n arr_curves, # list of arrival curve objects\n choose_xvec=0, # choose xvectors from a predefined list\n own_xvec=None, # set own list of xvectors\n upperbound_a=10, # upper bound for the index a\n # return response times instead of True, False is still returned:\n flag_return_response=False\n):\n ''' Schedulability test from Corollary 18.\n Note: taskset has to be ordered by task priority\n Return: True = schedulable, False = no information\n '''\n # WCRT list\n wcrtlist = []\n\n for ind, tsk in enumerate(taskset):\n\n # set xvec list\n if own_xvec is None:\n if choose_xvec in [0, 'exh']:\n xveclist = Gen_xvec.all_combinations(len(taskset))\n elif choose_xvec in [1, 'all0']:\n xveclist = Gen_xvec.all_zero(len(taskset))\n elif choose_xvec in [2, 'all1']:\n xveclist = Gen_xvec.all_one(len(taskset))\n elif choose_xvec in [3, 'SleqC']:\n xveclist = Gen_xvec.heuristic1(taskset)\n elif choose_xvec in [4, 'lin']:\n xveclist = Gen_xvec.heuristic2(taskset, wcrtlist)\n elif choose_xvec in [5, 'comb3']:\n xveclist = (Gen_xvec.all_zero(len(taskset))\n + Gen_xvec.all_one(len(taskset))\n + Gen_xvec.heuristic2(taskset, wcrtlist))\n else:\n xveclist = own_xvec\n\n # analyse the task\n task_wcrtlist = []\n\n # (inda)-th job in a suspension-aware busy interval\n for inda in itertools.count(start=1):\n wcrt_bound_a = None\n\n for xvec in xveclist:\n # compute wcrt bound for this vec\n wcrt_bound_vec = _WCRT_bound(\n ind, taskset, arr_curves, xvec, wcrtlist, inda)\n\n # update wcrt bound for index a\n if (wcrt_bound_a is None) or wcrt_bound_a > wcrt_bound_vec:\n wcrt_bound_a = wcrt_bound_vec\n\n task_wcrtlist.append(wcrt_bound_a)\n\n # Check\n if wcrt_bound_a > tsk['deadline'] or inda >= upperbound_a:\n return False\n if wcrt_bound_a <= arr_curves[ind].arrival_time(inda) - arr_curves[ind].arrival_time(inda-1):\n break # break a\n\n wcrtlist.append(max(task_wcrtlist)) # add wcrt to list\n\n if flag_return_response is True:\n return wcrtlist\n else:\n return True\n\n\nclass Gen_xvec:\n '''Collection 
of generators for xvectors.'''\n\n def all_combinations(list_length, entries=[0, 1]):\n '''Return all possible xvectors.'''\n return list(itertools.product(entries, repeat=list_length))\n\n def all_zero(list_length):\n return [[0] * list_length]\n\n def all_one(list_length):\n return [[1] * list_length]\n\n def heuristic1(taskset):\n '''Heuristic from the end of the technical report of \n \"A Unifying Response Time Analysis Framework for Dynamic Self-Suspending\n Tasks\" by Chen, Nelissen, Huang in 2016'''\n vec = []\n for tsk in taskset:\n if tsk['sslength'] <= tsk['execution']:\n vec.append(1)\n else:\n vec.append(0)\n return [vec]\n\n def heuristic2(taskset, wcrts):\n '''Linear approximation from the end of the technical report of \n \"A Unifying Response Time Analysis Framework for Dynamic Self-Suspending\n Tasks\" by Chen, Nelissen, Huang in 2016'''\n\n vec_x = []\n sumU = 0\n\n for tsk, wcrt in zip(taskset, wcrts):\n indU = tsk['execution'] / tsk['period'] # compute util of task\n sumU += indU # total util\n\n # lhs and rhs of eq 27\n lhs = indU * (wcrt - tsk['execution'])\n rhs = tsk['sslength'] * sumU\n\n if lhs > rhs:\n vec_x.append(1)\n else:\n vec_x.append(0)\n\n return [vec_x]\n\n\ndef _compute_qvec(HPTasks, xvec):\n '''Compute the Qvector as in Lemma 16.'''\n Qvec = []\n Qvar = 0.0\n for idx, tsk in list(enumerate(HPTasks))[::-1]:\n Qvar += tsk['sslength']*xvec[idx]\n Qvec.insert(0, Qvar)\n\n return Qvec\n\n\ndef _compute_A1(\n delta, # input\n taskj, # task for the index\n arrj, # arrival curve for the index\n wcrtj # WCRT upper bound for the task with index\n):\n res = delta\n res += max(wcrtj - taskj['period'], 0)\n res = arrj(res)\n res *= taskj['execution']\n\n return res\n\n\ndef _compute_A0(\n delta, # input\n taskj, # task for the index\n arrj, # arrival curve for the index\n wcrtj, # WCRT upper bound for the task with index\n):\n Cstar = _compute_maxcurrwl(arrj, wcrtj, taskj['execution'])\n\n res = delta - taskj['period'] + wcrtj - Cstar\n res = arrj(res)\n res *= taskj['execution']\n res += Cstar\n\n # trivial bound\n res2 = arrj(delta + wcrtj) * taskj['execution']\n\n # return res\n return min(res, res2)\n\n\ndef _compute_maxcurrwl(\n arr_curve, # arrival curve\n wcrt, # WCRT upper bound\n wcet # WCET upper bound\n):\n val1 = arr_curve(wcrt) * wcet\n val2 = wcrt\n\n return min(val1, val2)\n\n\nclass ArrivalCurve:\n def __init__(self, func):\n self.base_function = func\n self.arrivals = []\n self.arrivals_gen = None\n\n def __call__(self, inputvalue):\n return self.base_function(inputvalue)\n\n def set_arrival_times(self, arrivallist):\n '''Set lower time bounds for the first arrivals.\n Arrivallist as generator.'''\n self.arrivals = arrivallist\n\n def set_arrival_times_gen(self, generator):\n '''Set lower time bounds for the first arrivals.\n Arrivallist as generator.'''\n self.arrivals_gen = generator\n\n def compute_first_arrivals(self, number, stepsize):\n '''Compute lower time bound for the first (number) arrivals.'''\n arrivallist = []\n tfloat = 0.0\n cmp_val = 1\n while cmp_val <= number:\n nxt = tfloat + stepsize # compute next value\n if nxt >= cmp_val:\n arrivallist.append(tfloat) # add to arrivallist\n cmp_val += 1\n tfloat = nxt\n\n self.arrivals = arrivallist\n\n def arrival_time(self, number):\n '''Lower bound on the arrival time of the (number)-th job \n in a suspension-aware busy interval.\n Note: arrivals has to be computed or set.'''\n\n while True:\n if len(self.arrivals) > number:\n return self.arrivals[number]\n else: # fill list using the 
generator\n self.arrivals.append(next(self.arrivals_gen))\n\n\ndef arr_sporadic(min_inter_arr):\n '''Returns the arrival curve for a sporadic task.'''\n def arr(delta):\n if delta <= 0:\n return 0\n else:\n return math.ceil(delta/min_inter_arr)\n\n def arr_times():\n timevar = 0.0\n while True:\n yield timevar\n timevar += min_inter_arr\n\n arr_curv = ArrivalCurve(arr)\n arr_curv.set_arrival_times_gen(arr_times())\n\n return arr_curv\n\n\ndef arr_jitter(min_inter_arr, jit):\n '''Returns the arrival curve for a task with jitter.'''\n def arr(delta):\n if delta <= 0:\n return 0\n else:\n return math.ceil((delta + jit*min_inter_arr)/min_inter_arr)\n\n def arr_times():\n timevar = 0.0\n yield timevar\n timevar += (1-jit) * min_inter_arr\n while True:\n yield timevar\n timevar += min_inter_arr\n\n arr_curv = ArrivalCurve(arr)\n arr_curv.set_arrival_times_gen(arr_times())\n\n return arr_curv\n\n\ndef arr_log(min_inter_arr):\n '''Returns the logarithmic arrival curve for a task.'''\n def arr(delta):\n if delta <= 0:\n return 0\n else:\n return np.log(delta+1)/np.log(min_inter_arr+1) + 1\n\n def arr_times():\n for ind in itertools.count(start=1):\n timevar = (ind-1)*np.log(min_inter_arr+1)\n timevar = np.exp(timevar) - 1\n yield timevar\n\n arr_curv = ArrivalCurve(arr)\n arr_curv.set_arrival_times_gen(arr_times())\n\n return arr_curv\n\n\ndef sota_CPA(\n taskset, # the taskset under analysis\n arr_curves, # list of arrival curve objects\n upperbound_a=10, # upper bound for the index a\n # return response times instead of True, False is still returned:\n flag_return_response=False\n):\n ''' State of the art for jitter based analysis (CPA).\n '''\n # WCRT list\n wcrtlist = []\n\n for ind, tsk in enumerate(taskset):\n\n # analyse the task\n task_wcrtlist = []\n\n # (inda)-th job in a suspension-aware busy interval\n for inda in itertools.count(start=1):\n wcrt_bound_a = _sota_CPA_wcrtbound(\n ind, taskset, arr_curves, wcrtlist, inda)\n\n task_wcrtlist.append(wcrt_bound_a)\n\n # Check\n if wcrt_bound_a > tsk['deadline'] or inda >= upperbound_a:\n return False\n if wcrt_bound_a <= arr_curves[ind].arrival_time(inda) - arr_curves[ind].arrival_time(inda-1):\n break # break a\n\n wcrtlist.append(max(task_wcrtlist)) # add wcrt to list\n\n if flag_return_response is True:\n return wcrtlist\n else:\n return True\n\n\ndef _sota_CPA_compute_interference(delta, tasks, WCRTs, arr_curves):\n val = 0\n for ind, wcrt in enumerate(WCRTs):\n val += arr_curves[ind](delta + wcrt) * tasks[ind]['execution']\n return val\n\n\ndef _sota_CPA_wcrtbound(ind, taskset, arr_curves, wcrtlist, inda):\n tsk = taskset[ind]\n arrival_bound = arr_curves[ind].arrival_time(inda-1)\n theta = 0\n while True:\n lhs = inda * (tsk['execution'] + tsk['sslength']) + \\\n _sota_CPA_compute_interference(\n theta, taskset, wcrtlist, arr_curves)\n\n resp = theta - arrival_bound\n\n if resp > tsk['deadline'] or lhs <= theta:\n break\n else:\n theta = lhs\n # breakpoint()\n\n return resp\n\n\n# def sota_CPA(taskset, arr_curves):\n# def compute_interference(delta, tasks, WCRTs, arr_curves):\n# val = 0\n# for ind, wcrt in enumerate(WCRTs):\n# val += arr_curves[ind](delta + wcrt) * tasks[ind]['execution']\n# return val\n\n# WCRTs = []\n\n# for tsk in taskset:\n# theta = 0\n# while True:\n# lhs = tsk['execution'] + tsk['sslength'] + \\\n# compute_interference(theta, taskset, WCRTs, arr_curves)\n\n# if lhs > tsk['deadline']:\n# return False\n# elif lhs <= theta:\n# WCRTs.append(theta)\n# break\n# else:\n# theta = lhs\n\n# return True\n\n\nif __name__ == 
'__main__':\n # # Test all combinations:\n # print('=Test all combinations=')\n # for ell in range(4):\n # print(ell, list(Gen_xvec.all_combinations(ell)))\n\n # # Test Qvec:\n # print('=Test Qvec=')\n # HPTasks = [\n # {'sslength': 10},\n # {'sslength': 30},\n # {'sslength': 100}\n # ]\n # for xvec in Gen_xvec.all_combinations(3):\n # print(xvec, _compute_qvec(HPTasks, xvec))\n\n # # Test arrival curve for sporadic:\n # print('=Test Arrival Curve for sporadic=')\n # arr_curve = arr_sporadic(3)\n # for delta in range(-10, 15):\n # print(delta, arr_curve(delta))\n # for number in range(5):\n # print(number, arr_curve.arrival_time(number))\n\n # # Test arrival curve for jitter:\n # print('=Test Arrival Curve for jitter=')\n # arr_curve = arr_jitter(4, 0.5)\n # for delta in range(-10, 15):\n # print(delta, arr_curve(delta))\n # for number in range(5):\n # print(number, arr_curve.arrival_time(number))\n\n # # Test compute A1:\n # print('=Test Compute A1=')\n # arr_curve = arr_sporadic(3)\n # for delta in range(0, 15):\n # print(delta, _compute_A1(\n # delta, {'period': 3, 'execution': 1}, arr_curve, 2))\n # for delta in range(0, 15):\n # print(delta, _compute_A1(\n # delta, {'period': 3, 'execution': 1}, arr_curve, 7))\n\n # # Test compute A0:\n # print('=Test Compute A0=')\n # arr_curve = arr_sporadic(3)\n # for delta in range(0, 15):\n # print(delta, _compute_A0(\n # delta, {'period': 3, 'execution': 1}, arr_curve, 2))\n # for delta in range(0, 15):\n # print(delta, _compute_A0(\n # delta, {'period': 3, 'execution': 1}, arr_curve, 7))\n\n # # Test schedulability test\n # print('=Test 1 schedulability test=')\n # taskset = []\n # taskset.append({'period': 4, 'deadline': 4,\n # 'execution': 1, 'sslength': 1})\n # taskset.append({'period': 10, 'deadline': 12,\n # 'execution': 2, 'sslength': 5})\n # taskset.append({'period': 100, 'deadline': 80,\n # 'execution': 10, 'sslength': 20})\n\n # arr_curves = [arr_sporadic(4), arr_sporadic(10), arr_sporadic(100)]\n\n # print(sched_test(taskset, arr_curves, choose_xvec=0, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=1, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=2, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=3, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=4, flag_return_response=True))\n\n # print('=Test 2 schedulability test=')\n # taskset = []\n # taskset.append({'period': 50, 'deadline': 200,\n # 'execution': 10, 'sslength': 10})\n # taskset.append({'period': 100, 'deadline': 200,\n # 'execution': 15, 'sslength': 15})\n # taskset.append({'period': 100, 'deadline': 300,\n # 'execution': 40, 'sslength': 20})\n\n # arr_curves = [arr_sporadic(50), arr_sporadic(100), arr_sporadic(100)]\n\n # print(sched_test(taskset, arr_curves, choose_xvec=0, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=1, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=2, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=3, flag_return_response=True))\n # print(sched_test(taskset, arr_curves, choose_xvec=4, flag_return_response=True))\n\n print('=Test 3 sota cpa=')\n taskset = []\n taskset.append({'period': 50, 'deadline': 100,\n 'execution': 10, 'sslength': 10})\n taskset.append({'period': 100, 'deadline': 200,\n 'execution': 15, 'sslength': 15})\n taskset.append({'period': 100, 'deadline': 300,\n 'execution': 40, 'sslength': 20})\n\n 
arr_curves = [arr_sporadic(50), arr_sporadic(100), arr_sporadic(100)]\n\n print(sota_CPA(taskset, arr_curves))\n" ]
[ [ "numpy.log", "numpy.exp" ] ]
RedFT/HexTiler
[ "e31d27b37db100b2cabd25803489e53cf41b8b52" ]
[ "hextiler/selection_tile.py" ]
[ "import numpy as np\n\nimport hexy as hx\nfrom hextiler.draw import make_hex_surface\n\n\nclass SelectionTile(hx.HexTile):\n def __init__(self, axial_coordinates, border_color, radius):\n self.axial_coordinates = np.array([axial_coordinates])\n self.cube_coordinates = hx.axial_to_cube(self.axial_coordinates)\n self.position = hx.axial_to_pixel(self.axial_coordinates, radius)\n self.color = border_color\n self.radius = radius\n self.image = make_hex_surface((0, 0, 0, 140), self.radius, self.color, hollow=True)\n\n def set_position(self, position):\n self.position = position\n self.axial_coordinates = hx.pixel_to_axial(self.position, self.radius)\n self.cube_coordinates = hx.pixel_to_cube(self.position, self.radius)\n\n def get_draw_position(self):\n \"\"\"\n Get the location to draw this hex so that the center of the hex is at `self.position`.\n :return: The location to draw this hex so that the center of the hex is at `self.position`.\n \"\"\"\n draw_position = self.position[0] - [self.image.get_width() / 2, self.image.get_height() / 2]\n return draw_position\n\n def get_position(self):\n \"\"\"\n Retrieves the location of the center of the hex.\n :return: The location of the center of the hex.\n \"\"\"\n return self.position[0]\n\n def copy(self):\n return SelectionTile(self.axial_coordinates[0], self.color, self.radius)" ]
[ [ "numpy.array" ] ]
MalcolmSlaney/audio-to-tactile
[ "8c1fa37509aa53307f24dc7d54e99f730a8bcc1f" ]
[ "extras/python/phonetics/classify_phoneme_test.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n\"\"\"Tests for phoneme classifier Python bindings.\"\"\"\n\nimport unittest\nimport numpy as np\n\nfrom extras.python import dsp\nfrom extras.python import frontend\nfrom extras.python.phonetics import classify_phoneme\nfrom extras.python.phonetics.sliding_window import sliding_window\n\nCLASSIFIER_INPUT_HZ = 16000\n\n\nclass ClassifyPhonemeTest(unittest.TestCase):\n\n def _run_phoneme(self, phoneme: str) -> None:\n \"\"\"A forgiving test that the classifier is basically working.\n\n Runs CarlFrontend + ClassifyPhoneme on a short WAV recording of a pure\n phone, and checks that a moderately confident score is sometimes given to\n the correct label.\n\n Args:\n phoneme: String, name of the phoneme to test.\n \"\"\"\n wav_file = (\n f'extras/test/testdata/phone_{phoneme}.wav'\n )\n samples, sample_rate_hz = dsp.read_wav_file(wav_file, dtype=np.float32)\n samples = samples.mean(axis=1)\n self.assertEqual(sample_rate_hz, CLASSIFIER_INPUT_HZ)\n\n # Run frontend to get CARL frames. The classifier expects input sample rate\n # CLASSIFIER_INPUT_HZ, block_size=128, pcen_cross_channel_diffusivity=60,\n # and otherwise the default frontend settings.\n carl = frontend.CarlFrontend(input_sample_rate_hz=CLASSIFIER_INPUT_HZ,\n block_size=128,\n pcen_cross_channel_diffusivity=60.0)\n self.assertEqual(carl.num_channels, classify_phoneme.NUM_CHANNELS)\n samples = samples[:len(samples) - len(samples) % carl.block_size]\n frames = carl.process_samples(samples)\n\n count_correct = 0\n count_total = 0\n for window in sliding_window(frames, classify_phoneme.NUM_FRAMES):\n scores = classify_phoneme.classify_phoneme_scores(window)\n # Count as \"correct\" if correct label's score is moderately confident.\n count_correct += (scores['phoneme'][phoneme] > 0.1)\n count_total += 1\n\n self.assertCountEqual(scores['phoneme'].keys(), classify_phoneme.PHONEMES)\n self.assertCountEqual(scores['manner'].keys(), classify_phoneme.MANNERS)\n self.assertCountEqual(scores['place'].keys(), classify_phoneme.PLACES)\n\n accuracy = float(count_correct) / count_total\n self.assertGreaterEqual(accuracy, 0.6)\n\n def test_phoneme_ae(self):\n self._run_phoneme('ae')\n\n def test_phoneme_er(self):\n self._run_phoneme('er')\n\n def test_phoneme_z(self):\n self._run_phoneme('z')\n\n def test_label_output(self):\n np.random.seed(0)\n\n for _ in range(5):\n frames = np.random.rand(classify_phoneme.NUM_FRAMES,\n classify_phoneme.NUM_CHANNELS)\n\n labels = classify_phoneme.classify_phoneme_labels(frames)\n scores = classify_phoneme.classify_phoneme_scores(frames)\n\n self.assertIn(labels['phoneme'], classify_phoneme.PHONEMES)\n self.assertIn(labels['manner'], classify_phoneme.MANNERS)\n self.assertIn(labels['place'], classify_phoneme.PLACES)\n\n score_argmax = max(scores['phoneme'], key=scores['phoneme'].get)\n self.assertEqual(labels['phoneme'], score_argmax)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.rand", "numpy.random.seed" ] ]
Micro-Masters/AI
[ "0696efd4114d5a7a5d659c9ab77dff6feb3abdd9" ]
[ "misc/ObservationsResearch/random_agent_obs.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A random agent for starcraft.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\n\n\n\nclass RandomAgent(base_agent.BaseAgent):\n \"\"\"A random agent for starcraft.\"\"\"\n\n def __init__(self):\n super(RandomAgent, self).__init__()\n self.previous_observations = None\n\n def step(self, obs):\n super(RandomAgent, self).step(obs)\n if self.previous_observations is not None:\n if obs.observation['available_actions'].all() != self.previous_observations.all():\n print(\"available_actions \" + str(obs.observation['available_actions']))\n self.previous_observations = obs.observation['available_actions']\n function_id = numpy.random.choice(obs.observation[\"available_actions\"])\n args = [[numpy.random.randint(0, size) for size in arg.sizes]\n for arg in self.action_spec.functions[function_id].args]\n return actions.FunctionCall(function_id, args)\n" ]
[ [ "numpy.random.randint", "numpy.random.choice" ] ]
AngusG/bn-adversarial-spheres
[ "91425eb95eba85c2977584d7d6a19f60fbd819ab" ]
[ "create_dataframe_fig6.py" ]
[ "import os\nimport argparse\nimport numpy as np\nfrom numpy.linalg import det, inv\nimport pandas as pd\n\n# for linear model\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\n\nfrom ReLUNetwork import ReLUNetwork\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='PyTorch Adversarial Spheres Training')\n # model settings\n parser.add_argument('--batch_norm', help='do batch normalization \\\n (with m=0, track=False)', action=\"store_true\")\n parser.add_argument('--layers', help='num layers in model', default=2,\n type=int)\n # training hyper-parameters\n parser.add_argument('--lr', help='learning rate', default=1e-2, type=float)\n parser.add_argument('--bs', help='examples per mini-batch', type=int,\n default=50)\n parser.add_argument('--epochs', help='number of epochs to train for',\n type=int, default=200)\n parser.add_argument('--seed', help='random seed', type=int, default=1)\n\n args = parser.parse_args()\n\n IDX_LSS = 0\n IDX_ACC = 1\n N = 500 # This is the dataset size.\n d = 2\n R = 1.3\n N_CLASSES = 2\n num_units = 1000\n do_batch_norm = True if args.batch_norm else False\n train_stats = np.zeros((5, args.epochs, 2))\n\n loss_fnct = nn.CrossEntropyLoss()\n softmax = torch.nn.Softmax(dim=0)\n batch_softmax = torch.nn.Softmax(dim=1)\n device = torch.device('cuda:0')\n\n for seed in range(5):\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n x = np.random.multivariate_normal(np.zeros(d), np.eye(d), N * 2)\n x = x / np.expand_dims(np.linalg.norm(x, axis=1), 1)\n\n # Training set\n train_x = np.zeros((2 * N, 2)) # DATA, FEATURES\n train_y = np.zeros(2 * N) # DATA, Int label\n\n # class 1\n train_x[:N, 0] = x[:N, 0].copy() #+ np.random.randn(N) / 30\n train_x[:N, 1] = x[:N, 1].copy() #+ np.random.randn(N) / 30\n train_y[:N] = 0\n\n # class 2\n train_x[N:, 0] = x[N:, 0].copy() * R #+ np.random.randn(N) / 30\n train_x[N:, 1] = x[N:, 1].copy() * R #+ np.random.randn(N) / 30\n train_y[N:] = 1\n\n X_test = torch.tensor(train_x, dtype=torch.float)\n Y_test = torch.tensor(train_y, dtype=torch.long)\n\n # create datasets\n test_dataset = torch.utils.data.TensorDataset(X_test, Y_test)\n test_loader_noshuffle = torch.utils.data.DataLoader(\n dataset=test_dataset, batch_size=100, shuffle=False)\n rng_state = np.random.get_state()\n np.random.shuffle(train_x)\n np.random.set_state(rng_state)\n np.random.shuffle(train_y)\n is_shuffled = True\n\n X_train = torch.tensor(train_x, dtype=torch.float)\n Y_train = torch.tensor(train_y, dtype=torch.long)\n\n # create datasets\n train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)\n train_loader_noshuffle = torch.utils.data.DataLoader(\n dataset=train_dataset, batch_size=N, shuffle=False)\n train_loader = torch.utils.data.DataLoader(\n dataset=train_dataset, batch_size=args.bs, shuffle=True)\n\n model = ReLUNetwork(train_x.shape[1], args.layers, num_units,\n N_CLASSES, do_batch_norm=do_batch_norm).to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)\n total_step = len(train_loader)\n\n for epoch in range(args.epochs):\n model.train()\n train_mb_loss = 0\n for i, (inputs, labels) in enumerate(train_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n outputs = model(inputs)\n loss = loss_fnct(outputs, labels)\n train_mb_loss += loss.item()\n # backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_stats[seed, epoch, IDX_LSS] = train_mb_loss / len(train_loader)\n\n model.eval()\n with torch.no_grad():\n total = 0\n correct = 0\n 
for inputs, labels in train_loader_noshuffle:\n inputs = inputs.to(device)\n labels = labels.to(device)\n outputs = model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n train_stats[seed, epoch, IDX_ACC] = float(correct) / total\n print('Epoch [{}/{}], Loss: {:.3f}, Train Acc: {:.4f}'\n .format(epoch, args.epochs, train_stats[seed, epoch, 0],\n train_stats[seed, epoch, 1]))\n filename = 'df_ep%d_bn%s_lr%.e' % (args.epochs, do_batch_norm, args.lr)\n print(filename)\n df_lss = pd.DataFrame(train_stats[:, :, 0]).melt()\n df_acc = pd.DataFrame(train_stats[:, :, 1]).melt()\n df_lss.to_pickle(filename + '_lss')\n df_acc.to_pickle(filename + '_acc')\n" ]
[ [ "torch.nn.Softmax", "torch.nn.CrossEntropyLoss", "numpy.random.get_state", "torch.max", "numpy.random.seed", "torch.manual_seed", "torch.utils.data.TensorDataset", "numpy.eye", "torch.utils.data.DataLoader", "numpy.linalg.norm", "numpy.random.shuffle", "torch.tensor", "pandas.DataFrame", "numpy.random.set_state", "torch.no_grad", "torch.device", "numpy.zeros" ] ]
Wang-Yiran/insightface
[ "c2d9247f5e21a474084626660f3bce7f09edbfb2" ]
[ "src/api/face_model.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport mxnet as mx\nimport random\nimport sklearn\nfrom sklearn.decomposition import PCA\nfrom time import sleep\nfrom easydict import EasyDict as edict\n#import facenet\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'align'))\nimport detect_face\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))\nimport face_image\nimport face_preprocess\n\ndef ch_dev(arg_params, aux_params, ctx):\n new_args = dict()\n new_auxs = dict()\n for k, v in arg_params.items():\n new_args[k] = v.as_in_context(ctx)\n for k, v in aux_params.items():\n new_auxs[k] = v.as_in_context(ctx)\n return new_args, new_auxs\n\ndef do_flip(data):\n for idx in xrange(data.shape[0]):\n data[idx,:,:] = np.fliplr(data[idx,:,:])\n\nclass FaceModel:\n def __init__(self, args):\n model = edict()\n with tf.Graph().as_default():\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.2\n sess = tf.Session(config=config)\n #sess = tf.Session()\n with sess.as_default():\n self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess, None)\n\n self.threshold = args.threshold\n self.det_minsize = 50\n self.det_threshold = [0.4,0.6,0.6]\n self.det_factor = 0.9\n _vec = args.image_size.split(',')\n assert len(_vec)==2\n self.image_size = (int(_vec[0]), int(_vec[1]))\n _vec = args.model.split(',')\n assert len(_vec)==2\n prefix = _vec[0]\n epoch = int(_vec[1])\n print('loading',prefix, epoch)\n self.model = edict()\n # self.model.ctx = mx.gpu(args.gpu)\n self.model.ctx = mx.gpu(args.gpu) if mx.context.num_gpus() else mx.cpu(args.gpu)\n self.model.sym, self.model.arg_params, self.model.aux_params = mx.model.load_checkpoint(prefix, epoch)\n self.model.arg_params, self.model.aux_params = ch_dev(self.model.arg_params, self.model.aux_params, self.model.ctx)\n all_layers = self.model.sym.get_internals()\n self.model.sym = all_layers['fc1_output']\n\n def get_aligned_face(self, img, force = False):\n #print('before det', img.shape)\n bounding_boxes, points = detect_face.detect_face(img, self.det_minsize, self.pnet, self.rnet, self.onet, self.det_threshold, self.det_factor)\n #if bounding_boxes.shape[0]==0:\n # fimg = np.copy(img)\n # do_flip(fimg)\n # bounding_boxes, points = detect_face.detect_face(fimg, self.det_minsize, self.pnet, self.rnet, self.onet, self.det_threshold, self.det_factor)\n if bounding_boxes.shape[0]==0 and force:\n print('force det', img.shape)\n bounding_boxes, points = detect_face.detect_face(img, self.det_minsize, self.pnet, self.rnet, self.onet, [0.3, 0.3, 0.1], self.det_factor)\n #bounding_boxes, points = detect_face.detect_face_force(img, None, self.pnet, self.rnet, self.onet)\n #print('after det')\n if bounding_boxes.shape[0]==0:\n return None\n bindex = 0\n nrof_faces = bounding_boxes.shape[0]\n det = bounding_boxes[:,0:4]\n img_size = np.asarray(img.shape)[0:2]\n if nrof_faces>1:\n bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])\n img_center = img_size / 2\n offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])\n offset_dist_squared = np.sum(np.power(offsets,2.0),0)\n bindex = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering\n det = bounding_boxes[:,0:4]\n det = det[bindex,:]\n points = points[:, bindex]\n landmark = 
points.reshape((2,5)).T\n #points need to be transpose, points = points.reshape( (5,2) ).transpose()\n det = np.squeeze(det)\n bb = det\n points = list(points.flatten())\n assert(len(points)==10)\n str_image_size = \"%d,%d\"%(self.image_size[0], self.image_size[1])\n warped = face_preprocess.preprocess(img, bbox=bb, landmark = landmark, image_size=str_image_size)\n warped = np.transpose(warped, (2,0,1))\n print(warped.shape)\n return warped\n\n def get_all_faces(self, img):\n str_image_size = \"%d,%d\"%(self.image_size[0], self.image_size[1])\n bounding_boxes, points = detect_face.detect_face(img, self.det_minsize, self.pnet, self.rnet, self.onet, self.det_threshold, self.det_factor)\n ret = []\n for i in xrange(bounding_boxes.shape[0]):\n bbox = bounding_boxes[i,0:4]\n landmark = points[:, i].reshape((2,5)).T\n aligned = face_preprocess.preprocess(img, bbox=bbox, landmark = landmark, image_size=str_image_size)\n aligned = np.transpose(aligned, (2,0,1))\n ret.append(aligned)\n return ret\n\n def get_feature_impl(self, face_img, norm):\n embedding = None\n for flipid in [0,1]:\n _img = np.copy(face_img)\n if flipid==1:\n do_flip(_img)\n #nimg = np.zeros(_img.shape, dtype=np.float32)\n #nimg[:,ppatch[1]:ppatch[3],ppatch[0]:ppatch[2]] = _img[:, ppatch[1]:ppatch[3], ppatch[0]:ppatch[2]]\n #_img = nimg\n input_blob = np.expand_dims(_img, axis=0)\n self.model.arg_params[\"data\"] = mx.nd.array(input_blob, self.model.ctx)\n self.model.arg_params[\"softmax_label\"] = mx.nd.empty((1,), self.model.ctx)\n exe = self.model.sym.bind(self.model.ctx, self.model.arg_params ,args_grad=None, grad_req=\"null\", aux_states=self.model.aux_params)\n exe.forward(is_train=False)\n _embedding = exe.outputs[0].asnumpy()\n #print(_embedding.shape)\n if embedding is None:\n embedding = _embedding\n else:\n embedding += _embedding\n if norm:\n embedding = sklearn.preprocessing.normalize(embedding)\n return embedding\n\n def get_feature(self, face_img, norm=True):\n #aligned_face = self.get_aligned_face(img, force)\n #if aligned_face is None:\n # return None\n return self.get_feature_impl(face_img, norm)\n\n def is_same_id(self, source_img, target_img_list):\n source_face = self.get_aligned_face(source_img, True)\n print('source face', source_face.shape)\n target_face_list = []\n pp = 0\n for img in target_img_list:\n target_force = False\n if pp==len(target_img_list)-1 and len(target_face_list)==0:\n target_force = True\n target_face = self.get_aligned_face(img, target_force)\n if target_face is not None:\n target_face_list.append(target_face)\n pp+=1\n print('target face', len(target_face_list)) \n source_feature = self.get_feature(source_face, True)\n target_feature = None\n for target_face in target_face_list:\n _feature = self.get_feature(target_face, False)\n if target_feature is None:\n target_feature = _feature\n else:\n target_feature += _feature\n target_feature = sklearn.preprocessing.normalize(target_feature)\n #sim = np.dot(source_feature, target_feature.T)\n diff = np.subtract(source_feature, target_feature)\n dist = np.sum(np.square(diff),1)\n print('dist', dist)\n #print(sim, dist)\n if dist<=self.threshold:\n return True\n else:\n return False\n\n def sim(self, source_img, target_img_list):\n print('sim start')\n source_face = self.get_aligned_face(source_img, True)\n print('source face', source_face.shape)\n target_face_list = []\n pp = 0\n for img in target_img_list:\n target_force = False\n if pp==len(target_img_list)-1 and len(target_face_list)==0:\n target_force = True\n target_face = 
self.get_aligned_face(img, target_force)\n if target_face is not None:\n target_face_list.append(target_face)\n pp+=1\n print('target face', len(target_face_list)) \n source_feature = self.get_feature(source_face, True)\n target_feature = None\n sim_list = []\n for target_face in target_face_list:\n _feature = self.get_feature(target_face, True)\n _sim = np.dot(source_feature, _feature.T)\n sim_list.append(_sim)\n return np.max(sim_list)\n" ]
[ [ "numpy.square", "numpy.dot", "tensorflow.Graph", "numpy.expand_dims", "numpy.power", "numpy.fliplr", "numpy.asarray", "numpy.squeeze", "numpy.subtract", "tensorflow.ConfigProto", "numpy.max", "sklearn.preprocessing.normalize", "numpy.argmax", "numpy.copy", "tensorflow.Session", "numpy.transpose", "numpy.vstack" ] ]
pydojo/numexpr_zh
[ "3cc8eaa534373a80140229ef7ab79d1210f934a2" ]
[ "numexpr/necompiler.py" ]
[ "###################################################################\n# Numexpr - Fast numerical array expression evaluator for NumPy.\n#\n# License: MIT\n# Author: See AUTHORS.txt\n#\n# See LICENSE.txt and LICENSES/*.txt for details about copyright and\n# rights to use.\n####################################################################\n\nimport __future__\nimport sys\nimport numpy\nimport threading\n\nis_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE\nfrom numexpr import interpreter, expressions, use_vml\nfrom numexpr.utils import CacheDict\n\n# Declare a double type that does not exist in Python space\ndouble = numpy.double\nif sys.version_info[0] < 3:\n int_ = int\n long_ = long\nelse:\n int_ = numpy.int32\n long_ = numpy.int64\n\ntypecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float',\n 'd': 'double', 'c': 'complex', 's': 'bytes', 'n': 'none'}\nkind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f',\n 'double': 'd', 'complex': 'c', 'bytes': 's', 'none': 'n'}\ntype_to_typecode = {bool: 'b', int_: 'i', long_: 'l', float: 'f',\n double: 'd', complex: 'c', bytes: 's'}\ntype_to_kind = expressions.type_to_kind\nkind_to_type = expressions.kind_to_type\ndefault_type = kind_to_type[expressions.default_kind]\n\n# VML functions that are implemented in numexpr\nvml_functions = [\n \"div\", # interp_body.cpp\n \"inv\", # interp_body.cpp\n \"pow\", # interp_body.cpp\n # Keep the rest of this list in sync with the ones listed in functions.hpp\n \"sqrt\",\n \"sin\",\n \"cos\",\n \"tan\",\n \"arcsin\",\n \"arccos\",\n \"arctan\",\n \"sinh\",\n \"cosh\",\n \"tanh\",\n \"arcsinh\",\n \"arccosh\",\n \"arctanh\",\n \"log\",\n \"log1p\",\n \"log10\",\n \"exp\",\n \"expm1\",\n \"absolute\",\n \"conjugate\",\n \"arctan2\",\n \"fmod\",\n \"ceil\",\n \"floor\"\n ]\n\n# Final additions for Python 3 (mainly for PyTables needs)\nif sys.version_info[0] > 2:\n typecode_to_kind['s'] = 'str'\n kind_to_typecode['str'] = 's'\n type_to_typecode[str] = 's'\n\nscalar_constant_kinds = kind_to_typecode.keys()\n\n\nclass ASTNode(object):\n \"\"\"Abstract Syntax Tree node.\n\n Members:\n\n astType -- type of node (op, constant, variable, raw, or alias)\n astKind -- the type of the result (bool, float, etc.)\n value -- value associated with this node.\n An opcode, numerical value, a variable name, etc.\n children -- the children below this node\n reg -- the register assigned to the result for this node.\n \"\"\"\n cmpnames = ['astType', 'astKind', 'value', 'children']\n\n def __init__(self, astType='generic', astKind='unknown',\n value=None, children=()):\n object.__init__(self)\n self.astType = astType\n self.astKind = astKind\n self.value = value\n self.children = tuple(children)\n self.reg = None\n\n def __eq__(self, other):\n if self.astType == 'alias':\n self = self.value\n if other.astType == 'alias':\n other = other.value\n if not isinstance(other, ASTNode):\n return False\n for name in self.cmpnames:\n if getattr(self, name) != getattr(other, name):\n return False\n return True\n \n def __lt__(self,other):\n # RAM: this is a fix for issue #88 whereby sorting on constants \n # that may be of astKind == 'complex' but type(self.value) == int or float\n # Here we let NumPy sort as it will cast data properly for comparison \n # when the Python built-ins will raise an error.\n if self.astType == 'constant':\n if self.astKind == other.astKind:\n return numpy.array(self.value) < numpy.array(other.value)\n return self.astKind < other.astKind\n else:\n raise 
TypeError( 'Sorting not implemented for astType: %s'%self.astType )\n\n def __hash__(self):\n if self.astType == 'alias':\n self = self.value\n return hash((self.astType, self.astKind, self.value, self.children))\n\n def __str__(self):\n return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind,\n self.value, self.children, self.reg)\n\n def __repr__(self):\n return '<AST object at %s>' % id(self)\n\n def key(self):\n return (self.astType, self.astKind, self.value, self.children)\n\n def typecode(self):\n return kind_to_typecode[self.astKind]\n\n def postorderWalk(self):\n for c in self.children:\n for w in c.postorderWalk():\n yield w\n yield self\n\n def allOf(self, *astTypes):\n astTypes = set(astTypes)\n for w in self.postorderWalk():\n if w.astType in astTypes:\n yield w\n\n\ndef expressionToAST(ex):\n \"\"\"Take an expression tree made out of expressions.ExpressionNode,\n and convert to an AST tree.\n\n This is necessary as ExpressionNode overrides many methods to act\n like a number.\n \"\"\"\n return ASTNode(ex.astType, ex.astKind, ex.value,\n [expressionToAST(c) for c in ex.children])\n\n\ndef sigPerms(s):\n \"\"\"Generate all possible signatures derived by upcasting the given\n signature.\n \"\"\"\n codes = 'bilfdc'\n if not s:\n yield ''\n elif s[0] in codes:\n start = codes.index(s[0])\n for x in codes[start:]:\n for y in sigPerms(s[1:]):\n yield x + y\n elif s[0] == 's': # numbers shall not be cast to strings\n for y in sigPerms(s[1:]):\n yield 's' + y\n else:\n yield s\n\n\ndef typeCompileAst(ast):\n \"\"\"Assign appropriate types to each node in the AST.\n\n Will convert opcodes and functions to appropriate upcast version,\n and add \"cast\" ops if needed.\n \"\"\"\n children = list(ast.children)\n if ast.astType == 'op':\n retsig = ast.typecode()\n basesig = ''.join(x.typecode() for x in list(ast.children))\n # Find some operation that will work on an acceptable casting of args.\n for sig in sigPerms(basesig):\n value = (ast.value + '_' + retsig + sig).encode('ascii')\n if value in interpreter.opcodes:\n break\n else:\n for sig in sigPerms(basesig):\n funcname = (ast.value + '_' + retsig + sig).encode('ascii')\n if funcname in interpreter.funccodes:\n value = ('func_%sn' % (retsig + sig)).encode('ascii')\n children += [ASTNode('raw', 'none',\n interpreter.funccodes[funcname])]\n break\n else:\n raise NotImplementedError(\n \"couldn't find matching opcode for '%s'\"\n % (ast.value + '_' + retsig + basesig))\n # First just cast constants, then cast variables if necessary:\n for i, (have, want) in enumerate(zip(basesig, sig)):\n if have != want:\n kind = typecode_to_kind[want]\n if children[i].astType == 'constant':\n children[i] = ASTNode('constant', kind, children[i].value)\n else:\n opname = \"cast\"\n children[i] = ASTNode('op', kind, opname, [children[i]])\n else:\n value = ast.value\n children = ast.children\n return ASTNode(ast.astType, ast.astKind, value,\n [typeCompileAst(c) for c in children])\n\n\nclass Register(object):\n \"\"\"Abstraction for a register in the VM.\n\n Members:\n node -- the AST node this corresponds to\n temporary -- True if this isn't an input or output\n immediate -- not a register, but an immediate value\n n -- the physical register number.\n None if no number assigned yet.\n \"\"\"\n\n def __init__(self, astnode, temporary=False):\n self.node = astnode\n self.temporary = temporary\n self.immediate = False\n self.n = None\n\n def __str__(self):\n if self.temporary:\n name = 'Temporary'\n else:\n name = 'Register'\n return '%s(%s, %s, %s)' % 
(name, self.node.astType,\n self.node.astKind, self.n,)\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Immediate(Register):\n \"\"\"Representation of an immediate (integer) operand, instead of\n a register.\n \"\"\"\n\n def __init__(self, astnode):\n Register.__init__(self, astnode)\n self.immediate = True\n\n def __str__(self):\n return 'Immediate(%d)' % (self.node.value,)\n\n\ndef stringToExpression(s, types, context):\n \"\"\"Given a string, convert it to a tree of ExpressionNode's.\n \"\"\"\n old_ctx = expressions._context.get_current_context()\n try:\n expressions._context.set_new_context(context)\n # first compile to a code object to determine the names\n if context.get('truediv', False):\n flags = __future__.division.compiler_flag\n else:\n flags = 0\n c = compile(s, '<expr>', 'eval', flags)\n # make VariableNode's for the names\n names = {}\n for name in c.co_names:\n if name == \"None\":\n names[name] = None\n elif name == \"True\":\n names[name] = True\n elif name == \"False\":\n names[name] = False\n else:\n t = types.get(name, default_type)\n names[name] = expressions.VariableNode(name, type_to_kind[t])\n names.update(expressions.functions)\n # now build the expression\n ex = eval(c, names)\n if expressions.isConstant(ex):\n ex = expressions.ConstantNode(ex, expressions.getKind(ex))\n elif not isinstance(ex, expressions.ExpressionNode):\n raise TypeError(\"unsupported expression type: %s\" % type(ex))\n finally:\n expressions._context.set_new_context(old_ctx)\n return ex\n\n\ndef isReduction(ast):\n prefixes = (b'sum_', b'prod_', b'min_', b'max_')\n return any(ast.value.startswith(p) for p in prefixes)\n\n\ndef getInputOrder(ast, input_order=None):\n \"\"\"Derive the input order of the variables in an expression.\n \"\"\"\n variables = {}\n for a in ast.allOf('variable'):\n variables[a.value] = a\n variable_names = set(variables.keys())\n\n if input_order:\n if variable_names != set(input_order):\n raise ValueError(\n \"input names (%s) don't match those found in expression (%s)\"\n % (input_order, variable_names))\n\n ordered_names = input_order\n else:\n ordered_names = list(variable_names)\n ordered_names.sort()\n ordered_variables = [variables[v] for v in ordered_names]\n return ordered_variables\n\n\ndef convertConstantToKind(x, kind):\n # Exception for 'float' types that will return the NumPy float32 type\n if kind == 'float':\n return numpy.float32(x)\n elif sys.version_info[0] >= 3 and isinstance(x,str):\n return x.encode('ascii')\n return kind_to_type[kind](x)\n\n\ndef getConstants(ast):\n '''\n RAM: implemented magic method __lt__ for ASTNode to fix issues\n #88 and #209. 
The following test code works now, as does the test suite.\n import numexpr as ne\n a = 1 + 3j; b = 5.0\n ne.evaluate( 'a*2 + 15j - b' )\n '''\n constants_order = sorted( ast.allOf('constant') )\n constants = [convertConstantToKind(a.value, a.astKind)\n for a in constants_order]\n return constants_order, constants\n\n\ndef sortNodesByOrder(nodes, order):\n order_map = {}\n for i, (_, v, _) in enumerate(order):\n order_map[v] = i\n dec_nodes = [(order_map[n.value], n) for n in nodes]\n dec_nodes.sort()\n return [a[1] for a in dec_nodes]\n\n\ndef assignLeafRegisters(inodes, registerMaker):\n \"\"\"Assign new registers to each of the leaf nodes.\n \"\"\"\n leafRegisters = {}\n for node in inodes:\n key = node.key()\n if key in leafRegisters:\n node.reg = leafRegisters[key]\n else:\n node.reg = leafRegisters[key] = registerMaker(node)\n\n\ndef assignBranchRegisters(inodes, registerMaker):\n \"\"\"Assign temporary registers to each of the branch nodes.\n \"\"\"\n for node in inodes:\n node.reg = registerMaker(node, temporary=True)\n\n\ndef collapseDuplicateSubtrees(ast):\n \"\"\"Common subexpression elimination.\n \"\"\"\n seen = {}\n aliases = []\n for a in ast.allOf('op'):\n if a in seen:\n target = seen[a]\n a.astType = 'alias'\n a.value = target\n a.children = ()\n aliases.append(a)\n else:\n seen[a] = a\n # Set values and registers so optimizeTemporariesAllocation\n # doesn't get confused\n for a in aliases:\n while a.value.astType == 'alias':\n a.value = a.value.value\n return aliases\n\n\ndef optimizeTemporariesAllocation(ast):\n \"\"\"Attempt to minimize the number of temporaries needed, by\n reusing old ones.\n \"\"\"\n nodes = [n for n in ast.postorderWalk() if n.reg.temporary]\n users_of = dict((n.reg, set()) for n in nodes)\n\n node_regs = dict((n, set(c.reg for c in n.children if c.reg.temporary))\n for n in nodes)\n if nodes and nodes[-1] is not ast:\n nodes_to_check = nodes + [ast]\n else:\n nodes_to_check = nodes\n for n in nodes_to_check:\n for c in n.children:\n if c.reg.temporary:\n users_of[c.reg].add(n)\n\n unused = dict([(tc, set()) for tc in scalar_constant_kinds])\n for n in nodes:\n for c in n.children:\n reg = c.reg\n if reg.temporary:\n users = users_of[reg]\n users.discard(n)\n if not users:\n unused[reg.node.astKind].add(reg)\n if unused[n.astKind]:\n reg = unused[n.astKind].pop()\n users_of[reg] = users_of[n.reg]\n n.reg = reg\n\n\ndef setOrderedRegisterNumbers(order, start):\n \"\"\"Given an order of nodes, assign register numbers.\n \"\"\"\n for i, node in enumerate(order):\n node.reg.n = start + i\n return start + len(order)\n\n\ndef setRegisterNumbersForTemporaries(ast, start):\n \"\"\"Assign register numbers for temporary registers, keeping track of\n aliases and handling immediate operands.\n \"\"\"\n seen = 0\n signature = ''\n aliases = []\n for node in ast.postorderWalk():\n if node.astType == 'alias':\n aliases.append(node)\n node = node.value\n if node.reg.immediate:\n node.reg.n = node.value\n continue\n reg = node.reg\n if reg.n is None:\n reg.n = start + seen\n seen += 1\n signature += reg.node.typecode()\n for node in aliases:\n node.reg = node.value.reg\n return start + seen, signature\n\n\ndef convertASTtoThreeAddrForm(ast):\n \"\"\"Convert an AST to a three address form.\n\n Three address form is (op, reg1, reg2, reg3), where reg1 is the\n destination of the result of the instruction.\n\n I suppose this should be called three register form, but three\n address form is found in compiler theory.\n \"\"\"\n return [(node.value, node.reg) + 
tuple([c.reg for c in node.children])\n for node in ast.allOf('op')]\n\n\ndef compileThreeAddrForm(program):\n \"\"\"Given a three address form of the program, compile it a string that\n the VM understands.\n \"\"\"\n\n def nToChr(reg):\n if reg is None:\n return b'\\xff'\n elif reg.n < 0:\n raise ValueError(\"negative value for register number %s\" % reg.n)\n else:\n if sys.version_info[0] < 3:\n return chr(reg.n)\n else:\n # int.to_bytes is not available in Python < 3.2\n #return reg.n.to_bytes(1, sys.byteorder)\n return bytes([reg.n])\n\n def quadrupleToString(opcode, store, a1=None, a2=None):\n cop = chr(interpreter.opcodes[opcode]).encode('ascii')\n cs = nToChr(store)\n ca1 = nToChr(a1)\n ca2 = nToChr(a2)\n return cop + cs + ca1 + ca2\n\n def toString(args):\n while len(args) < 4:\n args += (None,)\n opcode, store, a1, a2 = args[:4]\n s = quadrupleToString(opcode, store, a1, a2)\n l = [s]\n args = args[4:]\n while args:\n s = quadrupleToString(b'noop', *args[:3])\n l.append(s)\n args = args[3:]\n return b''.join(l)\n\n prog_str = b''.join([toString(t) for t in program])\n return prog_str\n\n\ncontext_info = [\n ('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'),\n ('truediv', (False, True, 'auto'), 'auto')\n]\n\n\ndef getContext(kwargs, frame_depth=1):\n d = kwargs.copy()\n context = {}\n for name, allowed, default in context_info:\n value = d.pop(name, default)\n if value in allowed:\n context[name] = value\n else:\n raise ValueError(\"'%s' must be one of %s\" % (name, allowed))\n\n if d:\n raise ValueError(\"Unknown keyword argument '%s'\" % d.popitem()[0])\n if context['truediv'] == 'auto':\n caller_globals = sys._getframe(frame_depth + 1).f_globals\n context['truediv'] = caller_globals.get('division', None) == __future__.division\n\n return context\n\n\ndef precompile(ex, signature=(), context={}):\n \"\"\"Compile the expression to an intermediate form.\n \"\"\"\n types = dict(signature)\n input_order = [name for (name, type_) in signature]\n\n if isinstance(ex, (str, unicode)):\n ex = stringToExpression(ex, types, context)\n\n # the AST is like the expression, but the node objects don't have\n # any odd interpretations\n\n ast = expressionToAST(ex)\n\n if ex.astType != 'op':\n ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,))\n\n ast = typeCompileAst(ast)\n\n aliases = collapseDuplicateSubtrees(ast)\n\n assignLeafRegisters(ast.allOf('raw'), Immediate)\n assignLeafRegisters(ast.allOf('variable', 'constant'), Register)\n assignBranchRegisters(ast.allOf('op'), Register)\n\n # assign registers for aliases\n for a in aliases:\n a.reg = a.value.reg\n\n input_order = getInputOrder(ast, input_order)\n constants_order, constants = getConstants(ast)\n\n if isReduction(ast):\n ast.reg.temporary = False\n\n optimizeTemporariesAllocation(ast)\n\n ast.reg.temporary = False\n r_output = 0\n ast.reg.n = 0\n\n r_inputs = r_output + 1\n r_constants = setOrderedRegisterNumbers(input_order, r_inputs)\n r_temps = setOrderedRegisterNumbers(constants_order, r_constants)\n r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps)\n\n threeAddrProgram = convertASTtoThreeAddrForm(ast)\n input_names = tuple([a.value for a in input_order])\n signature = ''.join(type_to_typecode[types.get(x, default_type)]\n for x in input_names)\n return threeAddrProgram, signature, tempsig, constants, input_names\n\n\ndef NumExpr(ex, signature=(), **kwargs):\n \"\"\"\n Compile an expression built using E.<variable> variables to a function.\n\n ex can also be specified as a string 
\"2*a+3*b\".\n\n The order of the input variables and their types can be specified using the\n signature parameter, which is a list of (name, type) pairs.\n\n Returns a `NumExpr` object containing the compiled function.\n \"\"\"\n # NumExpr can be called either directly by the end-user, in which case\n # kwargs need to be sanitized by getContext, or by evaluate,\n # in which case kwargs are in already sanitized.\n # In that case frame_depth is wrong (it should be 2) but it doesn't matter\n # since it will not be used (because truediv='auto' has already been\n # translated to either True or False).\n\n context = getContext(kwargs, frame_depth=1)\n threeAddrProgram, inputsig, tempsig, constants, input_names = precompile(ex, signature, context)\n program = compileThreeAddrForm(threeAddrProgram)\n return interpreter.NumExpr(inputsig.encode('ascii'),\n tempsig.encode('ascii'),\n program, constants, input_names)\n\n\ndef disassemble(nex):\n \"\"\"\n Given a NumExpr object, return a list which is the program disassembled.\n \"\"\"\n rev_opcodes = {}\n for op in interpreter.opcodes:\n rev_opcodes[interpreter.opcodes[op]] = op\n r_constants = 1 + len(nex.signature)\n r_temps = r_constants + len(nex.constants)\n\n def getArg(pc, offset):\n if sys.version_info[0] < 3:\n arg = ord(nex.program[pc + offset])\n op = rev_opcodes.get(ord(nex.program[pc]))\n else:\n arg = nex.program[pc + offset]\n op = rev_opcodes.get(nex.program[pc])\n try:\n code = op.split(b'_')[1][offset - 1]\n except IndexError:\n return None\n if sys.version_info[0] > 2:\n # int.to_bytes is not available in Python < 3.2\n #code = code.to_bytes(1, sys.byteorder)\n code = bytes([code])\n if arg == 255:\n return None\n if code != b'n':\n if arg == 0:\n return b'r0'\n elif arg < r_constants:\n return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii')\n elif arg < r_temps:\n return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii')\n else:\n return ('t%d' % (arg,)).encode('ascii')\n else:\n return arg\n\n source = []\n for pc in range(0, len(nex.program), 4):\n if sys.version_info[0] < 3:\n op = rev_opcodes.get(ord(nex.program[pc]))\n else:\n op = rev_opcodes.get(nex.program[pc])\n dest = getArg(pc, 1)\n arg1 = getArg(pc, 2)\n arg2 = getArg(pc, 3)\n source.append((op, dest, arg1, arg2))\n return source\n\n\ndef getType(a):\n kind = a.dtype.kind\n if kind == 'b':\n return bool\n if kind in 'iu':\n if a.dtype.itemsize > 4:\n return long_ # ``long`` is for integers of more than 32 bits\n if kind == 'u' and a.dtype.itemsize == 4:\n return long_ # use ``long`` here as an ``int`` is not enough\n return int_\n if kind == 'f':\n if a.dtype.itemsize > 4:\n return double # ``double`` is for floats of more than 32 bits\n return float\n if kind == 'c':\n return complex\n if kind == 'S':\n return bytes\n raise ValueError(\"unknown type %s\" % a.dtype.name)\n\n\ndef getExprNames(text, context):\n ex = stringToExpression(text, {}, context)\n ast = expressionToAST(ex)\n input_order = getInputOrder(ast, None)\n #try to figure out if vml operations are used by expression\n if not use_vml:\n ex_uses_vml = False\n else:\n for node in ast.postorderWalk():\n if node.astType == 'op' and node.value in vml_functions:\n ex_uses_vml = True\n break\n else:\n ex_uses_vml = False\n\n return [a.value for a in input_order], ex_uses_vml\n\n\ndef getArguments(names, local_dict=None, global_dict=None):\n \"\"\"Get the arguments based on the names.\"\"\"\n call_frame = sys._getframe(2)\n\n clear_local_dict = False\n if local_dict is None:\n 
local_dict = call_frame.f_locals\n clear_local_dict = True\n try:\n frame_globals = call_frame.f_globals\n if global_dict is None:\n global_dict = frame_globals\n\n # If `call_frame` is the top frame of the interpreter we can't clear its \n # `local_dict`, because it is actually the `global_dict`.\n clear_local_dict = clear_local_dict and not frame_globals is local_dict\n\n arguments = []\n for name in names:\n try:\n a = local_dict[name]\n except KeyError:\n a = global_dict[name]\n arguments.append(numpy.asarray(a))\n finally:\n # If we generated local_dict via an explicit reference to f_locals,\n # clear the dict to prevent creating extra ref counts in the caller's scope\n # See https://github.com/pydata/numexpr/issues/310\n if clear_local_dict:\n local_dict.clear()\n\n return arguments\n\n\n# Dictionaries for caching variable names and compiled expressions\n_names_cache = CacheDict(256)\n_numexpr_cache = CacheDict(256)\n_numexpr_last = {}\n\nevaluate_lock = threading.Lock()\n\ndef evaluate(ex, local_dict=None, global_dict=None,\n out=None, order='K', casting='safe', **kwargs):\n \"\"\"Evaluate a simple array expression element-wise, using the new iterator.\n\n ex is a string forming an expression, like \"2*a+3*b\". The values for \"a\"\n and \"b\" will by default be taken from the calling function's frame\n (through use of sys._getframe()). Alternatively, they can be specifed\n using the 'local_dict' or 'global_dict' arguments.\n\n Parameters\n ----------\n\n local_dict : dictionary, optional\n A dictionary that replaces the local operands in current frame.\n\n global_dict : dictionary, optional\n A dictionary that replaces the global operands in current frame.\n\n out : NumPy array, optional\n An existing array where the outcome is going to be stored. Care is\n required so that this array has the same shape and type than the\n actual outcome of the computation. Useful for avoiding unnecessary\n new array allocations.\n\n order : {'C', 'F', 'A', or 'K'}, optional\n Controls the iteration order for operands. 'C' means C order, 'F'\n means Fortran order, 'A' means 'F' order if all the arrays are\n Fortran contiguous, 'C' order otherwise, and 'K' means as close to\n the order the array elements appear in memory as possible. For\n efficient computations, typically 'K'eep order (the default) is\n desired.\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur when making a copy or\n buffering. 
Setting this to 'unsafe' is not recommended, as it can\n adversely affect accumulations.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n \"\"\"\n global _numexpr_last\n if not isinstance(ex, (str, unicode)):\n raise ValueError(\"must specify expression as a string\")\n # Get the names for this expression\n context = getContext(kwargs, frame_depth=1)\n expr_key = (ex, tuple(sorted(context.items())))\n if expr_key not in _names_cache:\n _names_cache[expr_key] = getExprNames(ex, context)\n names, ex_uses_vml = _names_cache[expr_key]\n arguments = getArguments(names, local_dict, global_dict)\n\n # Create a signature\n signature = [(name, getType(arg)) for (name, arg) in\n zip(names, arguments)]\n\n # Look up numexpr if possible.\n numexpr_key = expr_key + (tuple(signature),)\n try:\n compiled_ex = _numexpr_cache[numexpr_key]\n except KeyError:\n compiled_ex = _numexpr_cache[numexpr_key] = NumExpr(ex, signature, **context)\n kwargs = {'out': out, 'order': order, 'casting': casting,\n 'ex_uses_vml': ex_uses_vml}\n _numexpr_last = dict(ex=compiled_ex, argnames=names, kwargs=kwargs)\n with evaluate_lock:\n return compiled_ex(*arguments, **kwargs)\n\n\ndef re_evaluate(local_dict=None):\n \"\"\"Re-evaluate the previous executed array expression without any check.\n\n This is meant for accelerating loops that are re-evaluating the same\n expression repeatedly without changing anything else than the operands.\n If unsure, use evaluate() which is safer.\n\n Parameters\n ----------\n\n local_dict : dictionary, optional\n A dictionary that replaces the local operands in current frame.\n\n \"\"\"\n try:\n compiled_ex = _numexpr_last['ex']\n except KeyError:\n raise RuntimeError(\"not a previous evaluate() execution found\")\n argnames = _numexpr_last['argnames']\n args = getArguments(argnames, local_dict)\n kwargs = _numexpr_last['kwargs']\n with evaluate_lock:\n return compiled_ex(*args, **kwargs)\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.float32" ] ]
janzill/fast-trips
[ "02597df56ed152e9993374ba0502d0de444da234" ]
[ "tests/test_distance.py" ]
[ "from __future__ import print_function\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport partridge as ptg\nimport pytest\n\nfrom fasttrips import Trip\nfrom fasttrips import Util\n\nTEST_NETWORKS = {\"Seattle_Region\": \"psrc_1_1\",\n \"Springfield\" : \"vermont\"}\n\nEXAMPLES_DIR = os.path.join(os.getcwd(), \"fasttrips\", \"Examples\")\n\[email protected](scope=\"module\")\ndef network_results(network):\n results = {\n 'Seattle_Region':\n {\n 't1': [0.0000, 0.18204, 0.85835, 1.59093, 1.73259],\n 't55': [0.0000, 1.40889],\n 't140': [0.0000, 0.39525, 0.91519],\n },\n 'Seattle_Region':\n {\n '690': [0.00000, 0.24679, 0.52990, 0.58124, 0.68396, 0.82198,\n 1.10185, 1.30837, 1.63678, 1.68605, 1.88833, 2.01921,\n 2.14929, 2.27598, 2.39962, 2.52896, 2.65403, 2.77906,\n 2.90012, 3.40607, 4.02007, 7.30269, 7.77643, 7.93774,\n 8.13528, 8.29669, 8.43537, 8.60926, 8.77880, 8.99127],\n '3942': [0.00000, 2.98571, 10.86012, 11.00405, 11.21411, 11.41179,\n 11.69441, 11.85530, 12.20669, 12.26657, 12.41157],\n '4023': [0.00000, 0.12492, 0.48199, 7.36683, 9.35049, 10.72752,\n 11.01201, 11.60369, 13.62171, 17.34048, 17.62048, 19.08759],\n }\n }\n yield results[network]\n\ndef test_calculate_distance_miles():\n orig_lat, orig_lon = 32.707431, -117.157058\n dest_lat, dest_lon = 32.740792, -117.211333\n cols = ['orig_lat','orig_lon','dest_lat','dest_lon','dist']\n\n df = pd.DataFrame([[orig_lat,orig_lon,dest_lat,dest_lon,np.nan]],\n columns=cols)\n\n Util.calculate_distance_miles(df, cols[0], cols[1], cols[2], cols[3], cols[4])\n distance = df[cols[4]][0]\n\n print('test_calculate_distance_miles: {:.5f} mi'.format(distance))\n assert abs(distance - 3.9116) < 0.0001\n" ]
[ [ "pandas.DataFrame" ] ]
PatKing27/polarimetry-analysis
[ "0103b401c96b99fdba373205be274bc9e7350940" ]
[ "Rotator.py" ]
[ "#*******************************Rotator.py*************************************#\n#\n# Author: Patrick King, Date: 02/06/18\n#\n# Update (PKK) 04/17/18: Updates to ensure compatibility with Observer and\n# Observable.\n#\n#******************************************************************************#\n\nfrom math import *\nimport numpy as np\nfrom scipy.ndimage.interpolation import rotate\n\nclass Rotator(object):\n\n # Constructor for the Rotator class. Instantiates the roll, pitch, and yaw\n # assuming the base line of sight is the z-axis.\n def __init__(self, args):\n self.roll = args[0]\n self.pitch = args[1]\n self.yaw = args[2]\n self.N = args[3]\n self.order = args[4]\n\n # Internal method: Rotate an array, with interpolation.\n def __Rotate(self, A, angle, axis):\n B = np.zeros(np.shape(A))\n if axis == 0:\n for i in range(self.N):\n B[:,:,i] = rotate(A[:,:,i], angle, reshape=False,\n order=self.order,mode='wrap')\n elif axis == 1:\n for i in range(self.N):\n B[:,i,:] = rotate(A[:,i,:], angle, reshape=False,\n order=self.order,mode='wrap')\n else:\n for i in range(self.N):\n B[i,:,:] = rotate(A[i,:,:], angle, reshape=False,\n order=self.order,mode='wrap')\n return B\n\n # Rotate Scalar Field, defined by S\n def ScalarRotate(self, S):\n if self.roll != 0.0:\n S = self.__Rotate(S, self.roll, axis = 2)\n if self.pitch != 0.0:\n S = self.__Rotate(S, self.pitch, axis = 0)\n if self.yaw != 0.0:\n S = self.__Rotate(S, self.yaw, axis = 1)\n return S\n\n # Rotate Vector Field with coordinate components C1, C2, C3\n def VectorRotate(self, C1, C2, C3):\n if self.roll != 0.0:\n C1, C2, C3 = self.__VectorRoll(C1,C2,C3)\n if self.pitch != 0.0:\n C1, C2, C3 = self.__VectorPitch(C1,C2,C3)\n if self.yaw != 0.0:\n C1, C2, C3 = self.__VectorYaw(C1,C2,C3)\n C1R = self.ScalarRotate(C1)\n C2R = self.ScalarRotate(C2)\n C3R = self.ScalarRotate(C3)\n return C1R, C2R, C3R\n\n def __VectorRoll(self, C1, C2, C3):\n cosb = np.cos(np.deg2rad(self.roll))\n sinb = np.sin(np.deg2rad(self.roll))\n C1r = C1\n C2r = cosb*C2 - sinb*C3\n C3r = sinb*C2 + cosb*C3\n return C1r, C2r, C3r\n\n def __VectorPitch(self, C1, C2, C3):\n cosa = np.cos(np.deg2rad(self.pitch))\n sina = np.sin(np.deg2rad(self.pitch))\n C1r = cosa*C1 - sina*C2\n C2r = sina*C1 + cosa*C2\n C3r = C3\n return C1r, C2r, C3r\n\n def __VectorYaw(self, C1, C2, C3):\n cosc = np.cos(np.deg2rad(self.yaw))\n sinc = np.sin(np.deg2rad(self.yaw))\n C1r = cosc*C1 + sinc*C3\n C2r = C2\n C3r = -sinc*C1 + cosc*C3\n return C1r, C2r, C3r\n" ]
[ [ "scipy.ndimage.interpolation.rotate", "numpy.deg2rad", "numpy.shape" ] ]
intsco/deep-learning-v2-pytorch
[ "00c8e371b6dc783fc5638348d93c862de3609765" ]
[ "project-bikesharing/my_answers.py" ]
[ "import numpy as np\n\n\nclass NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1 / (1 + np.exp(-x))\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n\n def train(self, features, targets):\n ''' Train the network on batch of features and targets. \n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n \n final_outputs, hidden_outputs = self.forward_pass_train(X) # Implement the forward pass function below\n # Implement the backproagation function below\n delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y, \n delta_weights_i_h, delta_weights_h_o)\n self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)\n\n\n def forward_pass_train(self, X):\n ''' Implement forward pass here \n \n Arguments\n ---------\n X: features batch\n\n '''\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X, self.weights_input_to_hidden)\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)\n #final_outputs = self.activation_function(final_inputs)\n final_outputs = final_inputs\n \n return final_outputs, hidden_outputs\n\n def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):\n ''' Implement backpropagation\n \n Arguments\n ---------\n final_outputs: output from forward pass\n y: target (i.e. 
label) batch\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n\n '''\n #### Implement the backward pass here ####\n ### Backward pass ###\n \n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs\n \n # TODO: Backpropagated error terms - Replace these values with your calculations.\n #output_error_term = error * np.multiply(final_outputs, (1 - final_outputs))\n output_error_term = error\n #print(output_error_term)\n \n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T).squeeze()\n \n #print(self.weights_hidden_to_output * output_error_term)\n #print(np.multiply(hidden_outputs, (1 - hidden_outputs)))\n hidden_error_term = np.multiply(hidden_error, np.multiply(hidden_outputs, (1 - hidden_outputs)))\n #print(hidden_error_term)\n \n # Weight step (input to hidden)\n delta_weights_i_h += np.outer(X, hidden_error_term)\n # Weight step (hidden to output)\n delta_weights_h_o += np.outer(hidden_outputs, output_error_term)\n \n return delta_weights_i_h, delta_weights_h_o\n\n def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):\n ''' Update weights on gradient descent step\n \n Arguments\n ---------\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n n_records: number of records\n\n '''\n self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step\n\n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n ---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n #hidden_inputs = None # signals into hidden layer\n #hidden_outputs = None # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n #final_inputs = None # signals into final output layer\n #final_outputs = None # signals from final output layer \n \n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(features, self.weights_input_to_hidden)\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)\n #final_outputs = self.activation_function(final_inputs)\n final_outputs = final_inputs\n \n return final_outputs\n\n\n#########################################################\n# Set your hyperparameters here\n##########################################################\niterations = 4000\nlearning_rate = 0.7\nhidden_nodes = 15\noutput_nodes = 1\n" ]
[ [ "numpy.dot", "numpy.multiply", "numpy.random.normal", "numpy.outer", "numpy.exp", "numpy.zeros" ] ]
gehuangyi20/random_spiking
[ "c98b550420ae4061b9d47ca475e86c981caf5514", "c98b550420ae4061b9d47ca475e86c981caf5514" ]
[ "RsNet/plot_transferability.py", "RsNet/utils.py" ]
[ "import os\nimport sys\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nimport json\nimport math\n\nargc = len(sys.argv)\nif argc < 2:\n print('usage: plot_transferability [dir] [filename]')\n sys.exit()\n\n_dir = sys.argv[1]\nimg_filename = sys.argv[2] if argc >= 3 else 'summary.png'\n\nconfig_fp = open(os.path.join(_dir, \"config.json\"), \"rb\")\njson_str = config_fp.read()\nconfig_fp.close()\n\nconfig = json.loads(json_str.decode())\n\nadv_model_name = config['adv_model_name']\nadv_model_name_st = config['adv_model_name_st']\nadv_confidence = config['adv_confidence']\n\ntest_model_name = config['test_model_name']\ntest_model_name_st = config['test_model_name_st']\n\nadv_model_len = len(adv_model_name)\n\ncategory = 0\nfor cur_adv_confidence in adv_confidence:\n category += len(cur_adv_confidence)\n\nx = []\nz_transfer = []\nz_predict = []\nz_attack_success = []\n\nfor i in range(category):\n x.append([])\n z_transfer.append([])\n z_predict.append([])\n z_attack_success.append([])\n\nidx = 0\nnum_model = 0\nfor i in range(len(adv_model_name)):\n cur_adv_model_name = adv_model_name[i]\n cur_adv_model_name_st = adv_model_name_st[i]\n for j in range(len(adv_confidence[i])):\n cur_adv_confidence = adv_confidence[i][j]\n num_model += 1\n\n for k in range(len(test_model_name)):\n cur_test_model_name = test_model_name[k]\n cur_test_model_name_st = test_model_name_st[k]\n\n filename = cur_test_model_name + '_transferability_' + cur_adv_model_name + '_' + \\\n str(cur_adv_confidence) + '.csv'\n cur_csvfile = open(os.path.join(_dir, filename), 'r')\n cur_reader = csv.DictReader(cur_csvfile, dialect='excel-tab')\n\n t = 0\n for row in cur_reader:\n t += 1\n x[idx].append(float(k))\n z_transfer[idx].append(float(row['float_adv_acc']))\n z_predict[idx].append(float(row['float_pred_acc']))\n z_attack_success[idx].append(float(row['attack_success_rate']))\n cur_csvfile.close()\n\n idx += 1\n\n\nfig = plt.figure(figsize=(15, 5))\n\nax0 = fig.add_subplot(1, 3, 1)\nax1 = fig.add_subplot(1, 3, 2)\nax2 = fig.add_subplot(1, 3, 3)\n\nax0.set_xlabel(\"adv_model\")\nax0.set_ylabel(\"transfer\")\nax0.set_title('Adv Transferability')\nax0.set_xticks(np.arange(len(test_model_name_st)))\nax0.set_xticklabels(test_model_name_st)\n\n\nax1.set_xlabel(\"adv_model\")\nax1.set_ylabel(\"prediction\")\nax1.set_title('Adv Prediction Acc on Test Model')\nax1.set_xticks(np.arange(len(test_model_name_st)))\nax1.set_xticklabels(test_model_name_st)\n\nax2.set_xlabel(\"adv_model\")\nax2.set_ylabel(\"attack_success\")\nax2.set_title('Attack Success Rate on Target Model')\nax2.set_xticks(np.arange(len(test_model_name_st)))\nax2.set_xticklabels(test_model_name_st)\n\nlegend = []\nidx = 0\nfor i in range(len(adv_model_name)):\n cur_adv_model_name = adv_model_name[i]\n cur_adv_model_name_st = adv_model_name_st[i]\n for j in range(len(adv_confidence[i])):\n cur_adv_confidence = adv_confidence[i][j]\n\n cur_x = np.asarray(x[idx])\n cur_x += idx / num_model\n cur_legend = ax0.scatter(cur_x, z_transfer[idx], label=cur_adv_model_name_st + \"_\" + str(cur_adv_confidence),\n s=20)\n legend.append(cur_legend)\n ax1.scatter(cur_x, z_predict[idx], s=20)\n ax2.scatter(cur_x, z_attack_success[idx], s=20)\n idx += 1\n\nax0.legend(handles=legend, bbox_to_anchor=(0., 1.05, 1., .102), ncol=3, loc=3)\n\nfig.tight_layout()\nfig.subplots_adjust(top=0.95-0.05*math.ceil(len(legend)/3), bottom=.1)\nfig.suptitle(img_filename)\n\nif img_filename:\n fig.savefig(os.path.join(_dir, img_filename))\n", "## utils.py -- utility functions\n##\n## 
Copyright (C) 2017, Dongyu Meng <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\nimport hashlib\nimport json\nimport os\nimport pickle\n\nimport numpy as np\nimport tensorflow as tf\nfrom easydict import EasyDict as edict\n\n\ndef prepare_data(dataset, idx):\n \"\"\"\n Extract data from index.\n\n dataset: Full, working dataset. Such as MNIST().\n idx: Index of test examples that we care about.\n return: X, targets, Y\n \"\"\"\n return dataset.test_data[idx], dataset.test_labels[idx], np.argmax(dataset.test_labels[idx], axis=1)\n\n\ndef save_obj(obj, name, directory='./attack_data/'):\n with open(os.path.join(directory, name + '.pkl'), 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name, directory='./attack_data/'):\n if name.endswith(\".pkl\"): name = name[:-4]\n with open(os.path.join(directory, name + '.pkl'), 'rb') as f:\n return pickle.load(f)\n\n\ndef save_cache(info, data, directory='./attack_data/', hash_alg='sha256'):\n info_str = json.dumps(info, sort_keys=True, separators=(',', ':'))\n h = hashlib.new(hash_alg, info_str.encode())\n hash_val = h.hexdigest()\n cache = {\n \"hash_id\": hash_val,\n \"hash_alg\": hash_alg,\n \"info\": info,\n \"data\": data\n }\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n fp = open(os.path.join(directory, hash_val), \"wb\")\n pickle.dump(cache, fp, pickle.HIGHEST_PROTOCOL)\n fp.close()\n return hash_val\n\n\ndef load_cache(info, directory='./attack_data/', hash_alg='sha256'):\n info_str = json.dumps(info, sort_keys=True, separators=(',', ':'))\n h = hashlib.new(hash_alg, info_str.encode())\n hash_val = h.hexdigest()\n filename = os.path.join(directory, hash_val)\n if not os.path.isfile(filename):\n return None\n\n fp = open(filename, \"rb\")\n cache = pickle.load(fp)\n fp.close()\n\n return cache\n\n\ndef load_model_idx(path):\n if path.endswith(\".idx\"):\n pass\n else:\n path += \".idx\"\n if not os.path.isfile(path):\n return None\n fp = open(path, 'rb')\n idx = pickle.load(fp)\n fp.close()\n return idx\n\n\ndef save_model_idx(path, data):\n if path.endswith(\".idx\"):\n pass\n else:\n path += \".idx\"\n\n fp = open(path, 'wb')\n idx = data.get_idx()\n pickle.dump(idx, fp, pickle.HIGHEST_PROTOCOL)\n fp.close()\n return idx\n\n\ndef softmax_cross_entropy_with_logits(correct, predicted):\n return tf.nn.softmax_cross_entropy_with_logits(labels=correct,\n logits=predicted)\n\n\ndef load_json(path):\n if not os.path.isfile(path):\n return None\n json_fp = open(path, \"rb\")\n json_str = json_fp.read()\n json_fp.close()\n # do not use edict since array type json is not a dictionary\n config = json.loads(json_str.decode())\n return config\n\n\ndef save_json(path, data, indent=None):\n config_fp = open(path, \"wb\")\n config_str = json.dumps(data, indent=indent)\n config_fp.write(config_str.encode())\n config_fp.close()\n\n\ndef get_num_records(filenames):\n def count_records(tf_record_filename):\n count = 0\n for _ in tf.python_io.tf_record_iterator(tf_record_filename):\n count += 1\n return count\n\n print(filenames)\n nfile = len(filenames)\n return (count_records(filenames[0]) * (nfile - 1) +\n count_records(filenames[-1]))\n\n\ndef load_config(filename):\n config_fp = open(filename, \"rb\")\n json_str = config_fp.read()\n config_fp.close()\n config = edict(json.loads(json_str.decode()))\n return config\n" ]
[ [ "numpy.asarray", "matplotlib.pyplot.figure" ], [ "tensorflow.python_io.tf_record_iterator", "tensorflow.nn.softmax_cross_entropy_with_logits", "numpy.argmax" ] ]
anhnt170489/FunMOT
[ "6eb794bd485be42270eaee3804e13d38a897a945" ]
[ "src/lib/tracking_utils/evaluation.py" ]
[ "import os\nimport numpy as np\nimport copy\nimport motmetrics as mm\n\nmm.lap.default_solver = 'lap'\n\nfrom tracking_utils.io import read_results, unzip_objs\n\n\nclass Evaluator(object):\n\n def __init__(self, data_root, seq_name, data_type):\n self.data_root = data_root\n self.seq_name = seq_name\n self.data_type = data_type\n\n self.load_annotations()\n self.reset_accumulator()\n\n def load_annotations(self):\n assert self.data_type == 'mot'\n\n # gt_filename = os.path.join(self.data_root, self.seq_name, 'gt_half', 'gt.txt')\n # gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')\n gt_filename = os.path.join(self.data_root, self.seq_name, 'gt_hs', 'gt.txt')\n self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)\n self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)\n\n def reset_accumulator(self):\n self.acc = mm.MOTAccumulator(auto_id=True)\n\n def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):\n # results\n trk_tlwhs = np.copy(trk_tlwhs)\n trk_ids = np.copy(trk_ids)\n\n # gts\n gt_objs = self.gt_frame_dict.get(frame_id, [])\n gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]\n\n # ignore boxes\n ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])\n ignore_tlwhs = unzip_objs(ignore_objs)[0]\n\n # remove ignored results\n keep = np.ones(len(trk_tlwhs), dtype=bool)\n iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)\n if len(iou_distance) > 0:\n match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)\n match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])\n match_ious = iou_distance[match_is, match_js]\n\n match_js = np.asarray(match_js, dtype=int)\n match_js = match_js[np.logical_not(np.isnan(match_ious))]\n keep[match_js] = False\n trk_tlwhs = trk_tlwhs[keep]\n trk_ids = trk_ids[keep]\n # match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)\n # match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])\n # match_ious = iou_distance[match_is, match_js]\n\n # match_js = np.asarray(match_js, dtype=int)\n # match_js = match_js[np.logical_not(np.isnan(match_ious))]\n # keep[match_js] = False\n # trk_tlwhs = trk_tlwhs[keep]\n # trk_ids = trk_ids[keep]\n\n # get distance matrix\n iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)\n\n # acc\n self.acc.update(gt_ids, trk_ids, iou_distance)\n\n if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):\n events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics\n else:\n events = None\n return events\n\n def eval_file(self, filename):\n self.reset_accumulator()\n\n result_frame_dict = read_results(filename, self.data_type, is_gt=False)\n # frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))\n frames = sorted(list(set(result_frame_dict.keys())))\n for frame_id in frames:\n trk_objs = result_frame_dict.get(frame_id, [])\n trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]\n self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)\n\n return self.acc\n\n @staticmethod\n def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):\n names = copy.deepcopy(names)\n if metrics is None:\n metrics = mm.metrics.motchallenge_metrics\n metrics = copy.deepcopy(metrics)\n\n mh = mm.metrics.create()\n summary = mh.compute_many(\n accs,\n metrics=metrics,\n names=names,\n generate_overall=True\n )\n\n 
return summary\n\n @staticmethod\n def save_summary(summary, filename, epoch):\n import pandas as pd\n from openpyxl import load_workbook\n if not os.path.exists(filename):\n writer = pd.ExcelWriter(filename)\n else:\n book = load_workbook(filename)\n writer = pd.ExcelWriter(filename, engine='openpyxl')\n writer.book = book\n\n summary.to_excel(writer, sheet_name='Epoch ' + str(epoch))\n writer.save()\n writer.close()\n" ]
[ [ "numpy.asarray", "numpy.copy", "numpy.isnan", "pandas.ExcelWriter" ] ]
binh-vu/sm-table
[ "d4fc761bc0ad71e4a1252dc06f363861a2a6712c" ]
[ "grams/algorithm/type_feature.py" ]
[ "from collections import defaultdict\nfrom typing import Dict, List\n\nimport networkx as nx\nimport numpy as np\n\nfrom grams.algorithm.data_graph import CellNode\nfrom grams.algorithm.literal_match import TextParser\nfrom grams.algorithm.semantic_graph import SGColumnNode\nfrom grams.inputs.linked_table import LinkedTable\nfrom kgdata.wikidata.models import QNode, WDProperty, WDQuantityPropertyStats\n\n\nclass TypeFeatureExtraction:\n Freq = \"FrequencyOfType\"\n FreqOverRow = \"FreqOfTypeOverRow\"\n\n def __init__(self,\n table: LinkedTable, sg: nx.MultiDiGraph, dg: nx.MultiDiGraph,\n qnodes: Dict[str, QNode], wdprops: Dict[str, WDProperty],\n wd_num_prop_stats: Dict[str, WDQuantityPropertyStats]):\n self.table = table\n self.sg = sg\n self.dg = dg\n self.qnodes = qnodes\n self.wdprops = wdprops\n self.wd_num_prop_stats = wd_num_prop_stats\n self.text_parser = TextParser()\n\n # self.transitive_props = [p.id for p in self.wdprops.values() if p.is_transitive()]\n self.hierarchy_props = {\"P131\", \"P276\"}\n \n def extract_features(self):\n freq_type = {}\n freq_over_row = {}\n cell2qnodes = {}\n column2types = {}\n\n for uid, udata in self.sg.nodes(data=True):\n u: SGColumnNode = udata['data']\n if not u.is_column:\n continue\n\n # cells in this column\n cells: List[CellNode] = [self.dg.nodes[cid]['data'] for cid in u.nodes]\n covered_fractions = [\n sum(span.length for spans in cell.qnodes_span.values() for span in spans) / max(len(cell.value), 1)\n for cell in cells\n if len(cell.qnode_ids) > 0\n ]\n if len(covered_fractions) == 0:\n continue\n avg_covered_fractions = np.mean(covered_fractions)\n if avg_covered_fractions < 0.8:\n continue\n\n for cell in cells:\n self.add_merge_qnodes(cell, cell2qnodes)\n\n type2freq = defaultdict(int)\n for cell in cells:\n classes = set()\n for qnode in cell2qnodes[cell.id]:\n for stmt in qnode.props.get(\"P31\", []):\n classes.add(stmt.value.as_qnode_id())\n for c in classes:\n type2freq[c] += 1\n\n for c, freq in type2freq.items():\n freq_type[uid, c] = freq\n freq_over_row[uid, c] = freq / self.table.size()\n column2types[uid] = list(type2freq.keys())\n\n return {\n self.Freq: freq_type,\n self.FreqOverRow: freq_over_row,\n \"_column_to_types\": column2types\n }\n \n def add_merge_qnodes(self, cell: CellNode, cell2qnodes: Dict[str, List[QNode]]):\n # merge qnodes that are sub of each other\n # attempt to merge qnodes (spatial) if they are contained in each other\n # we should go even higher order\n if len(cell.qnode_ids) > 1:\n # attempt to merge qnodes (spatial) if they are contained in each other\n # we should go even higher order\n ignore_qnodes = set()\n for q0_id in cell.qnode_ids:\n q0 = self.qnodes[q0_id]\n vals = {\n stmt.value.as_qnode_id()\n for p in self.hierarchy_props\n for stmt in q0.props.get(p, [])\n }\n for q1_id in cell.qnode_ids:\n if q0_id == q1_id:\n continue\n if q1_id in vals:\n # q0 is inside q1, ignore q1\n ignore_qnodes.add(q1_id)\n cell2qnodes[cell.id] = [self.qnodes[q_id] for q_id in cell.qnode_ids if q_id not in ignore_qnodes]\n elif len(cell.qnode_ids) > 0:\n cell2qnodes[cell.id] = [self.qnodes[cell.qnode_ids[0]]]\n else:\n cell2qnodes[cell.id] = []" ]
[ [ "numpy.mean" ] ]
SJ-YI/HSRDEV
[ "8fa42bc41e337f0777e6980b37907dbd3c1067da" ]
[ "Run/datarecv.py" ]
[ "#!/usr/bin/env python\n# Copyright (C) 2016 Toyota Motor Corporation\nimport controller_manager_msgs.srv\nimport rospy\nimport math\nimport time\nimport datetime\n\nimport trajectory_msgs.msg\nimport actionlib\nimport control_msgs.msg\nimport geometry_msgs.msg\nimport os\nimport sys\nimport signal\n\nfrom sensor_msgs.msg import Image\nfrom nav_msgs.msg import OccupancyGrid\nfrom sensor_msgs.msg import PointCloud2\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import CompressedImage\nfrom geometry_msgs.msg import PoseStamped\nfrom actionlib_msgs.msg import GoalStatusArray\nfrom std_msgs.msg import Int32\nimport numpy as np\n\nimport tf2_ros\nimport tf2_geometry_msgs\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler, quaternion_matrix\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\ncv_img=None\nbridge=CvBridge()\n\nlogfile=open(\"poselog.txt\",\"a\")\nlogfile.write(\"=======\"+ unicode(datetime.datetime.now()) +\"\\n\")\n\n\ndef signal_handler(signal,frame):\n print(\"EXIT!\")\n logfile.close()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\ntfbuffer=tf2_ros.Buffer(rospy.Duration(10.0))\ntflistener=tf2_ros.TransformListener(tfbuffer)\n\npub_depth = rospy.Publisher('/pnu/depth_rect_raw',Image, queue_size=1)\npub_rgb = rospy.Publisher('/pnu/rgb_rect_raw',Image, queue_size=1)\n\nrospy.init_node('datareceiver')\n\n\n# wait to establish connection between the controller\n# while pub_depth.get_num_connections() == 0: rospy.sleep(0.1)\n# print \"DEPTH CONNECTED\"\n# while pub_rgb.get_num_connections() == 0: rospy.sleep(0.1)\n# print \"RGB CONNECTED\"\n\ndef depth_callback(data):\n global rgb_msg,rgb_count\n global cv_img\n # print(\"Encoding\",data.format)\n # print(\"RGB size:\", sys.getsizeof(data.data))\n if sys.getsizeof(data.data)>1000:\n np_arr = np.fromstring(data.data, np.uint8)\n image_np = cv2.imdecode(np_arr, 0) #0:8 bit grey\n #we map 0-255 to 500mm - 2550mm\n # print \"depth jpg:\", np_arr.shape\n # print \"decode jpg:\",image_np.shape\n image_np16 = image_np.astype(np.uint16)*10+500\n image_np16_0 = 1-np.clip(image_np.astype(np.uint16),0,1)\n image_np16_255 = image_np.astype(np.uint16)/255\n image_np16 = image_np16 - 3050*image_np16_255-500*image_np16_0\n\n depth_msg=Image()\n depth_msg.header=data.header\n depth_msg.width=640\n depth_msg.height=480\n depth_msg.encoding=\"mono16\"\n depth_msg.step = 640*2\n depth_msg.data=image_np16.tostring()\n pub_depth.publish(depth_msg)\n print(\"Depth: packed %d KB org %d KB\" % (sys.getsizeof(data.data)/1024,sys.getsizeof(image_np16)/1024))\n\ndef rgb_callback(data):\n global rgb_msg,rgb_count\n global cv_img\n # print(\"Encoding\",data.format)\n if sys.getsizeof(data.data)>1000:\n np_arr = np.fromstring(data.data, np.uint8)\n image_np = cv2.imdecode(np_arr, 1)\n img_msg = bridge.cv2_to_imgmsg(image_np, \"bgr8\")\n img_msg.header=data.header\n img_msg.width=640\n img_msg.height=480\n pub_rgb.publish(img_msg)\n\n print(\"RGB: packed %d KB org %d KB\" % (sys.getsizeof(data.data)/1024,sys.getsizeof(image_np)/1024))\n\n\nrospy.Subscriber(\"/pnu/depth_rect_compressed\", CompressedImage, depth_callback)\nrospy.Subscriber(\"/pnu/rgb_rect_compressed\", CompressedImage,rgb_callback)\n\n\nlast_time = time.time()\nt_last_send=last_time\nt_last_send_pcl=last_time+0.5\nsend_interval = 1.0\nsend_interval_pcl = 3.0\n\nrospy.spin()\n" ]
[ [ "numpy.fromstring" ] ]
nam1665/voice_clone
[ "7808d6f80aa9bbaffe367fde07b1c6f96cd3697e" ]
[ "synthesizer/inference.py" ]
[ "from synthesizer.hparams import hparams\nfrom synthesizer.synthesizer import Synthesizer\nfrom synthesizer import audio\nfrom pathlib import Path\nfrom typing import Union, List\nimport tensorflow as tf\nimport numpy as np\nimport librosa\n\n_model = None # type: Synthesizer\nsample_rate = hparams.sample_rate\n\n# TODO: allow for custom hparams throughout this module?\n\ndef load_model(checkpoints_dir: Path):\n global _model\n \n tf.reset_default_graph()\n _model = Synthesizer()\n checkpoint_fpath = tf.train.get_checkpoint_state(checkpoints_dir).model_checkpoint_path\n _model.load(checkpoint_fpath, hparams)\n \n model_name = checkpoints_dir.parent.name.replace(\"logs-\", \"\")\n step = int(checkpoint_fpath[checkpoint_fpath.rfind('-') + 1:])\n print(\"Loaded synthesizer \\\"%s\\\" trained to step %d\" % (model_name, step))\n\ndef is_loaded():\n return _model is not None\n\ndef synthesize_spectrograms(texts: List[str], embeddings: np.ndarray, return_alignments=False):\n \"\"\"\n Synthesizes mel spectrograms from texts and speaker embeddings.\n \n :param texts: a list of N text prompts to be synthesized\n :param embeddings: a numpy array of (N, 256) speaker embeddings\n :param return_alignments: if True, a matrix representing the alignments between the characters\n and each decoder output step will be returned for each spectrogram\n :return: a list of N melspectrograms as numpy arrays of shape (80, M), and possibly the \n alignments.\n \"\"\"\n if not is_loaded():\n raise Exception(\"Load a model first\")\n \n specs, alignments = _model.my_synthesize(embeddings, texts)\n \n if return_alignments:\n return specs, alignments\n else:\n return specs\n\ndef load_preprocess_wav(fpath):\n wav = librosa.load(fpath, hparams.sample_rate)[0]\n if hparams.rescale:\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n return wav\n\ndef make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]):\n if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):\n wav = load_preprocess_wav(fpath_or_wav)\n else: \n wav = fpath_or_wav\n \n mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32)\n return mel_spectrogram\n\ndef griffin_lim(mel):\n return audio.inv_mel_spectrogram(mel, hparams)\n\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.reset_default_graph", "numpy.abs" ] ]
zprobs/representation-algorithm
[ "182219265f6056f01f04ed728200d35837b30932" ]
[ "tests/test.py" ]
[ "import unittest\nimport numpy as np\nimport pygambit\nimport os\n\nfrom representation import build_representation\n\n\nclass TestRepresentation(unittest.TestCase):\n def test_empty(self):\n empty_game = pygambit.Game.new_tree()\n empty_game.title = 'EFG'\n self.assertEqual(build_representation(np.array([[]])), empty_game.write())\n\n def test_large(self):\n filename = os.path.join(os.path.dirname(__file__), 'large.efg')\n extensive = pygambit.Game.read_game(filename)\n\n normal = np.array([\n [(1, 1), (2, 2), (3, 3)],\n [(4, 4), (5, 5), (3, 3)],\n [(4, 4), (6, 6), (3, 3)],\n [(4, 4), (7, 7), (8, 8)]\n ])\n\n self.assertEqual(build_representation(normal, 'large'), extensive.write())\n\n def test_large_diff_payoffs(self):\n filename = os.path.join(os.path.dirname(__file__), 'large-diff-payoffs.efg')\n extensive = pygambit.Game.read_game(filename)\n\n normal = np.array([\n [(-1, 1), (2, 2), (5, 2)],\n [(4, 4), (3, 4), (5, 2)],\n [(4, 4), (1, 2), (5, 2)],\n [(4, 4), (0, 0), (16, -8)]\n ])\n\n self.assertEqual(build_representation(normal, 'large'), extensive.write())\n\n def test_four_by_four(self):\n filename = os.path.join(os.path.dirname(__file__), 'four-by-four.efg')\n extensive = pygambit.Game.read_game(filename)\n\n normal = np.array([\n [(1, 1), (2, 2), (4, 4), (6, 6)],\n [(1, 1), (2, 2), (5, 5), (6, 6)],\n [(3, 3), (3, 3), (4, 4), (6, 6)],\n [(3, 3), (3, 3), (5, 5), (6, 6)]\n ])\n\n self.assertEqual(build_representation(normal, 'four-by-four'), extensive.write())\n\n def test_three_players(self):\n filename = os.path.join(os.path.dirname(__file__), 'three-player.efg')\n extensive = pygambit.Game.read_game(filename)\n\n normal = np.array([\n [\n [(1, 1, 1), (1, 1, 1)],\n [(2, 2, 2), (3, 3, 3)]\n ],\n [\n [(4, 4, 4), (6, 6, 6)],\n [(5, 5, 5), (6, 6, 6)]\n ],\n ])\n\n self.assertEqual(build_representation(normal, 'three-player'), extensive.write())\n \n # non-playable representations may be generated when the algorithm is applied to 2-player games\n def test_non_playable(self):\n filename = os.path.join(os.path.dirname(__file__), 'non-playable.efg.')\n extensive = pygambit.Game.read_game(filename)\n\n normal = np.array([\n [(7, 7), (8, 8), (10, 10), (10, 10)],\n [(7, 7), (9, 9), (11, 11), (12, 12)],\n [(2, 2), (3, 3), (4, 4), (6, 6)],\n [(1, 1), (1, 1), (5, 5), (6, 6)]\n ])\n\n self.assertEqual(build_representation(normal, 'non-playable'), extensive.write())\n\n def test_wrong_shape(self):\n normal = np.array([\n [\n [(1, 1, 1), (1, 1, 1)],\n ],\n [\n [(4, 4, 4), (6, 6, 6)],\n [(5, 5, 5), (6, 6, 6)]\n ],\n ], dtype=object)\n\n self.assertRaises(ValueError, build_representation, normal)\n\n def test_wrong_payoff_shape(self):\n normal = np.array([\n [\n [(1, 1, 1), (1, 1, 1)],\n [(2, 2, 2), (3, 3, 3)]\n ],\n [\n [(4, 4), (6, 6)],\n [(5, 5), (6, 6)]\n ],\n ], dtype=object)\n\n self.assertRaises(ValueError, build_representation, normal)\n\n def test_wrong_payoff_type(self):\n normal = np.array([\n [('a', 'b'), ('c', 'd')],\n [('a', 'b'), ('c', 'd')],\n ])\n self.assertRaises(ValueError, build_representation, normal)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
vtlim/GLIC
[ "90e00e7030748c70ad284cda8785745b6c16ecbb" ]
[ "archive/contact_map.py" ]
[ "\n# https://contact-map.readthedocs.io/en/latest/examples/nb/contact_map.html\n\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport mdtraj as md\nfrom contact_map import ContactMap, ContactFrequency, ContactDifference\n\ndef map_contacts(fname):\n\n traj = md.load(fname)\n contacts = ContactMap(traj[0])\n (fig, ax) = contacts.residue_contacts.plot(cmap='seismic', vmin=-1, vmax=1)\n plt.xlabel(\"Residue\")\n plt.ylabel(\"Residue\")\n\n figname = os.path.splitext(fname)[0] + '.png'\n plt.savefig(figname, bbox_inches='tight')\n plt.show()\n\n return contacts\n\ncontacts1 = map_contacts(sys.argv[1])\ncontacts2 = map_contacts(sys.argv[2])\n\ndiff = contacts1 - contacts2\ndiff.residue_contacts.plot(cmap='seismic', vmin=-1, vmax=1)\n\nplt.savefig('diff.png', bbox_inches='tight')\nplt.show()\n\ndiff_residues = diff.residue_contacts.most_common()\n\nprint('\\nMOST POSITIVE VALUES FOR {} - {}'.format(sys.argv[1], sys.argv[2]))\nprint(diff_residues[:10])\n\nprint('\\nMOST NEGATIVE VALUES FOR {} - {}'.format(sys.argv[1], sys.argv[2]))\nprint(diff_residues[:-10:-1])\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel" ] ]
LihengXu/WPSS
[ "40eaa809ffb19447ab3c583440a1875c49523c10" ]
[ "controller.py" ]
[ "import numpy as np\nimport time\nimport pprint\nfrom collections import OrderedDict\nfrom keras import backend as K\nimport tensorflow as tf\nimport os\n\n\nclass StateSpace:\n \"\"\"\n State Space manager\n\n Provides utilit functions for holding \"states\" / \"actions\" that the controller\n must use to train and predict.\n\n Also provides a more convenient way to define the search space\n \"\"\"\n def __init__(self):\n self.states = OrderedDict()\n self.state_count_ = 0\n\n def add_state(self, name, values):\n \"\"\"\n Adds a \"state\" to the state manager, along with some metadata for efficient\n packing and unpacking of information required by the RNN Controller.\n\n Stores metadata such as:\n - Global ID\n - Name\n - Valid Values\n - Number of valid values possible\n - Map from value ID to state value\n - Map from state value to value ID\n\n Args:\n name: name of the state / action\n values: valid values that this state can take\n\n Returns:\n Global ID of the state. Can be used to refer to this state later.\n \"\"\"\n index_map = {}\n for i, val in enumerate(values):\n index_map[i] = val\n\n value_map = {}\n for i, val in enumerate(values):\n value_map[val] = i\n\n metadata = {\n 'id': self.state_count_,\n 'name': name,\n 'values': values,\n 'size': len(values),\n 'index_map_': index_map,\n 'value_map_': value_map,\n }\n self.states[self.state_count_] = metadata\n self.state_count_ += 1\n\n return self.state_count_ - 1\n\n def embedding_encode(self, id, value):\n \"\"\"\n Embedding index encode the specific state value\n\n Args:\n id: global id of the state\n value: state value\n\n Returns:\n embedding encoded representation of the state value\n \"\"\"\n state = self[id]\n size = state['size']\n value_map = state['value_map_']\n value_idx = value_map[value]\n\n one_hot = np.zeros((1, size), dtype=np.float32)\n one_hot[np.arange(1), value_idx] = 1\n return one_hot\n\n def get_state_value(self, id, index):\n \"\"\"\n Retrieves the state value from the state value ID\n\n Args:\n id: global id of the state\n index: index of the state value (usually from argmax)\n\n Returns:\n The actual state value at given value index\n \"\"\"\n state = self[id]\n index_map = state['index_map_']\n\n if (type(index) == list or type(index) == np.ndarray) and len(index) == 1:\n index = index[0]\n\n value = index_map[index]\n return value\n\n def get_random_state_space(self, num_layers):\n \"\"\"\n Constructs a random initial state space for feeding as an initial value\n to the Controller RNN\n\n Args:\n num_layers: number of layers to duplicate the search space\n\n Returns:\n A list of one hot encoded states\n \"\"\"\n states = []\n\n for id in range(self.size * num_layers):\n state = self[id]\n size = state['size']\n\n sample = np.random.choice(size, size=1)\n sample = state['index_map_'][sample[0]]\n state = self.embedding_encode(id, sample)\n states.append(state)\n return states\n\n def parse_state_space_list(self, state_list):\n \"\"\"\n Parses a list of one hot encoded states to retrieve a list of state values\n\n Args:\n state_list: list of one hot encoded states\n\n Returns:\n list of state values\n \"\"\"\n state_values = []\n for id, state_one_hot in enumerate(state_list):\n state_val_idx = np.argmax(state_one_hot, axis=-1)[0]\n value = self.get_state_value(id, state_val_idx)\n state_values.append(value)\n\n return state_values\n\n def print_state_space(self):\n \"\"\" Pretty print the state space \"\"\"\n print('*' * 40, 'STATE SPACE', '*' * 40)\n\n pp = pprint.PrettyPrinter(indent=2, 
width=100)\n for id, state in self.states.items():\n pp.pprint(state)\n print()\n\n def print_actions(self, actions):\n \"\"\" Print the action space properly \"\"\"\n print('Actions :')\n\n for id, action in enumerate(actions):\n if id % self.size == 0:\n print(\"*\" * 20, \"Layer %d\" % (((id + 1) // self.size) + 1), \"*\" * 20)\n\n state = self[id]\n name = state['name']\n vals = [(n, p) for n, p in zip(state['values'], *action)]\n print(\"%s : \" % name, vals)\n print()\n\n def __getitem__(self, id):\n return self.states[id % self.size]\n\n @property\n def size(self):\n return self.state_count_\n\n\nclass Controller:\n \"\"\"\n Utility class to manage the RNN Controller\n \"\"\"\n def __init__(self, policy_session, cfg, state_space,\n discount_factor=0.99,\n clip_norm=0.0,\n ):\n self.policy_session = policy_session # type: tf.Session\n self.cfg = cfg\n\n # save the path for .ckpt\n if not os.path.exists(self.cfg.CONTROLLER.WEIFHTS_DIR_PATH):\n os.makedirs(self.cfg.CONTROLLER.WEIFHTS_DIR_PATH)\n\n self.num_layers = self.cfg.CONTROLLER.NUM_LAYERS\n self.state_space = state_space # type: StateSpace\n self.state_size = self.state_space.size\n\n self.controller_cells = self.cfg.CONTROLLER.CONTROLLER_CELLS\n self.embedding_dim = self.cfg.CONTROLLER.EMBEDDING_DIM\n self.reg_strength = self.cfg.CONTROLLER.REGULARIZATION\n self.discount_factor = discount_factor\n self.exploration_dis_factor = self.cfg.CONTROLLER.EXPLORATION_DIS_FACTOR\n self.exploration = self.cfg.CONTROLLER.EXPLORATION\n self.restore_controller = self.cfg.CONTROLLER.RESTORE_CONTROLLER\n self.clip_norm = clip_norm\n\n self.reward_buffer = []\n self.state_buffer = []\n\n self.cell_outputs = []\n self.policy_classifiers = []\n self.policy_actions = []\n self.policy_labels = []\n\n self.build_policy_network()\n self.flag = True\n\n def get_action(self, state, explore_flag=True):\n \"\"\"\n Gets a one hot encoded action list, either from random sampling or from\n the Controller RNN\n\n Args:\n state: a list of one hot encoded states, whose first value is used as initial\n state for the controller RNN\n\n Returns:\n A one hot encoded action list\n \"\"\"\n if np.random.random() < self.exploration and explore_flag:\n print(\"Generating random action to explore\")\n self.flag = False\n actions = []\n\n for i in range(self.state_size * self.num_layers):\n state_ = self.state_space[i]\n size = state_['size']\n\n sample = np.random.choice(size, size=1)\n sample = state_['index_map_'][sample[0]]\n action = self.state_space.embedding_encode(i, sample)\n actions.append(action)\n return actions\n\n else:\n print(\"Prediction action from Controller\")\n self.flag = True\n initial_state = self.state_space[0]\n size = initial_state['size']\n\n if state[0].shape != (1, size):\n state = state[0].reshape((1, size)).astype('int32')\n else:\n state = state[0]\n\n print(\"State input to Controller for Action : \", state.flatten())\n\n with self.policy_session.as_default():\n K.set_session(self.policy_session)\n\n with tf.name_scope('action_prediction'):\n pred_actions = self.policy_session.run(self.policy_actions, feed_dict={self.state_input: state})\n\n return pred_actions\n\n def build_policy_network(self):\n with self.policy_session.as_default():\n K.set_session(self.policy_session)\n\n with tf.name_scope('controller'):\n with tf.variable_scope('policy_network'):\n\n # state input is the first input fed into the controller RNN.\n # the rest of the inputs are fed to the RNN internally\n with tf.name_scope('state_input'):\n state_input = 
tf.placeholder(dtype=tf.int32, shape=(1, None), name='state_input')\n\n self.state_input = state_input\n\n # we can use LSTM as the controller as well\n nas_cell = tf.nn.rnn_cell.LSTMCell(self.controller_cells)\n cell_state = nas_cell.zero_state(batch_size=1, dtype=tf.float32)\n\n embedding_weights = []\n\n # for each possible state, create a new embedding. Reuse the weights for multiple layers.\n with tf.variable_scope('embeddings', reuse=tf.AUTO_REUSE):\n for i in range(self.state_size):\n state_ = self.state_space[i]\n size = state_['size']\n\n # size + 1 is used so that 0th index is never updated and is \"default\" value\n weights = tf.get_variable('state_embeddings_%d' % i,\n shape=[size + 1, self.embedding_dim],\n initializer=tf.initializers.random_uniform(-1., 1.))\n\n embedding_weights.append(weights)\n\n # initially, cell input will be 1st state input\n embeddings = tf.nn.embedding_lookup(embedding_weights[0], state_input)\n\n cell_input = embeddings\n\n # we provide a flat list of chained input-output to the RNN\n for i in range(self.state_size * self.num_layers):\n state_id = i % self.state_size\n state_space = self.state_space[i]\n size = state_space['size']\n\n with tf.name_scope('controller_output_%d' % i):\n # feed the ith layer input (i-1 layer output) to the RNN\n outputs, final_state = tf.nn.dynamic_rnn(nas_cell,\n cell_input,\n initial_state=cell_state,\n dtype=tf.float32)\n\n # add a new classifier for each layers output\n classifier = tf.layers.dense(outputs[:, -1, :], units=size, name='classifier_%d' % i,\n reuse=False)\n preds = tf.nn.softmax(classifier)\n\n # feed the previous layer (i-1 layer output) to the next layers input, along with state\n # take the class label\n cell_input = tf.argmax(preds, axis=-1)\n cell_input = tf.expand_dims(cell_input, -1, name='pred_output_%d' % i)\n cell_input = tf.cast(cell_input, tf.int32)\n # we avoid using 0 so as to have a \"default\" embedding at 0th index\n cell_input = tf.add(cell_input, 1)\n\n # embedding lookup of this state using its state weights ; reuse weights\n cell_input = tf.nn.embedding_lookup(embedding_weights[state_id], cell_input,\n name='cell_output_%d' % i)\n\n cell_state = final_state\n\n # store the tensors for later loss computation\n self.cell_outputs.append(cell_input)\n self.policy_classifiers.append(classifier)\n self.policy_actions.append(preds)\n\n policy_net_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='policy_network')\n\n with tf.name_scope('optimizer'):\n self.global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.001\n learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step,\n 100, 0.95, staircase=True)\n\n tf.summary.scalar('learning_rate', learning_rate)\n\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n with tf.name_scope('losses'):\n self.discounted_rewards = tf.placeholder(tf.float32, shape=(None,), name='discounted_rewards')\n tf.summary.scalar('discounted_reward', tf.reduce_sum(self.discounted_rewards))\n\n # calculate sum of all the individual classifiers\n cross_entropy_loss = 0\n for i in range(self.state_size * self.num_layers):\n classifier = self.policy_classifiers[i]\n state_space = self.state_space[i]\n size = state_space['size']\n\n with tf.name_scope('state_%d' % (i + 1)):\n labels = tf.placeholder(dtype=tf.float32, shape=(None, size), name='cell_label_%d' % i)\n self.policy_labels.append(labels)\n\n ce_loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=classifier, 
labels=labels)\n                        tf.summary.scalar('state_%d_ce_loss' % (i + 1), tf.reduce_mean(ce_loss))\n\n                        cross_entropy_loss += ce_loss\n\n                policy_gradient_loss = tf.reduce_mean(cross_entropy_loss)\n                reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in policy_net_variables])  # Regularization\n\n                # sum up policy gradient and regularization loss\n                self.total_loss = policy_gradient_loss + self.reg_strength * reg_loss\n                # self.total_loss = policy_gradient_loss\n                tf.summary.scalar('total_loss', self.total_loss)\n                tf.summary.scalar('reg_loss', self.reg_strength * reg_loss)\n\n                self.gradients = self.optimizer.compute_gradients(self.total_loss)\n\n                with tf.name_scope('policy_gradients'):\n                    # clip gradients by global norm so they don't explode, if a clip norm was passed\n                    if self.clip_norm is not None and self.clip_norm != 0.0:\n                        norm = tf.constant(self.clip_norm, dtype=tf.float32)\n                        gradients, vars = zip(*self.gradients)  # unpack the two lists of gradients and the variables\n                        gradients, _ = tf.clip_by_global_norm(gradients, norm)  # clip by the norm\n                        self.gradients = list(zip(gradients, vars))  # we need to set values later, convert to list\n\n                    # compute policy gradients\n                    for i, (grad, var) in enumerate(self.gradients):\n                        if grad is not None:\n                            self.gradients[i] = (grad * self.discounted_rewards, var)\n\n            # training update\n            with tf.name_scope(\"train_policy_network\"):\n                # apply gradients to update policy network\n                self.train_op = self.optimizer.apply_gradients(self.gradients, global_step=self.global_step)\n\n            self.summaries_op = tf.summary.merge_all()\n\n            timestr = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n            filename = 'logs/%s' % timestr\n\n            self.summary_writer = tf.summary.FileWriter(filename, graph=self.policy_session.graph)\n\n            self.policy_session.run(tf.global_variables_initializer())\n            self.saver = tf.train.Saver(max_to_keep=1)\n\n            if self.restore_controller:\n                path = tf.train.latest_checkpoint(self.cfg.CONTROLLER.WEIFHTS_DIR_PATH)\n\n                if path is not None and tf.train.checkpoint_exists(path):\n                    print(\"Loading Controller Checkpoint !\")\n                    self.saver.restore(self.policy_session, path)\n\n    def store_rollout(self, state, reward):\n        self.reward_buffer.append(reward)\n        self.state_buffer.append(state)\n\n        # dump buffers to file once they grow beyond 20 items\n        if len(self.reward_buffer) > 20:\n            with open('buffers.txt', mode='a+') as f:\n                for i in range(20):\n                    state_ = self.state_buffer[i]\n                    state_list = self.state_space.parse_state_space_list(state_)\n                    state_list = ','.join(str(v) for v in state_list)\n\n                    f.write(\"%0.4f,%s\\n\" % (self.reward_buffer[i], state_list))\n\n                print(\"Saved buffers to file `buffers.txt` !\")\n\n                self.reward_buffer = [self.reward_buffer[-1]]\n                self.state_buffer = [self.state_buffer[-1]]\n\n    def discount_rewards(self):\n        \"\"\"\n        Compute discounted rewards over the entire reward buffer\n\n        Returns:\n            Discounted reward value\n        \"\"\"\n        rewards = np.asarray(self.reward_buffer)\n        discounted_rewards = np.zeros_like(rewards)\n        running_add = 0\n        for t in reversed(range(0, rewards.size)):\n            if rewards[t] != 0:\n                running_add = 0\n            running_add = running_add * self.discount_factor + rewards[t]\n            discounted_rewards[t] = running_add\n        return discounted_rewards[-1]\n\n    def train_step(self):\n        \"\"\"\n        Perform a single train step on the Controller RNN\n\n        Returns:\n            the training loss\n        \"\"\"\n        states = self.state_buffer[-1]\n        label_list = []\n\n        # parse the state space to get real value of the states,\n        # then one hot encode them for comparison with the predictions\n        state_list = 
self.state_space.parse_state_space_list(states)\n for id, state_value in enumerate(state_list):\n state_one_hot = self.state_space.embedding_encode(id, state_value)\n label_list.append(state_one_hot)\n\n # the initial input to the controller RNN\n state_input_size = self.state_space[0]['size']\n state_input = states[0].reshape((1, state_input_size)).astype('int32')\n print(\"State input to Controller for training : \", state_input.flatten())\n\n # the discounted reward value\n reward = self.discount_rewards()\n reward = np.asarray([reward]).astype('float32')\n\n feed_dict = {\n self.state_input: state_input,\n self.discounted_rewards: reward\n }\n\n # prepare the feed dict with the values of all the policy labels for each\n # of the Controller outputs\n for i, label in enumerate(label_list):\n feed_dict[self.policy_labels[i]] = label\n\n with self.policy_session.as_default():\n K.set_session(self.policy_session)\n\n print(\"Training RNN (States ip) : \", state_list)\n print(\"Training RNN (Reward ip) : \", reward.flatten())\n _, loss, summary, global_step = self.policy_session.run([self.train_op,\n self.total_loss,\n self.summaries_op,\n self.global_step\n ],\n feed_dict=feed_dict)\n if self.flag:\n self.summary_writer.add_summary(summary, global_step)\n # self.saver.save(self.policy_session, save_path=self.cfg.CONTROLLER.WEIFHTS_FILE_PATH,\n # global_step=self.global_step)\n self.saver.save(self.policy_session, save_path=self.cfg.CONTROLLER.WEIFHTS_FILE_PATH)\n\n if global_step != 0 and global_step % 20 == 0 and self.exploration > 0.5:\n self.exploration *= self.exploration_dis_factor\n\n return loss\n" ]
[ [ "tensorflow.nn.dynamic_rnn", "numpy.asarray", "tensorflow.reduce_sum", "tensorflow.cast", "numpy.zeros_like", "tensorflow.summary.scalar", "tensorflow.Variable", "tensorflow.get_collection", "numpy.arange", "tensorflow.layers.dense", "tensorflow.train.exponential_decay", "numpy.argmax", "tensorflow.add", "tensorflow.name_scope", "tensorflow.square", "tensorflow.train.Saver", "tensorflow.argmax", "numpy.zeros", "tensorflow.train.RMSPropOptimizer", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.initializers.random_uniform", "tensorflow.nn.embedding_lookup", "tensorflow.train.checkpoint_exists", "tensorflow.nn.softmax", "tensorflow.summary.FileWriter", "numpy.random.random", "tensorflow.train.latest_checkpoint", "tensorflow.reduce_mean", "tensorflow.constant", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.expand_dims", "tensorflow.clip_by_global_norm", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "tensorflow.variable_scope" ] ]
mzurzolo/STBS
[ "0e3b5fcb88f7d488029ba71012787f36a2d97c70" ]
[ "Python/Projects/project3/project3.backup.py" ]
[ "\"\"\"Import statements tell python what packages you'll be using.\n\nYou can use 'as' to change how you refer to the package. In this file,\nI import matplotlib.pyplot as plt so I don't have to type out\n'matplotlib.pyplot' every time I want to use it.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n###############################################################################\n\"\"\"After the import statements, I define one function: plot_with_pandas.\nFunction definitions start with the word 'def' and end when\nthe indentation ends. For example:\n\n\ndef ex1():\n print(\"This line is part of the function 'ex1' because it's indented\")\n\n print(\"This line is still part of the function 'ex1'\")\n print(\"Blank lines do not end the current level of indentation\")\n\nprint(\"This line is not part of the function 'ex1' because it's not indented\")\n\n\n\"\"\"\n\n\ndef plot_with_pandas():\n \"\"\"Generate plot, using pandas.\"\"\"\n # pandas has an easy way to read csv data.\n # you can use pd.read_csv(), with a string that\n # tells pandas where to find the data file.\n # See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\n # for full documentation.\n df = pd.read_csv(\"inputs/data.csv\")\n\n # Now, there's a pandas dataframe named 'df' that holds all the information\n # that was in inputs/data.csv\n # A dataframe is a container we can store data in. For now, you can\n # visualize it as a table, or a spreadsheet in excel.\n\n # Dataframes come with their own built-in way to generate graphs.\n # This next line will generate a graph that plots the x column along the\n # x axis, and the sin and cos columns on the y-axis.\n df.plot(x=\"x\", y=[\"sin\", \"cos\"])\n\n # Pandas uses matplotlib.pyplot to generate graphs so you don't have to\n # interact with matplotlib.pyplot directly. In the next line, we DO interact\n # with matplotlib.pyplot directly to show the graph. We set block=False\n # because otherwise, our program will pause here until the graph is shown.\n # Instead, our program continues to run until the line below that\n # starts with 'input'\n # input() will print a string (a string is just text with quotes\n # around it) and wait for the user to press enter.\n plt.show(block=False)\n\n\ndef generate_new_data():\n \"\"\"Generate new data.\"\"\"\n # In this function, I'll show you how I generated the input.csv file you\n # used in project 1 and 2.\n x = np.linspace(0, 100, 1000)\n # you can look at x by removing the '#' at the beginning of the line below:\n #print(x)\n y = np.linspace(0, 100, 1000)\n # you can look at y by removing the '#' at the beginning of the line below:\n #print(y)\n\n # More complicated data can be generated with loops\n # First, create an empty list\n sin = []\n # Then, loop thorugh x, change each value of x in some way, and add it to\n # the end of the list.\n for i in x:\n changed_value = np.sin(i)\n # append means 'add to the end'\n sin.append(changed_value)\n\n # you can look at sin by removing the '#' at the beginning of the line below:\n #print(sin)\n\n # np.sin() and np.cos() allow you to calculate the sine and cosine of\n # more than one value at a time\n cos = np.cos(x)\n # you can look at cos by removing the '#' at the beginning of the line below:\n #print(cos)\n\n # To easily write data to a csv, make a pandas dataframe.\n # first, create a dictionary of the data. 
For this example,\n # the dictionary can be thought of as an excel spreadsheet, where the 'key'\n # is the column header, and the :value is the data in the column.\n temp_dictionary = {'x' : x,\n 'y' : y,\n 'sin' : sin,\n 'cos' : cos}\n\n # then, use the dictionary to create the dataframe\n df = pd.DataFrame(temp_dictionary)\n\n # dataframes can be written to csv files directly\n df.to_csv(\"inputs/data.csv\", index=False)\n\n\n###############################################################################\n# These next lines 'call' the functions defined above.\n# If you erase the lines below (or put a # in front of them) this code won't\n# 'do' anything (it won't generate a graph anymore.)\ngenerate_new_data()\nplot_with_pandas()\n\ninput(\"Press enter to close the graph (if it's still open) and end the program\")\n\n# This program is broken into three parts:\n# Part 1 pulls in (imports) code that other people wrote.\n# Part 2 defines a function that you'll be using later.\n# Our function, plot_with_pandas, is really only 3 lines of running code,\n# but defining a function allows us to use those 3 lines together, without\n# having to re-type them every time we want them to run.\n# Part 3 is the 'execution' part. It's the part that python actually 'does'.\n# It runs the function defined in part 2, then it pauses on the input line until\n# the user (you) hits enter.\n" ]
[ [ "pandas.read_csv", "numpy.linspace", "numpy.cos", "pandas.DataFrame", "numpy.sin", "matplotlib.pyplot.show" ] ]
CameronPeloquin/Meteorite-Analysis
[ "095d567d1ae1dee795ded02abbcaa7596f8a33bc" ]
[ "plot_bar.py" ]
[ "\"\"\"\nContains code to plot a barplot of the counts for meteors in\nlow and high areas of population and vegetation.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Latitude above antarctica to filter out meteor strikes from it.\n# Antarctica is excluded because it contains the vast majority of\n# meteors in the dataset.\nFILTER_ANTARCTIC = -60\n\n# Filter year on meteors for population, as population\n# distribution was likely very different from today before 1900.\nFILTER_YEAR = 1900\n\ndef plot_bar(data, popup=False):\n \"\"\"\n Plots a barplot showing the number of meteors in low and high\n vegetation and population zones, excluding Antarctica. Shows the\n Figure if popup is true, saves it if false.\n \"\"\"\n plt.close()\n pop_data = data['pop_tiles']\n veg_data = data['veg_tiles']\n strikes = data['strikes']\n\n # Filters out data from Antarctica\n strikes = strikes[ strikes['lat'] > FILTER_ANTARCTIC ]\n\n # Filters meteors without a year found, and those found before 1900\n strikes['year'] = strikes['year'].str.slice(start=6, stop=10)\n strikes.year.fillna(0, inplace=True)\n strikes['year'] = strikes['year'].astype(int)\n recent_strikes = strikes[ strikes['year'] > FILTER_YEAR ]\n print(\"Meteor Strikes post 1900: {}\".format(recent_strikes['year']))\n\n meteors_pop = create_plot_data(pop_data, recent_strikes, 'Population')\n meteors_veg = create_plot_data(veg_data, strikes, 'Vegetation')\n\n fig, [ax1, ax2] = plt.subplots(2)\n fig.tight_layout()\n sns.countplot(x='Population', data=meteors_pop, ax=ax1)\n sns.countplot(x='Vegetation', data=meteors_veg, ax=ax2)\n \n\n if popup:\n plt.show()\n else:\n plt.savefig('plot_bar.png')\n\ndef create_plot_data(data, strikes, label):\n \"\"\"\n Converts the data into a pandas dataframe containing\n the counts of meteors that have landed in locations\n marked with high and low population or vegetaion and \n returns it.\n \"\"\"\n\n # Splits data into high and low groups.\n median = data['val'].median()\n high = data[ data['val'] > median ]\n low = data[ data['val'] <= median ]\n\n # Calculates the number of meteors that landed in\n # high and low areas\n high_meteors = gpd.sjoin(high, strikes, op='intersects')\n low_meteors = gpd.sjoin(low, strikes, op='intersects')\n num_high = len(high_meteors)\n num_low = len(low_meteors)\n\n # Puts data into a dataframe that can be used to create a count plot\n high_count = np.full((num_high,), 'High ' + label)\n low_count = np.full((num_low,), 'Low ' + label)\n count = np.concatenate((high_count, low_count))\n meteors = pd.DataFrame()\n meteors[label] = count\n return meteors\n\n\nif __name__ == '__main__':\n from ingest import ingest_all_data\n #plot_bar( ingest_all_data(fileset='test'), popup=True )\n plot_bar( ingest_all_data(), popup=True )" ]
[ [ "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.full", "numpy.concatenate", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.show" ] ]
kuasho/scenario_runner
[ "600398852ab2dfac419b2cc8b6f0df8420b2bd0e" ]
[ "src/openface_utils/carla_utils.py" ]
[ "#!/usr/bin/env python \n# Javier Araluce\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\n# Allows controlling a vehicle with a keyboard. For a simpler and more\n# documented example, please take a look at tutorial.py.\n\n\"\"\"\nWelcome to CARLA manual control.\n\nUse ARROWS or WASD keys for control.\n\n W : throttle\n S : brake\n A/D : steer left/right\n Q : toggle reverse\n Space : hand-brake\n P : toggle autopilot\n M : toggle manual transmission\n ,/. : gear up/down\n CTRL + W : toggle constant velocity mode at 60 km/h\n\n L : toggle next light type\n SHIFT + L : toggle high beam\n Z/X : toggle right/left blinker\n I : toggle interior light\n\n TAB : change sensor position\n ` or N : next sensor\n [1-9] : change to sensor [1-9]\n G : toggle radar visualization\n C : change weather (Shift+C reverse)\n Backspace : change vehicle\n\n R : toggle recording images to disk\n\n CTRL + R : toggle recording of simulation (replacing any previous)\n CTRL + P : start replaying last recorded simulation\n CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds)\n CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds)\n\n F1 : toggle HUD\n H/? : toggle help\n ESC : quit\n\"\"\"\n\nfrom __future__ import print_function\n\n\n# ==============================================================================\n# -- find carla module ---------------------------------------------------------\n# ==============================================================================\n\n\nimport glob\nimport os\nimport sys\n\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n# ==============================================================================\n# -- imports -------------------------------------------------------------------\n# ==============================================================================\n\n\nimport carla\n\nfrom carla import ColorConverter as cc\n\nimport argparse\nimport collections\nimport datetime\nimport logging\nimport math\nimport random\nimport re\nimport weakref\n\nfrom threading import Timer\nimport time\n\nif sys.version_info >= (3, 0):\n\n from configparser import ConfigParser\n\nelse:\n\n from ConfigParser import RawConfigParser as ConfigParser\n\ntry:\n import pygame\n from pygame.locals import KMOD_CTRL\n from pygame.locals import KMOD_SHIFT\n from pygame.locals import K_0\n from pygame.locals import K_9\n from pygame.locals import K_BACKQUOTE\n from pygame.locals import K_BACKSPACE\n from pygame.locals import K_COMMA\n from pygame.locals import K_DOWN\n from pygame.locals import K_ESCAPE\n from pygame.locals import K_F1\n from pygame.locals import K_LEFT\n from pygame.locals import K_PERIOD\n from pygame.locals import K_RIGHT\n from pygame.locals import K_SLASH\n from pygame.locals import K_SPACE\n from pygame.locals import K_TAB\n from pygame.locals import K_UP\n from pygame.locals import K_a\n from pygame.locals import K_c\n from pygame.locals import K_g\n from pygame.locals import K_d\n from pygame.locals import K_h\n from pygame.locals import K_m\n from pygame.locals import K_n\n from pygame.locals import K_p\n from pygame.locals import K_q\n from pygame.locals import K_r\n from 
pygame.locals import K_s\n from pygame.locals import K_w\n from pygame.locals import K_l\n from pygame.locals import K_i\n from pygame.locals import K_z\n from pygame.locals import K_x\n from pygame.locals import K_MINUS\n from pygame.locals import K_EQUALS\n\n from pygame.locals import K_4\n from pygame.locals import K_KP0\n from pygame.locals import K_KP4 \n\nexcept ImportError:\n raise RuntimeError('cannot import pygame, make sure pygame package is installed')\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\n\n# ==============================================================================\n# -- World ---------------------------------------------------------------------\n# ==============================================================================\n\nclass World(object):\n def __init__(self, carla_world, hud, args):\n\n self.transition_timer = args.transition_timer\n self.world = carla_world\n self.actor_role_name = args.rolename\n self.map = self.world.get_map()\n self.hud = hud\n self.player = None\n self.collision_sensor = None\n self.lane_invasion_sensor = None\n self.gnss_sensor = None\n self.camera_manager_rgb = None\n self.camera_manager_depth = None\n self.camera_manager_semantic = None\n self.camera_manager_lidar = None\n self._weather_presets = find_weather_presets()\n self._weather_index = 0\n self._actor_filter = args.filter\n self._gamma = args.gamma\n # self.point = args.point\n # self.orientation = args.orientation\n self.rgb_flag = 1 #args.RGB\n self.semantic_flag = 1 #args.Semantic\n self.lidar_flag = 0 #args.LIDAR\n self.depth_flag = 1 #args.depth\n self.previous_rendered = 0\n # self.sensors = ['0', 'Camera RGB', 'Camera Semantic Segmentation (CityScapes Palette)', 'Lidar (Ray-Cast)', 'Camera Depth (Raw)']\n self.sensors = ['0', 'Camera RGB', 'Camera Semantic Segmentation (Raw)', 'Lidar (Ray-Cast)', 'Camera Depth (Raw)']\n \n self.sensor_flags = [self.rgb_flag, self.semantic_flag, self.lidar_flag, self.depth_flag]\n self.args_width = args.width\n self.args_height = args.height\n\n self.restart()\n self.world.on_tick(hud.on_world_tick)\n self.recording_enabled = False\n self.recording_start = 0\n\n settings = self.world.get_settings()\n settings.synchronous_mode = True # Enables synchronous mode\n self.world.apply_settings(settings)\n\n def restart(self):\n # Keep same camera config if the camera manager exists.\n cam_index = self.camera_manager_rgb.index if self.camera_manager_rgb is not None else 0\n cam_pos_index = self.camera_manager_rgb._transform_index if self.camera_manager_rgb is not None else 0\n # Get a random blueprint.\n blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))\n blueprint.set_attribute('role_name', self.actor_role_name)\n if blueprint.has_attribute('color'):\n color = random.choice(blueprint.get_attribute('color').recommended_values)\n blueprint.set_attribute('color', color)\n # Spawn the player.\n if self.player is not None:\n spawn_point = self.player.get_transform()\n spawn_point.location.z += 2.0\n spawn_point.rotation.roll = 0.0\n spawn_point.rotation.pitch = 0.0\n self.destroy()\n self.player = self.world.try_spawn_actor(blueprint, spawn_point)\n while self.player is None:\n spawn_points = self.map.get_spawn_points()\n spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()\n spawn_point.location.x = int(self.point[0])\n spawn_point.location.y = int(self.point[1])\n spawn_point.location.z = 
int(self.point[2])\n spawn_point.rotation.yaw = int(self.orientation)\n self.player = self.world.try_spawn_actor(blueprint, spawn_point)\n # Set up the sensors.\n self.collision_sensor = CollisionSensor(self.player, self.hud)\n self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)\n self.gnss_sensor = GnssSensor(self.player)\n if self.rgb_flag:\n self.camera_manager_rgb = CameraManagerRGB(self.player, self.hud, self._gamma, self.args_width, self.args_height)\n self.camera_manager_rgb._transform_index = cam_pos_index\n self.camera_manager_rgb.set_sensor(cam_index, notify=False)\n if self.depth_flag:\n self.camera_manager_depth = CameraManagerDepth(self.player, self.hud, self._gamma, self.args_width, self.args_height)\n self.camera_manager_depth._transform_index = cam_pos_index\n self.camera_manager_depth.set_sensor(cam_index, notify=False)\n if self.semantic_flag:\n self.camera_manager_semantic = CameraManagerSemantic(self.player, self.hud, self._gamma, self.args_width, self.args_height)\n self.camera_manager_semantic._transform_index = cam_pos_index\n self.camera_manager_semantic.set_sensor(cam_index, notify=False)\n # if self.lidar_flag:\n # self.camera_manager_lidar = CameraManager(self.player, self.hud, self.args_width, self.args_height)\n # self.camera_manager_lidar._transform_index = cam_pos_index\n # self.camera_manager_lidar.set_sensor(cam_index, notify=False)\n actor_type = get_actor_display_name(self.player)\n self.hud.notification(actor_type)\n\n def next_weather(self, reverse=False):\n self._weather_index += -1 if reverse else 1\n self._weather_index %= len(self._weather_presets)\n preset = self._weather_presets[self._weather_index]\n self.hud.notification('Weather: %s' % preset[1])\n self.player.get_world().set_weather(preset[0])\n\n def tick(self, clock):\n self.hud.tick(self, clock)\n\n def render(self, display, camera_rendered):\n if camera_rendered == 1 and self.rgb_flag: self.camera_manager_rgb.render(display)\n elif camera_rendered == 2 and self.semantic_flag: self.camera_manager_semantic.render(display)\n elif camera_rendered == 3 and self.lidar_flag: self.camera_manager_lidar.render(display)\n elif camera_rendered == 4 and self.depth_flag: self.camera_manager_depth.render(display)\n if camera_rendered != self.previous_rendered:\n self.hud.notification(self.sensors[camera_rendered])\n if self.sensor_flags[camera_rendered-1] == 0: self.hud.notification(\"Not rendered sensor. 
Relaunch agent and activate sensor through Python arguments\")\n\n self.previous_rendered = camera_rendered #detects if rendered sensor has changed\n self.hud.render(display)\n\n def destroySensors(self):\n self.camera_manager_rgb.sensor.destroy()\n self.camera_manager_rgb.sensor = None\n self.camera_manager_rgb._index = None\n\n def destroy(self):\n actors = [\n self.collision_sensor.sensor,\n self.lane_invasion_sensor.sensor,\n self.gnss_sensor.sensor,\n self.player]\n \n if self.rgb_flag: actors.append(self.camera_manager_rgb.sensor)\n if self.depth_flag: actors.append(self.camera_manager_depth.sensor)\n if self.semantic_flag: actors.append(self.camera_manager_semantic.sensor)\n if self.lidar_flag: actors.append(self.camera_manager_lidar.sensor)\n\n for actor in actors:\n if actor is not None:\n actor.destroy()\n\n\n# ==============================================================================\n# -- CameraManagerRGB ----------------------------------------------------------\n# ==============================================================================\n\n\nclass CameraManagerRGB(object):\n def __init__(self, parent_actor, hud, gamma_correction, args_width, args_height):\n self.sensor = None\n self.surface = None\n self._parent = parent_actor\n self.hud = hud\n self.recording = False\n self.args_width = args_width\n self.args_height = args_height\n Attachment = carla.AttachmentType\n '''\n self._camera_transforms = [\t#Add different camera positions\n (carla.Transform(carla.Location(z=2.5)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15.0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=-8, y = -5 ,z=4), carla.Rotation(pitch=-25.0, yaw=20)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-5.0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid)]\n '''\n self._camera_transforms = [\t#Add different camera positions\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.4, y = -0.3 ,z = 1.4), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.5, y = -0.3 ,z = 1.4), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.6, y = -0.3 ,z = 1.4), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.7, y = -0.3 ,z = 1.4), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid)\n \n # (carla.Transform(carla.Location(x=0,z=20), carla.Rotation(pitch=-90, yaw=-90)), Attachment.Rigid),\n # (carla.Transform(carla.Location(z=2.5)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-8, z=3), carla.Rotation(pitch=0, yaw=0)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-10, z=4), carla.Rotation(pitch=0, yaw=0)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-8, z=4), carla.Rotation(pitch=-25.0, yaw=0)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-8), carla.Rotation(pitch=0, yaw=0)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-8, y = -5), carla.Rotation(pitch=0, yaw=0)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-8, y = -5 ,z=4), carla.Rotation(pitch=0, yaw=0)), Attachment.Rigid),\n # (carla.Transform(carla.Location(x=-8, y = -5 ,z=4), carla.Rotation(pitch=-25.0, yaw=0)), Attachment.Rigid),\n # 
(carla.Transform(carla.Location(x=-8, y = -5 ,z=4), carla.Rotation(pitch=-25.0, yaw=20)), Attachment.Rigid)\n            ]\n        self._transform_index = 1\n        self.sensors = [['sensor.camera.rgb', cc.Raw, 'Camera RGB']]\n        world = self._parent.get_world()\n        bp_library = world.get_blueprint_library()\n        for item in self.sensors:\n            bp = bp_library.find(item[0])\n            #bp.set_attribute('image_size_x', str(hud.dim[0]))\n            #bp.set_attribute('image_size_y', str(hud.dim[1]))\n            bp.set_attribute('image_size_x', str(self.args_width))\n            bp.set_attribute('image_size_y', str(self.args_height))\n            bp.set_attribute('fov', '120') #100\n            bp.set_attribute('sensor_tick', '0')\n            if bp.has_attribute('gamma'):\n                bp.set_attribute('gamma', str(gamma_correction))\n            item.append(bp)\n        self.index = None\n\n    def toggle_camera(self):\n        self._transform_index = (self._transform_index + 1) % len(self._camera_transforms)\n        self.set_sensor(self.index, notify=False, force_respawn=True)\n\n    def set_sensor(self, index, notify=True, force_respawn=False):\n        index = index % len(self.sensors)\n        needs_respawn = True if self.index is None else \\\n            (force_respawn or (self.sensors[index][0] != self.sensors[self.index][0]))\n        if needs_respawn:\n            if self.sensor is not None:\n                self.sensor.destroy()\n                self.surface = None\n            self.sensor = self._parent.get_world().spawn_actor(\n                self.sensors[index][-1],\n                self._camera_transforms[self._transform_index][0],\n                attach_to=self._parent,\n                attachment_type=self._camera_transforms[self._transform_index][1])\n            # We need to pass the lambda a weak reference to self to avoid\n            # circular reference.\n            weak_self = weakref.ref(self)\n            self.sensor.listen(lambda image: CameraManagerRGB._parse_image(weak_self, image))\n        if notify:\n            self.hud.notification(self.sensors[index][2])\n        self.index = index\n\n    def next_sensor(self):\n        self.set_sensor(self.index + 1)\n\n    def toggle_recording(self):\n        self.recording = not self.recording\n        self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))\n\n    def render(self, display):\n        if self.surface is not None:\n            display.blit(self.surface, (0, 0))\n\n    @staticmethod\n    def _parse_image(weak_self, image):\n        self = weak_self()\n        if not self:\n            return\n        image.convert(self.sensors[self.index][1])\n        array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n        array = np.reshape(array, (image.height, image.width, 4))\n        array = array[:, :, :3]\n        array = array[:, :, ::-1]\n        self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n        if self.recording:\n            image.save_to_disk('_out/%08d' % image.frame)\n\n\n# ==============================================================================\n# -- CameraManagerDepth --------------------------------------------------------\n# ==============================================================================\n\n\nclass CameraManagerDepth(object):\n    def __init__(self, parent_actor, hud, gamma_correction, args_width, args_height):\n        self.sensor = None\n        self.surface = None\n        self._parent = parent_actor\n        self.hud = hud\n        self.recording = False\n        self.args_width = args_width\n        self.args_height = args_height\n        Attachment = carla.AttachmentType\n        self._camera_transforms = [\t#Add different camera positions\n            (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n            (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n            (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), 
carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid)\n # (carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15.0)), Attachment.Rigid)\n ]\n self.transform_index = 1\n self.sensors = [\n # ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],\n ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)']\n # ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)']\n ]\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self.sensors:\n bp = bp_library.find(item[0])\n #bp.set_attribute('image_size_x', str(hud.dim[0]))\n #bp.set_attribute('image_size_y', str(hud.dim[1]))\n bp.set_attribute('image_size_x', str(self.args_width))\n bp.set_attribute('image_size_y', str(self.args_height))\n bp.set_attribute('fov', '120') #100\n if bp.has_attribute('gamma'):\n bp.set_attribute('gamma', str(gamma_correction))\n item.append(bp)\n self.index = None\n\n def toggle_camera(self):\n self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)\n self.set_sensor(self.index, notify=False, force_respawn=True)\n\n def set_sensor(self, index, notify=True, force_respawn=False):\n index = index % len(self.sensors)\n needs_respawn = True if self.index is None else \\\n (force_respawn or (self.sensors[index][0] != self.sensors[self.index][0]))\n if needs_respawn:\n if self.sensor is not None:\n self.sensor.destroy()\n self.surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self.sensors[index][-1],\n self._camera_transforms[self.transform_index][0],\n attach_to=self._parent,\n attachment_type=self._camera_transforms[self.transform_index][1])\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda image: CameraManagerDepth._parse_image(weak_self, image))\n if notify:\n self.hud.notification(self.sensors[index][2])\n self.index = index\n\n def next_sensor(self):\n self.set_sensor(self.index + 1)\n\n def toggle_recording(self):\n self.recording = not self.recording\n self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))\n\n def render(self, display):\n if self.surface is not None:\n display.blit(self.surface, (0, 0))\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n image.convert(self.sensors[self.index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self.recording:\n image.save_to_disk('_out/%08d' % image.frame)\n\n\n# ==============================================================================\n# -- CameraManagerSemantic -----------------------------------------------------\n# ==============================================================================\n\n\nclass CameraManagerSemantic(object):\n def __init__(self, parent_actor, hud, gamma_correction, args_width, args_height):\n self.sensor = None\n self.surface = None\n self._parent = parent_actor\n self.hud = hud\n self.recording = False\n self.args_width = args_width\n self.args_height = args_height\n Attachment = carla.AttachmentType\n self._camera_transforms = [\t#Add different camera positions\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch 
= 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(z=2.5)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15.0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid)]\n self.transform_index = 1\n # self.sensors = [\n # ['sensor.camera.semantic_segmentation', cc.CityScapesPalette, 'Camera Semantic Segmentation (CityScapes Palette)'],\n # ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)']]\n self.sensors = [\n ['sensor.camera.semantic_segmentation', cc.CityScapesPalette, 'Camera Semantic Segmentation (CityScapes Palette)']]\n\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self.sensors:\n bp = bp_library.find(item[0])\n bp.set_attribute('image_size_x', str(self.args_width))\n bp.set_attribute('image_size_y', str(self.args_height))\n bp.set_attribute('fov', '120') #100\n if bp.has_attribute('gamma'):\n bp.set_attribute('gamma', str(gamma_correction))\n item.append(bp)\n self.index = None\n\n def toggle_camera(self):\n self._transform_index = (self._transform_index + 1) % len(self._camera_transforms)\n self.set_sensor(self.index, notify=False, force_respawn=True)\n\n def set_sensor(self, index, notify=True, force_respawn=False):\n index = index % len(self.sensors)\n needs_respawn = True if self.index is None else \\\n (force_respawn or (self.sensors[index][0] != self.sensors[self.index][0]))\n if needs_respawn:\n if self.sensor is not None:\n self.sensor.destroy()\n self.surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self.sensors[index][-1],\n self._camera_transforms[self._transform_index][0],\n attach_to=self._parent,\n attachment_type=self._camera_transforms[self.transform_index][1])\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda image: CameraManagerSemantic._parse_image(weak_self, image))\n if notify:\n self.hud.notification(self.sensors[index][2])\n self.index = index\n\n def next_sensor(self):\n self.set_sensor(self.index + 1)\n\n\n def toggle_recording(self):\n self.recording = not self.recording\n self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))\n\n\n def render(self, display):\n if self.surface is not None:\n display.blit(self.surface, (0, 0))\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n image.convert(self.sensors[self.index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self.recording:\n image.save_to_disk('_out/%08d' % image.frame)\n\n\n\n# # ==============================================================================\n# # -- CameraManager -------------------------------------------------------------\n# # ==============================================================================\n\n\nclass CameraManager(object):\n def __init__(self, parent_actor, hud, gamma_correction):\n self.sensor = None\n self.surface = None\n self._parent = parent_actor\n self.hud = hud\n self.recording = False\n bound_y = 0.5 + self._parent.bounding_box.extent.y\n Attachment = carla.AttachmentType\n self._camera_transforms = [\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), 
carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid),\n (carla.Transform(carla.Location(x = 0.2, y = -0.3 ,z = 1.25), carla.Rotation(pitch = 0, roll = 0, yaw = 0)), Attachment.Rigid)]\n\n self.transform_index = 1\n self.sensors = [\n ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],\n ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],\n ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,\n 'Camera Semantic Segmentation (CityScapes Palette)', {}]]\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self.sensors:\n bp = bp_library.find(item[0])\n if item[0].startswith('sensor.camera'):\n bp.set_attribute('image_size_x', str(hud.dim[0]))\n bp.set_attribute('image_size_y', str(hud.dim[1]))\n bp.set_attribute('fov', '120') #100\n if bp.has_attribute('gamma'):\n bp.set_attribute('gamma', str(gamma_correction))\n for attr_name, attr_value in item[3].items():\n bp.set_attribute(attr_name, attr_value)\n elif item[0].startswith('sensor.lidar'):\n self.lidar_range = 50\n for attr_name, attr_value in item[3].items():\n bp.set_attribute(attr_name, attr_value)\n if attr_name == 'range':\n self.lidar_range = float(attr_value)\n\n\n item.append(bp)\n self.index = None\n\n def toggle_camera(self):\n self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)\n self.set_sensor(self.index, notify=False, force_respawn=True)\n\n def set_sensor(self, index, notify=True, force_respawn=False):\n index = index % len(self.sensors)\n needs_respawn = True if self.index is None else \\\n (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))\n if needs_respawn:\n if self.sensor is not None:\n self.sensor.destroy()\n self.surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self.sensors[index][-1],\n self._camera_transforms[self.transform_index][0],\n attach_to=self._parent,\n attachment_type=self._camera_transforms[self.transform_index][1])\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))\n if notify:\n self.hud.notification(self.sensors[index][2])\n self.index = index\n\n def next_sensor(self):\n self.set_sensor(self.index + 1)\n\n def toggle_recording(self):\n self.recording = not self.recording\n self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))\n\n def render(self, display):\n if self.surface is not None:\n display.blit(self.surface, (0, 0))\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n if self.sensors[self.index][0].startswith('sensor.lidar'):\n points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))\n points = np.reshape(points, (int(points.shape[0] / 4), 4))\n lidar_data = np.array(points[:, :2])\n lidar_data *= min(self.hud.dim) / (2.0 * self.lidar_range)\n lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])\n lidar_data = np.fabs(lidar_data) # pylint: disable=E1111\n lidar_data = lidar_data.astype(np.int32)\n lidar_data = np.reshape(lidar_data, 
(-1, 2))\n lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)\n lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)\n lidar_img[tuple(lidar_data.T)] = (255, 255, 255)\n self.surface = pygame.surfarray.make_surface(lidar_img)\n elif self.sensors[self.index][0].startswith('sensor.camera.dvs'):\n # Example of converting the raw_data from a carla.DVSEventArray\n # sensor into a NumPy array and using it as an image\n dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([\n ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool)]))\n dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)\n # Blue is positive, red is negative\n dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255\n self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))\n else:\n image.convert(self.sensors[self.index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self.recording:\n image.save_to_disk('_out/%08d' % image.frame)\n\n\n# ==============================================================================\n# -- Timer ---------------------------------------------------------------------\n# ==============================================================================\n\nclass RepeatTimer(Timer):\n def run(self):\n while not self.finished.wait(self.interval):\n self.function(*self.args, **self.kwargs)\n\n# ==============================================================================\n# -- KeyboardControl -----------------------------------------------------------\n# ==============================================================================\n\n\nclass KeyboardControl(object):\n \"\"\"Class that handles keyboard input.\"\"\"\n def __init__(self, world, start_in_autopilot):\n\n self.camera_rendered = 1\n self.transition_timer = world.transition_timer\n\n self._autopilot_enabled = start_in_autopilot\n if isinstance(world.player, carla.Vehicle):\n self._control = carla.VehicleControl()\n self._lights = carla.VehicleLightState.NONE\n world.player.set_autopilot(self._autopilot_enabled)\n world.player.set_light_state(self._lights)\n elif isinstance(world.player, carla.Walker):\n self._control = carla.WalkerControl()\n self._autopilot_enabled = False\n self._rotation = world.player.get_transform().rotation\n else:\n raise NotImplementedError(\"Actor type not supported\")\n self._steer_cache = 0.0\n world.hud.notification(\"Press 'H' or '?' 
for help.\", seconds=4.0)\n\n self.timer_mode = RepeatTimer(self.transition_timer, lambda:self.change_autonomous_mode(world))\n self.flag_timer = False\n\n\n self.attention = True\n self.flag_attention = True\n self.emergency_break = False\n \n # initialize steering wheel\n pygame.joystick.init()\n\n self.joystick_count = pygame.joystick.get_count()\n if self.joystick_count > 1:\n raise ValueError(\"Please Connect Just One Joystick\")\n # elif self.joystick_count == 0:\n # raise ValueError(\"Please Connect One Joystick\")\n elif self.joystick_count == 1:\n self._joystick = pygame.joystick.Joystick(0)\n self._joystick.init()\n\n self._steer_idx = int(0)\n \n self._throttle_idx = int(1)\n \n self._brake_idx = int(2)\n self._reverse_idx = int(3)\n self._handbrake_idx = int(4)\n\n\n pygame.mixer.init()\n path = 'openface_utils/beep-01a.wav'\n self._change_mode_beep = pygame.mixer.Sound(path)\n # if (start_in_autopilot):\n # world.player.enable_constant_velocity(carla.Vector3D(7, 0, 0)) #Inital velocity contant\n # world.constant_velocity_enabled = True\n self.steer_cmd = 0\n self.brake_cmd = 0\n self.thorttle_cmd = 0\n\n\n\n def begin_timer(self, world):\n self.timer_mode.start()\n self._change_mode_beep.play()\n world.hud.warning_change_drive_mode(self._autopilot_enabled)\n\n def change_autonomous_mode(self, world):\n\n self.flag_timer = False\n self._change_mode_beep.stop() \n \n if ((self._autopilot_enabled == True) and (self.attention == False) and (self.flag_attention == True)):\n self._autopilot_enabled = not self._autopilot_enabled\n world.player.set_autopilot(self._autopilot_enabled)\n self._control.brake = 1.0\n world.player.apply_control(self._control)\n self.flag_attention = False\n self.emergency_break = True\n else:\n self._autopilot_enabled = not self._autopilot_enabled\n world.player.set_autopilot(self._autopilot_enabled)\n\n\n\n\n self.timer_mode.cancel()\n self.timer_mode = RepeatTimer(self.transition_timer, lambda:self.change_autonomous_mode(world))\n\n def parse_events(self, client, world, clock):\n if isinstance(self._control, carla.VehicleControl):\n current_lights = self._lights\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if self._is_quit_shortcut(event.key):\n return True\n elif event.key == K_BACKSPACE:\n if self._autopilot_enabled:\n world.player.set_autopilot(False)\n world.restart()\n world.player.set_autopilot(True)\n else:\n world.restart()\n elif event.key == K_F1:\n world.hud.toggle_info()\n elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):\n world.hud.help.toggle()\n elif event.key == K_TAB:\n if self.camera_rendered == 1: world.camera_manager_rgb.toggle_camera()\n elif self.camera_rendered == 2: world.camera_manager_semantic.toggle_camera()\n elif self.camera_rendered == 4: world.camera_manager_depth.toggle_camera()\n # world.camera_manager.toggle_camera()\n elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:\n world.next_weather(reverse=True)\n elif event.key == K_c:\n world.next_weather()\n elif event.key == K_g:\n world.toggle_radar()\n elif event.key == K_BACKQUOTE:\n world.camera_manager.next_sensor()\n elif event.key == K_n:\n if self.camera_rendered == 1: world.camera_manager_rgb.next_sensor()\n elif self.camera_rendered == 2: world.camera_manager_semantic.next_sensor()\n elif self.camera_rendered == 3: world.camera_manager_lidar.next_sensor()\n elif self.camera_rendered == 4: world.camera_manager_depth.next_sensor()\n elif (event.key 
> K_0 and event.key <= K_4) or (event.key > K_KP0 and event.key <= K_KP4):\n #world.camera_manager_rgb.set_sensor(event.key - 1 - K_0)\n if event.key < 60: self.camera_rendered = event.key-48 \n else: self.camera_rendered = event.key-256 \n\n # world.camera_manager.next_sensor()\n elif event.key == K_w and (pygame.key.get_mods() & KMOD_CTRL):\n if world.constant_velocity_enabled:\n world.player.disable_constant_velocity()\n world.constant_velocity_enabled = False\n world.hud.notification(\"Disabled Constant Velocity Mode\")\n else:\n world.player.enable_constant_velocity(carla.Vector3D(17, 0, 0))\n world.constant_velocity_enabled = True\n world.hud.notification(\"Enabled Constant Velocity Mode at 60 km/h\")\n elif event.key > K_0 and event.key <= K_9:\n world.camera_manager.set_sensor(event.key - 1 - K_0)\n elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):\n world.camera_manager.toggle_recording()\n elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):\n if (world.recording_enabled):\n client.stop_recorder()\n world.recording_enabled = False\n world.hud.notification(\"Recorder is OFF\")\n else:\n client.start_recorder(\"manual_recording.rec\")\n world.recording_enabled = True\n world.hud.notification(\"Recorder is ON\")\n elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):\n # stop recorder\n client.stop_recorder()\n world.recording_enabled = False\n # work around to fix camera at start of replaying\n current_index = world.camera_manager.index\n world.destroy_sensors()\n # disable autopilot\n self._autopilot_enabled = False\n world.player.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\"Replaying file 'manual_recording.rec'\")\n # replayer\n client.replay_file(\"manual_recording.rec\", world.recording_start, 0, 0)\n world.camera_manager.set_sensor(current_index)\n elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):\n if pygame.key.get_mods() & KMOD_SHIFT:\n world.recording_start -= 10\n else:\n world.recording_start -= 1\n world.hud.notification(\"Recording start time is %d\" % (world.recording_start))\n elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):\n if pygame.key.get_mods() & KMOD_SHIFT:\n world.recording_start += 10\n else:\n world.recording_start += 1\n world.hud.notification(\"Recording start time is %d\" % (world.recording_start))\n if isinstance(self._control, carla.VehicleControl):\n if event.key == K_q:\n self._control.gear = 1 if self._control.reverse else -1\n elif event.key == K_m:\n self._control.manual_gear_shift = not self._control.manual_gear_shift\n self._control.gear = world.player.get_control().gear\n world.hud.notification('%s Transmission' %\n ('Manual' if self._control.manual_gear_shift else 'Automatic'))\n elif self._control.manual_gear_shift and event.key == K_COMMA:\n self._control.gear = max(-1, self._control.gear - 1)\n elif self._control.manual_gear_shift and event.key == K_PERIOD:\n self._control.gear = self._control.gear + 1\n elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:\n self.begin_timer(world)\n elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:\n current_lights ^= carla.VehicleLightState.Special1\n elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:\n current_lights ^= carla.VehicleLightState.HighBeam\n elif event.key == K_l:\n # Use 'L' key to switch between lights:\n # closed -> position -> low beam -> fog\n if not self._lights & carla.VehicleLightState.Position:\n world.hud.notification(\"Position lights\")\n current_lights 
|= carla.VehicleLightState.Position\n else:\n world.hud.notification(\"Low beam lights\")\n current_lights |= carla.VehicleLightState.LowBeam\n if self._lights & carla.VehicleLightState.LowBeam:\n world.hud.notification(\"Fog lights\")\n current_lights |= carla.VehicleLightState.Fog\n if self._lights & carla.VehicleLightState.Fog:\n world.hud.notification(\"Lights off\")\n current_lights ^= carla.VehicleLightState.Position\n current_lights ^= carla.VehicleLightState.LowBeam\n current_lights ^= carla.VehicleLightState.Fog\n elif event.key == K_i:\n current_lights ^= carla.VehicleLightState.Interior\n elif event.key == K_z:\n current_lights ^= carla.VehicleLightState.LeftBlinker\n elif event.key == K_x:\n current_lights ^= carla.VehicleLightState.RightBlinker\n\n if not self._autopilot_enabled:\n parse_control = True\n if isinstance(self._control, carla.VehicleControl):\n if self.joystick_count > 0:\n self._parse_vehicle_wheel(parse_control)\n else:\n self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n self._control.reverse = self._control.gear < 0\n # Set automatic control-related vehicle lights\n if self._control.brake:\n current_lights |= carla.VehicleLightState.Brake\n else: # Remove the Brake flag\n current_lights &= ~carla.VehicleLightState.Brake\n if self._control.reverse:\n current_lights |= carla.VehicleLightState.Reverse\n else: # Remove the Reverse flag\n current_lights &= ~carla.VehicleLightState.Reverse\n if current_lights != self._lights: # Change the light state only if necessary\n self._lights = current_lights\n world.player.set_light_state(carla.VehicleLightState(self._lights))\n elif isinstance(self._control, carla.WalkerControl):\n self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)\n\n if (self.flag_attention == True): \n world.player.apply_control(self._control)\n else:\n world.hud.emergency_break() \n self._control.brake = 1.0\n world.player.apply_control(self._control)\n else: #Save steer values\n parse_control = False\n if self.joystick_count > 0:\n self._parse_vehicle_wheel(parse_control)\n\n\n def _parse_vehicle_keys(self, keys, milliseconds):\n if keys[K_UP] or keys[K_w]:\n self._control.throttle = min(self._control.throttle + 0.01, 1)\n else:\n self._control.throttle = 0.0\n\n if keys[K_DOWN] or keys[K_s]:\n self._control.brake = min(self._control.brake + 0.2, 1)\n else:\n self._control.brake = 0\n\n steer_increment = 5e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n if self._steer_cache > 0:\n self._steer_cache = 0\n else:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n if self._steer_cache < 0:\n self._steer_cache = 0\n else:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.7, max(-0.7, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.hand_brake = keys[K_SPACE]\n\n def _parse_vehicle_wheel(self, parse_control):\n numAxes = self._joystick.get_numaxes()\n jsInputs = [float(self._joystick.get_axis(i)) for i in range(numAxes)]\n # print (jsInputs)\n jsButtons = [float(self._joystick.get_button(i)) for i in\n range(self._joystick.get_numbuttons())]\n\n # Custom function to map range of inputs [1, -1] to outputs [0, 1] i.e 1 from inputs means nothing is pressed\n # For the steering, it seems fine as it is\n K1 = 0.53 # 0.55\n steerCmd = K1 * math.tan(1.1 * jsInputs[self._steer_idx])\n\n K2 = 1.6 # 1.6\n throttleCmd = K2 + (2.05 * math.log10(\n -0.7 * jsInputs[self._throttle_idx] + 1.4) - 1.2) / 0.92\n if 
throttleCmd <= 0:\n            throttleCmd = 0\n        elif throttleCmd > 1:\n            throttleCmd = 1\n\n        brakeCmd = 1.6 + (2.05 * math.log10(\n            -0.7 * jsInputs[self._brake_idx] + 1.4) - 1.2) / 0.92\n        if brakeCmd <= 0:\n            brakeCmd = 0\n        elif brakeCmd > 1:\n            brakeCmd = 1\n\n        # print(self._control.throttle, \" \", throttleCmd)\n        if (parse_control == True):\n            self._control.steer = steerCmd\n            self._control.brake = brakeCmd\n            self._control.throttle = throttleCmd\n\n        #toggle = jsButtons[self._reverse_idx]\n\n        self._control.hand_brake = bool(jsButtons[self._handbrake_idx])\n        self.steer_cmd = steerCmd\n        self.brake_cmd = brakeCmd\n        self.throttle_cmd = throttleCmd\n\n    def _parse_walker_keys(self, keys, milliseconds, world):\n        self._control.speed = 0.0\n        if keys[K_DOWN] or keys[K_s]:\n            self._control.speed = 0.0\n        if keys[K_LEFT] or keys[K_a]:\n            self._control.speed = .01\n            self._rotation.yaw -= 0.08 * milliseconds\n        if keys[K_RIGHT] or keys[K_d]:\n            self._control.speed = .01\n            self._rotation.yaw += 0.08 * milliseconds\n        if keys[K_UP] or keys[K_w]:\n            self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed\n        self._control.jump = keys[K_SPACE]\n        self._rotation.yaw = round(self._rotation.yaw, 1)\n        self._control.direction = self._rotation.get_forward_vector()\n\n    @staticmethod\n    def _is_quit_shortcut(key):\n        return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)\n\n\n# ==============================================================================\n# -- HUD -----------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HUD(object):\n    def __init__(self, width, height):\n        self.dim = (width, height)\n        font = pygame.font.Font(pygame.font.get_default_font(), 20)\n        font2 = pygame.font.Font(pygame.font.get_default_font(), 30)\n        font_name = 'courier' if os.name == 'nt' else 'mono'\n        fonts = [x for x in pygame.font.get_fonts() if font_name in x]\n        default_font = 'ubuntumono'\n        mono = default_font if default_font in fonts else fonts[0]\n        mono = pygame.font.match_font(mono)\n        self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)\n        self._notifications = FadingText(font, (width, 40), (0, height - 40))\n        self._notifications_permanent = FadingText(font, (200, 40), ((width * 2/3) - 200, 0))\n        self._notifications_warning = FadingText(font, (500, 40), (width - (width / 2) - 275, 100))\n        self._notifications_emergency = FadingText(font2, (340, 50), (width - (width / 2) - 150, 100))\n        self.help = HelpText(pygame.font.Font(mono, 16), width, height)\n        self.server_fps = 0\n        self.frame = 0\n        self.simulation_time = 0\n        self._show_info = True\n        self._info_text = []\n        self._server_clock = pygame.time.Clock()\n\n    def on_world_tick(self, timestamp):\n        self._server_clock.tick()\n        self.server_fps = self._server_clock.get_fps()\n        self.frame = timestamp.frame\n        self.simulation_time = timestamp.elapsed_seconds\n\n    def tick(self, world, clock):\n        self._notifications.tick(world, clock)\n        self._notifications_permanent.tick(world, clock)\n        self._notifications_emergency.tick(world, clock)\n        self._notifications_warning.tick(world, clock)\n        self.transition_timer = world.transition_timer\n\n        self.drive_mode_display()\n        if not self._show_info:\n            return\n        t = world.player.get_transform()\n        v = world.player.get_velocity()\n        c = world.player.get_control()\n        compass = world.imu_sensor.compass\n        heading = 'N' if compass > 270.5 or compass < 89.5 else ''\n        heading += 'S' if 
90.5 < compass < 269.5 else ''\n heading += 'E' if 0.5 < compass < 179.5 else ''\n heading += 'W' if 180.5 < compass < 359.5 else ''\n colhist = world.collision_sensor.get_collision_history()\n collision = [colhist[x + self.frame - 200] for x in range(0, 200)]\n max_col = max(1.0, max(collision))\n collision = [x / max_col for x in collision]\n vehicles = world.world.get_actors().filter('vehicle.*')\n self._info_text = [\n 'Server: % 16.0f FPS' % self.server_fps,\n 'Client: % 16.0f FPS' % clock.get_fps(),\n '',\n 'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),\n 'Map: % 20s' % world.map.name,\n 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),\n '',\n 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),\n u'Compass:% 17.0f\\N{DEGREE SIGN} % 2s' % (compass, heading),\n 'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),\n 'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),\n 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),\n 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),\n 'Height: % 18.0f m' % t.location.z,\n '']\n if isinstance(c, carla.VehicleControl):\n self._info_text += [\n ('Throttle:', c.throttle, 0.0, 1.0),\n ('Steer:', c.steer, -1.0, 1.0),\n ('Brake:', c.brake, 0.0, 1.0),\n ('Reverse:', c.reverse),\n ('Hand brake:', c.hand_brake),\n ('Manual:', c.manual_gear_shift),\n 'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]\n elif isinstance(c, carla.WalkerControl):\n self._info_text += [\n ('Speed:', c.speed, 0.0, 5.556),\n ('Jump:', c.jump)]\n self._info_text += [\n '',\n 'Collision:',\n collision,\n '',\n 'Number of vehicles: % 8d' % len(vehicles)]\n if len(vehicles) > 1:\n self._info_text += ['Nearby vehicles:']\n distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)\n vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id]\n for d, vehicle in sorted(vehicles, key=lambda vehicles: vehicles[0]):\n if d > 200.0:\n break\n vehicle_type = get_actor_display_name(vehicle, truncate=22)\n self._info_text.append('% 4dm %s' % (d, vehicle_type))\n \n \n def toggle_info(self):\n self._show_info = not self._show_info\n\n def notification(self, text, seconds=2.0):\n self._notifications.set_text(text, seconds=seconds)\n\n def error(self, text):\n self._notifications.set_text('Error: %s' % text, (255, 0, 0))\n\n def render(self, display):\n if self._show_info:\n info_surface = pygame.Surface((220, self.dim[1]))\n info_surface.set_alpha(100)\n display.blit(info_surface, (0, 0))\n v_offset = 4\n bar_h_offset = 100\n bar_width = 106\n for item in self._info_text:\n if v_offset + 18 > self.dim[1]:\n break\n if isinstance(item, list):\n if len(item) > 1:\n points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]\n pygame.draw.lines(display, (255, 136, 0), False, points, 2)\n item = None\n v_offset += 18\n elif isinstance(item, tuple):\n if isinstance(item[1], bool):\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))\n pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)\n else:\n rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect_border, 1)\n f = (item[1] - item[2]) / (item[3] - item[2])\n if item[2] < 0.0:\n rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))\n else:\n rect = pygame.Rect((bar_h_offset, v_offset + 8), 
(f * bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect)\n item = item[0]\n if item: # At this point has to be a str.\n surface = self._font_mono.render(item, True, (255, 255, 255))\n display.blit(surface, (8, v_offset))\n v_offset += 18\n self._notifications.render(display)\n self._notifications_permanent.render(display)\n self._notifications_warning.render(display)\n self._notifications_emergency.render(display)\n self.help.render(display)\n\n def warning_change_drive_mode(self, autopilot):\n if (autopilot):\n text = 'Manual driving mode will be set in ' + str(self.transition_timer) + ' seconds'\n else:\n text = 'Autonomous driving mode will be set in ' + str(self.transition_timer) + ' seconds'\n\n self._notifications_warning.set_text(text, seconds = self.transition_timer)\n\n def drive_mode_display(self):\n if (self.autopilot_enabled):\n text = 'Autonomous mode'\n else:\n text = 'Manual mode'\n # print(text)\n self._notifications_permanent.set_text(text, seconds = 1)\n \n def emergency_break(self):\n text = 'EMERGENCY BREAK'\n color=(255, 0, 0)\n self._notifications_emergency.set_text(text, color = color, seconds = 1)\n# ==============================================================================\n# -- FadingText ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass FadingText(object):\n def __init__(self, font, dim, pos):\n self.font = font\n self.dim = dim\n self.pos = pos\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n\n def set_text(self, text, color=(255, 255, 255), seconds=2.0):\n text_texture = self.font.render(text, True, color)\n self.surface = pygame.Surface(self.dim)\n self.seconds_left = seconds\n self.surface.fill((0, 0, 0, 0))\n self.surface.blit(text_texture, (10, 11))\n \n def tick(self, _, clock):\n delta_seconds = 1e-3 * clock.get_time()\n self.seconds_left = max(0.0, self.seconds_left - delta_seconds)\n self.surface.set_alpha(500.0 * self.seconds_left)\n\n def render(self, display):\n display.blit(self.surface, self.pos)\n\n# ==============================================================================\n# -- HelpText ------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HelpText(object):\n \"\"\"Helper class to handle text output using pygame\"\"\"\n def __init__(self, font, width, height):\n lines = __doc__.split('\\n')\n self.font = font\n self.line_space = 18\n self.dim = (780, len(lines) * self.line_space + 12)\n self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n self.surface.fill((0, 0, 0, 0))\n for n, line in enumerate(lines):\n text_texture = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text_texture, (22, n * self.line_space))\n self._render = False\n self.surface.set_alpha(220)\n\n def toggle(self):\n self._render = not self._render\n\n def render(self, display):\n if self._render:\n display.blit(self.surface, self.pos)\n\n# ==============================================================================\n# -- Global functions ----------------------------------------------------------\n# ==============================================================================\n\n\ndef find_weather_presets():\n rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n name = lambda x: ' 
'.join(m.group(0) for m in rgx.finditer(x))\n presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]\n return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]\n\n\ndef get_actor_display_name(actor, truncate=250):\n name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])\n return (name[:truncate - 1] + u'\\u2026') if len(name) > truncate else name\n\n\n\n# ==============================================================================\n# -- CollisionSensor -----------------------------------------------------------\n# ==============================================================================\n\n\nclass CollisionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self.history = []\n self._parent = parent_actor\n self.hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.collision')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))\n\n def get_collision_history(self):\n history = collections.defaultdict(int)\n for frame, intensity in self.history:\n history[frame] += intensity\n return history\n\n @staticmethod\n def _on_collision(weak_self, event):\n self = weak_self()\n if not self:\n return\n actor_type = get_actor_display_name(event.other_actor)\n self.hud.notification('Collision with %r' % actor_type)\n impulse = event.normal_impulse\n intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)\n self.history.append((event.frame, intensity))\n if len(self.history) > 4000:\n self.history.pop(0)\n\n\n# ==============================================================================\n# -- LaneInvasionSensor --------------------------------------------------------\n# ==============================================================================\n\n\nclass LaneInvasionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._parent = parent_actor\n self.hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.lane_invasion')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))\n\n @staticmethod\n def _on_invasion(weak_self, event):\n self = weak_self()\n if not self:\n return\n lane_types = set(x.type for x in event.crossed_lane_markings)\n text = ['%r' % str(x).split()[-1] for x in lane_types]\n self.hud.notification('Crossed line %s' % ' and '.join(text))\n\n\n# ==============================================================================\n# -- GnssSensor ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass GnssSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.lat = 0.0\n self.lon = 0.0\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.gnss')\n self.sensor = world.spawn_actor(bp, carla.Transform(carla.Location(x=1.0, z=2.8)), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid 
circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event))\n\n @staticmethod\n def _on_gnss_event(weak_self, event):\n self = weak_self()\n if not self:\n return\n self.lat = event.latitude\n self.lon = event.longitude\n\n\n# ==============================================================================\n# -- IMUSensor -----------------------------------------------------------------\n# ==============================================================================\n\n\nclass IMUSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.accelerometer = (0.0, 0.0, 0.0)\n self.gyroscope = (0.0, 0.0, 0.0)\n self.compass = 0.0\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.imu')\n self.sensor = world.spawn_actor(\n bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(\n lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))\n\n @staticmethod\n def _IMU_callback(weak_self, sensor_data):\n self = weak_self()\n if not self:\n return\n limits = (-99.9, 99.9)\n self.accelerometer = (\n max(limits[0], min(limits[1], sensor_data.accelerometer.x)),\n max(limits[0], min(limits[1], sensor_data.accelerometer.y)),\n max(limits[0], min(limits[1], sensor_data.accelerometer.z)))\n self.gyroscope = (\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.x))),\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.y))),\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.z))))\n self.compass = math.degrees(sensor_data.compass)\n\n\n# ==============================================================================\n# -- RadarSensor ---------------------------------------------------------------\n# ==============================================================================\n\n\nclass RadarSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.velocity_range = 7.5 # m/s\n world = self._parent.get_world()\n self.debug = world.debug\n bp = world.get_blueprint_library().find('sensor.other.radar')\n bp.set_attribute('horizontal_fov', str(35))\n bp.set_attribute('vertical_fov', str(20))\n self.sensor = world.spawn_actor(\n bp,\n carla.Transform(\n carla.Location(x=2.8, z=1.0),\n carla.Rotation(pitch=5)),\n attach_to=self._parent)\n # We need a weak reference to self to avoid circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(\n lambda radar_data: RadarSensor._Radar_callback(weak_self, radar_data))\n\n @staticmethod\n def _Radar_callback(weak_self, radar_data):\n self = weak_self()\n if not self:\n return\n # To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:\n # points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))\n # points = np.reshape(points, (len(radar_data), 4))\n\n current_rot = radar_data.transform.rotation\n for detect in radar_data:\n azi = math.degrees(detect.azimuth)\n alt = math.degrees(detect.altitude)\n # The 0.25 adjusts a bit the distance so the dots can\n # be properly seen\n fw_vec = carla.Vector3D(x=detect.depth - 0.25)\n carla.Transform(\n carla.Location(),\n carla.Rotation(\n pitch=current_rot.pitch + alt,\n yaw=current_rot.yaw + azi,\n roll=current_rot.roll)).transform(fw_vec)\n\n def clamp(min_v, max_v, value):\n return max(min_v, min(value, 
max_v))\n\n norm_velocity = detect.velocity / self.velocity_range # range [-1, 1]\n r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)\n g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)\n b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)\n self.debug.draw_point(\n radar_data.transform.location + fw_vec,\n size=0.075,\n life_time=0.06,\n persistent_lines=False,\n color=carla.Color(r, g, b))\n" ]
[ [ "numpy.reshape", "numpy.dtype", "numpy.array", "numpy.zeros", "numpy.fabs" ] ]
hvanwyk/quadmesh
[ "df4676dd469c2d3539443156df260e027e1bf871" ]
[ "tests/test_mesh/test_quadcell.py" ]
[ "from mesh import Vertex, HalfEdge, QuadCell\nfrom mesh import convert_to_array\nfrom assembler import GaussRule\nfrom plot import Plot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport unittest\n\n\n\n\nclass TestQuadCell(unittest.TestCase):\n \"\"\"\n Test QuadCell Class\n \"\"\" \n def test_constructor(self):\n # Check the right number of halfedges\n self.assertRaises(Exception, QuadCell, *([1,2,2,2,2]))\n \n # Rectangle\n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((1,1))\n v4 = Vertex((0,1))\n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h34 = HalfEdge(v3, v4)\n h41 = HalfEdge(v4, v1)\n \n cell = QuadCell([h12, h23, h34, h41])\n self.assertTrue(cell.is_rectangle())\n\n \n def test_is_rectangle(self):\n #\n # Rectangle\n #\n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((1,1))\n v4 = Vertex((0,1))\n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h34 = HalfEdge(v3, v4)\n h41 = HalfEdge(v4, v1)\n \n cell = QuadCell([h12, h23, h34, h41])\n \n # Check cell\n self.assertTrue(cell.is_rectangle())\n \n cell.split()\n \n # Check child\n self.assertTrue(cell.get_child(0).is_rectangle())\n \n #\n # Not a rectangle\n # \n v5 = Vertex((2,2))\n h25 = HalfEdge(v2, v5)\n h54 = HalfEdge(v5, v4)\n \n cell = QuadCell([h12, h25, h54, h41])\n \n # Check cell\n self.assertFalse(cell.is_rectangle())\n \n cell.split()\n \n # Check child\n self.assertFalse(cell.get_child(0).is_rectangle())\n \n \n def test_split(self):\n # Rectangle\n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((1,1))\n v4 = Vertex((0,1))\n v5 = Vertex((2,0))\n v6 = Vertex((2,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h34 = HalfEdge(v3, v4)\n h41 = HalfEdge(v4, v1)\n \n cell = QuadCell([h12, h23, h34, h41])\n \n cell.split()\n \n self.assertTrue(cell.has_children())\n \n # Check that interior half_edges are twinned\n child_0 = cell.get_child(0)\n child_1 = cell.get_child(1)\n self.assertEqual(child_0.get_half_edge(1).twin(), \\\n child_1.get_half_edge(3))\n \n # Make another cell, check that it is a neighbor, and then split it \n h25 = HalfEdge(v2, v5)\n h56 = HalfEdge(v5, v6)\n h63 = HalfEdge(v6, v3)\n h32 = h23.make_twin()\n \n cell_1 = QuadCell([h25, h56, h63, h32])\n \n # Check that they are neighbors\n self.assertEqual(cell_1.get_neighbors(h32),cell)\n \n # Child_s doesn't have a neighbor\n self.assertIsNone(child_1.get_neighbors(child_1.get_half_edge(1)))\n \n cell_1.split()\n \n # Now the child has a neighbor\n self.assertEqual(child_1.get_neighbors(child_1.get_half_edge(1)),\n cell_1.get_child(0))\n \n \n def test_locate_point(self):\n v_sw = Vertex((0,0))\n v_se = Vertex((3,1))\n v_ne = Vertex((2,3))\n v_nw = Vertex((-1,1))\n \n h12 = HalfEdge(v_sw, v_se)\n h23 = HalfEdge(v_se, v_ne)\n h34 = HalfEdge(v_ne, v_nw)\n h41 = HalfEdge(v_nw, v_sw)\n cell = QuadCell([h12,h23,h34,h41])\n\n \n points = np.random.rand(5,2)\n \n \n def test_bin_points(self):\n #\n # Cell vertices\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((1,1))\n v4 = Vertex((0,1))\n \n # Cell HalfEdges\n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h34 = HalfEdge(v3, v4)\n h41 = HalfEdge(v4, v1)\n \n # Cell\n cell = QuadCell([h12, h23, h34, h41])\n\n # Split cell twice\n cell.split()\n cell.get_child(1).split()\n \n #\n # Error: point not in cell\n #\n x = np.array([[-1,-1]]) \n self.assertRaises(Exception, cell.bin_points, *(x,))\n \n #\n # Corner points\n #\n x = convert_to_array([v1,v2,v3,v4])\n bins = cell.bin_points(x)\n \n # There should be four bins\n self.assertEqual(len(bins),4)\n 
\n # No binning cells should have children\n for c, dummy in bins:\n self.assertFalse(c.has_children()) \n \n #\n # Center point\n # \n x = np.array([[0.5,0.5]])\n bins = cell.bin_points(x)\n self.assertEqual(len(bins),1)\n \n #\n # Mark \n # \n sf = '1'\n for child in cell.get_children():\n child.mark(sf)\n \n x = np.array([[0.75,0.25]])\n bins = cell.bin_points(x, subforest_flag=sf)\n for c, dummy in bins:\n self.assertTrue(c.is_marked(sf))\n self.assertFalse(c.has_children(flag=sf))\n \n \n def test_reference_map(self):\n v_sw = Vertex((0,0))\n v_se = Vertex((3,1))\n v_ne = Vertex((2,3))\n v_nw = Vertex((-1,1))\n \n h12 = HalfEdge(v_sw, v_se)\n h23 = HalfEdge(v_se, v_ne)\n h34 = HalfEdge(v_ne, v_nw)\n h41 = HalfEdge(v_nw, v_sw)\n cell = QuadCell([h12,h23,h34,h41])\n \n #\n # Map corner vertices of reference cell to physical vertices\n #\n y_refs = np.array([[0,0],[1,0],[1,1],[0,1]])\n x = list(convert_to_array(cell.get_vertices()))\n x_phys = cell.reference_map(list(y_refs))\n\n self.assertTrue(np.allclose(np.array(x),x_phys),\\\n 'Mapped vertices should coincide '+\\\n 'with physical cell vertices.')\n \n #\n # Jacobian: Area of cell by integration\n # \n rule_2d = GaussRule(order=4, shape='quadrilateral')\n r = rule_2d.nodes()\n wg = rule_2d.weights()\n dummy, mg = cell.reference_map(list(r), jac_r2p=True)\n jac = mg['jac_r2p']\n area = 0\n for i in range(4):\n j = jac[i]\n w = wg[i]\n area += np.abs(np.linalg.det(j))*w\n self.assertAlmostEqual(cell.area(), area, 7,\\\n 'Area computed via numerical quadrature '+\\\n 'not close to actual area')\n #\n # Try different formats\n # \n # Array\n x = np.array(x)\n x_ref = cell.reference_map(x, mapsto='reference')\n self.assertTrue(np.allclose(y_refs, np.array(x_ref)),\\\n 'Map array to reference: incorrect output.')\n # Single point\n x = x[0,:]\n x_ref = cell.reference_map(x, mapsto='reference')\n self.assertTrue(np.allclose(x, x_ref))\n \n #\n # Map corner vertices to reference points\n #\n x = convert_to_array(cell.get_vertices())\n y = cell.reference_map(x, mapsto='reference')\n self.assertTrue(np.allclose(y, y_refs), \\\n 'Corner vertices should map '+\\\n 'onto (0,0),(1,0),(1,1),(0,1).')\n \n #\n # Map random points in [0,1]^2 onto cell and back again\n # \n # Generate random points\n t = np.random.rand(5)\n s = np.random.rand(5)\n x = np.array([s,t]).T\n \n # Map to physical cell\n x_phy = cell.reference_map(x)\n \n # Check whether points are contained in cell\n in_cell = cell.contains_points(x_phy)\n self.assertTrue(all(in_cell), \\\n 'All points mapped from [0,1]^2 '+\\\n 'should be contained in the cell.')\n \n # Map back to reference cell\n x_ref = cell.reference_map(x_phy, mapsto='reference')\n self.assertTrue(np.allclose(np.array(x_ref), np.array(x)),\\\n 'Points mapped to physical cell and back should '+\\\n 'be unchanged.')\n \n\n #\n # Compute the hessian and compare with finite difference approximation \n #\n h = 1e-8\n x = np.array([[0.5, 0.5],[0.5+h,0.5],[0.5-h,0.5],\n [0.5,0.5+h],[0.5,0.5-h]])\n \n x_ref, mg = cell.reference_map(x, mapsto='reference',\n hess_p2r=True, jac_p2r=True)\n J = mg['jac_p2r']\n H = mg['hess_p2r']\n \n # sxx\n sxx_fd = (J[1][0,0]-J[2][0,0])/(2*h)\n sxx = H[0][0,0,0] \n self.assertAlmostEqual(sxx_fd, sxx, 7, \\\n 'Hessian calculation not close to '+\\\n 'finite difference approximation')\n \n \n # syx\n syx_fd = (J[1][0,1]-J[2][0,1])/(2*h)\n sxy = H[0][0,1,0]\n syx = H[0][1,0,0]\n self.assertAlmostEqual(sxy, syx, 7, 'Mixed derivatives not equal.')\n self.assertAlmostEqual(syx_fd, sxy, 7, \\\n 
'Hessian calculation not close to '+\\\n 'finite difference approximation')\n \n # syy\n syy_fd = (J[3][0,1]-J[4][0,1])/(2*h)\n syy = H[0][1,1,0]\n self.assertAlmostEqual(syy_fd, syy, 7, \\\n 'Hessian calculation not close to '+\\\n 'finite difference approximation')\n\n # txx\n txx_fd = (J[1][1,0]-J[2][1,0])/(2*h)\n txx = H[0][0,0,1]\n self.assertAlmostEqual(txx_fd, txx, 7, \\\n 'Hessian calculation not close to '+\\\n 'finite difference approximation')\n \n # txy\n txy_fd = (J[3][1,0]-J[4][1,0])/(2*h)\n txy = H[0][0,1,1]\n tyx = H[0][1,0,1]\n self.assertAlmostEqual(txy, tyx, 7, 'Mixed derivatives not equal.')\n self.assertAlmostEqual(txy_fd, txy, 7, \\\n 'Hessian calculation not close to '+\\\n 'finite difference approximation')\n \n # tyy\n tyy_fd = (J[3][1,1]-J[4][1,1])/(2*h)\n tyy = H[0][1,1,1] \n self.assertAlmostEqual(tyy_fd, tyy, 7, \\\n 'Hessian calculation not close to '+\\\n 'finite difference approximation') \n \nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testNode']\n unittest.main() " ]
[ [ "numpy.linalg.det", "numpy.array", "numpy.random.rand", "numpy.allclose" ] ]
softwaresaved/international-survey-lib
[ "79755294ecd6c6bd740cff4094b8a2417f6ede92" ]
[ "plotting.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport math\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .likertScalePlot import likert_scale, get_colors\n\n\ndef plot_numeric_var(df):\n \"\"\"\n \"\"\"\n print(df.describe())\n n_bins = 40\n y_label = 'Frequencies'\n # Get the first column name of the df to label the x-axis. This plot expects only one columns\n x_label = df.columns.values.tolist()[0]\n\n fig, ax = plt.subplots()\n # ax = remove_to_right_line(ax)\n ax.set_ylabel(y_label)\n ax.set_xlabel(x_label)\n ax.hist(df.dropna().values, n_bins, normed=False, edgecolor='white', linewidth=1, color=\"#3F5D7D\")\n min_value = int(math.floor(min(df.dropna().values)))\n max_value = int(math.ceil(max(df.dropna().values)))\n step = int(math.ceil((max_value - min_value) / n_bins))\n plt.xticks(np.arange(min_value, max_value +1, step))\n\n return ax\n\n\ndef bar_plot(df, colormap, horizontal=False):\n \"\"\"\n \"\"\"\n # Get the color palette\n # colors = [colormap(np.arange(len(df)))]\n colors = get_colors(df, colormap, axis=0)\n width=0.8\n if horizontal:\n ax = df.plot.barh(label='index', width=width, color=colors)\n else:\n ax = df.plot.bar(label='index', width=width, color=colors)\n return ax\n\n\ndef plot_y_n_single(df, colormap):\n \"\"\"\n \"\"\"\n width=0.8\n # Take the colors associate to yes and no\n # colors = [np.array((colormap(0), colormap(3)))]\n colors = []\n if df.iloc[0].loc['Yes'] > 0:\n colors.append(colormap(0))\n if df.iloc[0].loc['No'] > 0:\n colors.append(colormap(3))\n ax = df.transpose().plot.bar(label='index', width=width, color=colors, fontsize=14)\n return ax\n\n\ndef stacked_y_n(df, colormap):\n \"\"\"\n Plotting Y-N values as stacked bars when passed several questions at the same time.\n If want to plot single question Y-N see plot_single_y_n()\n :params:\n :df pd.df(): dataframe containing the data, should be a df of frequencies\n created with crosstab\n :sort_order bool(): to order the value by the number of yes\n :horizontal bool(): to plot the bar horizontal rather than vertical (Default behaviour)\n :legend bool(): to show the legend or not\n :set_labels bool(): to add labels on the individuals bars\n :set_n bool(): to show the total n for each items\n\n :return:\n :fig matplotlib.plt.plot(): Return the plot itself\n \"\"\"\n width=0.8\n # Take the colors associate to yes and no\n colors = [colormap(0), colormap(3)]\n ax = df.plot.bar(width=width, color=colors, stacked=True)\n return ax\n\n\ndef ranking_plot(df, colormap):\n \"\"\"\n \"\"\"\n width = 0.8\n colors = colormap(np.arange(len(df)))\n ax = df.plot.bar(color=colors, stacked=True, width=width, fontsize=14)\n return ax\n\n\ndef likert_plot(df):\n df = df.transpose()\n ax = likert_scale(df)\n return ax\n\n\ndef get_plot(df, type_question, title_plot=False, dropna=True):\n \"\"\"\n \"\"\"\n colormap = plt.cm.tab20\n legend = None\n x_label = False\n wrap_label = False\n y_label = True\n dropna=True\n # Remove any [PERCENTAGE] strings from either the columns names or the row index name\n # remove for the columns\n try:\n df = df.rename(columns={col: col.replace('[PERCENTAGE]', '') for col in df.columns})\n # Remove for the row index\n df = df.rename(index={col: col.replace('[PERCENTAGE]', '') for col in df.index})\n except AttributeError: # In case of numpy number in freenumeric case\n pass\n\n try:\n if type_question.lower() == 'one choice' or type_question.lower() == 'multiple choices':\n # Round the df to avoid having the column of those lower than 1 percent 
being showed\n df = df.round()\n ax = bar_plot(df, colormap)\n legend = False\n x_label = True\n\n elif type_question.lower() == 'y/n/na':\n if len(df.index) == 1:\n # Round the df to avoid having the column of those lower than 1 percent being showed\n df = df.round()\n df.sort_values(by='Yes', inplace=True, ascending=False)\n ax = plot_y_n_single(df, colormap)\n legend = False\n else:\n ax = stacked_y_n(df, colormap)\n legend = True\n x_label = True\n wrap_label = True\n\n elif type_question.lower() == 'ranking':\n ax = ranking_plot(df, colormap)\n legend = True\n wrap_label = True\n\n elif type_question.lower() == 'likert':\n # Way to check if the likert question here as only one question\n # In that case, it plot a normal barplot\n if len(df.columns) == 1:\n df = df.round()\n ax = bar_plot(df, colormap)\n legend = False\n x_label = True\n dropna = False\n else:\n ax = likert_plot(df)\n y_label = False\n\n elif type_question.lower() == 'freenumeric':\n ax = plot_numeric_var(df)\n\n cosmetic_changes_plot(df, ax, legend=legend, x_label=x_label, wrap_label=wrap_label, y_label=y_label, dropna=dropna)\n\n except TypeError: # In Case an empty v_count is passed\n return None\n\n\ndef cosmetic_changes_plot(df, ax, legend, x_label, wrap_label, y_label, dropna):\n \"\"\"\n Get the plot and return a modified one to have some\n cosmetic changes\n \"\"\"\n\n # Remove the upper and right line\n remove_to_right_line(ax)\n\n # Set up legends\n setup_legend(ax, legend)\n # Add appropriate title\n add_title(df)\n #\n # # Add appropriate x labels\n if x_label is True:\n add_x_labels(df, wrap_label, dropna)\n #\n # # Add appropriate y labels\n if y_label:\n add_y_label(ax)\n\n # Remove the xlabel title\n ax.set_xlabel('')\n\n return ax\n\n\n# def add_labels():\n#\n# # set individual bar lables using above list\n# for i in ax.patches:\n# # get_width pulls left or right; get_y pushes up or down\n# ax.text(i.get_width()+700, i.get_y()+.18, \\\n# str(round((i.get_width()), 2)), fontsize=11, color='dimgrey')\n\n\ndef add_title(df):\n plt.title(df.index.name, fontsize=16)\n\n\ndef setup_legend(ax, legend):\n if legend is True:\n plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n elif legend is False:\n ax.legend().set_visible(False)\n else:\n pass\n\n\ndef remove_to_right_line(ax):\n \"\"\"\n Remove the top and the right axis\n \"\"\"\n # Ensure that the axis ticks only show up on the bottom and left of the plot.\n # Ticks on the right and top of the plot are generally unnecessary chartjunk.\n # Hide the right and top spines\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n\n\ndef add_y_label(ax):\n ax.set_ylabel('Percentage', fontsize=14)\n # plt.yticks(np.arange(0, 100, 10))\n\n\ndef add_x_labels(df, wrap_label, dropna):\n\n def wrap_labels(label, max_size=20):\n \"\"\"\n Function to automatically wrap labels if they are too long\n Split only if whitespace\n params:\n :labels str(): string that contains the labels\n :max_size int(): 20 by Default, the size of the string\n before being wrapped\n :return:\n :str() of wrapped labels according to the max size\n \"\"\"\n def split_at_whitespace(label):\n label_to_return = list()\n n = 0\n for letter in label:\n n +=1\n if n >= max_size:\n if letter == ' ':\n letter = '\\n'\n n = 0\n label_to_return.append(letter)\n return ''.join(label_to_return)\n\n return split_at_whitespace(label)\n # Add the x-labels\n # To set up the label on x or y axis\n if dropna is True:\n # 
remove the labels that have a value of zero\n        if wrap_label:\n            label_txt = [wrap_labels(label) for i, label in enumerate(df.index) if df.iloc[i, 0] >= 1]\n        else:\n            label_txt = [label for i, label in enumerate(df.index) if df.iloc[i, 0] >= 1]\n    else:\n\n        # keep all the labels, including those with a value of zero\n        if wrap_label:\n            label_txt = [wrap_labels(label) for i, label in enumerate(df.index)]\n        else:\n            label_txt = [label for i, label in enumerate(df.index)]\n    label_ticks = range(len(label_txt))\n    # This set the xlimits to center the xtick with the bin\n    # Explanation found here:\n    # https://stackoverflow.com/a/27084005/3193951\n    plt.xlim([-1, len(label_txt)])\n    plt.xticks(label_ticks, label_txt, rotation=90, fontsize=14)\n\n\ndef display_side_by_side(*args, multiple=True):\n    \"\"\"\n    Merge two dataframes into one, where the first contains the count values and the second\n    contains the percentages. They need to have the same index name\n    https://stackoverflow.com/a/44923103\n    \"\"\"\n    original_df1 = args[0]\n    df1 = original_df1.copy()\n    if multiple is True:\n        original_df2 = args[1]\n        df2 = original_df2.copy()\n        # Round the value to display them\n        # And remove the remaining trailing 0 by converting to str\n        df2 = df2.round()\n        df2.loc[:, df2.dtypes == np.float64] = df2.loc[:, df2.dtypes == np.float64].astype(str)\n        df2 = df2.replace('\.0', '', regex=True)\n        rows, columns = df1.shape\n        index_row = df2.index\n        df2.index = [i.replace(' [PERCENTAGE]', '') for i in index_row]\n        df2.reset_index()\n        if columns == 1:\n            df1['Percentage'] = df2.iloc[:, -1]\n            df1.columns = ['Count', 'Percentage']\n\n        else:  # In case of Y-N, the df has Yes and No as columns\n            if df1.columns[0] == 'Yes':\n                df1['Yes_P'] = df2.iloc[:, 0]\n                df1['No_P'] = df2.iloc[:, 1]\n                try:\n                    df1.columns = ['Yes [Count]', 'No [Count]', 'NaN value', 'Yes [Percentage]', 'No [Percentage]']\n                except ValueError:  # In case there is not a NaN\n                    df1.columns = ['Yes [Count]', 'No [Count]', 'Yes [Percentage]', 'No [Percentage]']\n            else:\n                df1.columns = ['{} [Count]'.format(l) for l in df1.columns]\n                for i, colname in enumerate(df2.columns):\n                    df1['{} [Percentage]'.format(colname)] = df2.iloc[:, i]\n    else:\n        df1.loc[:, df1.dtypes == np.float64] = df1.loc[:, df1.dtypes == np.float64].astype(str)\n        df1 = df1.replace('\.0', '', regex=True)\n    return df1\n\n\ndef main():\n    from counting import get_count\n    from action_file import grouping_likert_yn\n    from cleaning import CleaningData\n    from config import CleaningConfig\n    pd.set_option('display.max_rows', 300)\n\n    # Load dataset\n    df = pd.read_csv(CleaningConfig.raw_data)\n\n    # Cleaning_process\n    cleaning_process = CleaningData(df)\n    df = cleaning_process.cleaning()\n    cleaning_process.write_df()\n    cleaning_process.write_config_file()\n\n    for s in cleaning_process.structure_by_section:\n        section = cleaning_process.structure_by_section[s]\n        for group in section:\n            for question in grouping_likert_yn(section[group]):\n                list_questions = question[0]\n                original_question = question[1]\n                answer_format = question[2]\n                file_answer = question[3]\n                try:\n                    v_to_count = get_count(df, questions=list_questions,\n                                           type_question=answer_format,\n                                           file_answer=file_answer)\n                    try:\n                        get_plot(v_to_count, answer_format)\n                    except ValueError:\n                        print(list_questions)\n                except KeyError:\n                    print('Error for the question: {}'.format(original_question))\n\n\nif __name__ == \"__main__\":\n    main()\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.set_option", "matplotlib.pyplot.xticks" ] ]
iamryanmurray/metis
[ "97621bd7b9f7ed88cd27df877050926aa6425823" ]
[ "3-McNulty/random_forest_cv.py" ]
[ "from data_clean_script import *\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nimport matplotlib.pyplot as plt\nimport pickle\n\n\nX,y = split_with_bow()\nX_train, X_test, y_train, y_test = rescale_train_test(X,y)\n\n\n# Number of trees in random forest\nn_estimators = [200,400]\n# Number of features to consider at every split\nmax_features = ['sqrt','auto']\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(5, 50, num = 10)]\nmax_depth.append(None)\n# Minimum number of samples required to split a node\nmin_samples_split = [2,4,8,16]\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [2, 4,8,16]\n# Method of selecting samples for training each tree\nbootstrap = [True]\n\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n\nrf = RandomForestClassifier(n_jobs=-1)\nrf_random = RandomizedSearchCV(estimator = rf, \n param_distributions = random_grid, \n n_iter = 20, cv = 3, verbose=10, \n n_jobs = -1,scoring='roc_auc')\n\n\n\nrf_random.fit(X_train,y_train)\n\nprint(rf_random.best_params_)\n\nwith open('random_forest_model.pkl', 'wb') as handle:\n pickle.dump(rf_random, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('random_forest_model_params.pkl', 'wb') as handle:\n pickle.dump(rf_random.best_params_, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n \n\n'''best_prob = rf_random.predict_proba(X_test)[:,1]\n\nfpr, tpr,thresh = roc_curve(y_test, best_prob)\nroc_auc = auc(fpr, tpr)\n\nplt.figure()\nplt.title('Random Forest')\nplt.plot([0,1],[0,1])\nplt.plot(fpr,tpr)\nplt.xlabel('FPR')\nplt.ylabel('TPR')\nplt.draw()\nplt.savefig('random_forest_2.eps')'''\n\n" ]
[ [ "sklearn.model_selection.RandomizedSearchCV", "sklearn.ensemble.RandomForestClassifier" ] ]
JunnYu/GlyceBert_tokenizer
[ "27ded9d20421e274ec2e7139e9c79da56d8ad42f" ]
[ "src/chinesebert/modeling_chinesebert.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@file : modeling_glycebert.py\n@author: zijun\n@contact : [email protected]\n@date : 2020/9/6 18:50\n@version: 1.0\n@desc : ChineseBert Model\n\"\"\"\nimport warnings\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPooling,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom transformers.models.bert.modeling_bert import (\n BertEncoder,\n BertModel,\n BertOnlyMLMHead,\n BertPooler,\n BertPreTrainedModel,\n)\n\nfrom chinesebert.classifier import BertMLP\nfrom chinesebert.fusion_embedding import FusionBertEmbeddings\n\n\nclass ChineseBertModel(BertModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the models.\n **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during Bert pretraining. This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the models at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n models = BertModel.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = models(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n def __init__(self, config):\n super(ChineseBertModel, self).__init__(config)\n self.config = config\n\n self.embeddings = FusionBertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n pinyin_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention\n if the models is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask\n is used in the cross-attention if the models is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n \"\"\"\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n (\n encoder_batch_size,\n encoder_sequence_length,\n _,\n ) = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(\n encoder_attention_mask\n )\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n pinyin_ids=pinyin_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = (\n self.pooler(sequence_output) if self.pooler is not None else None\n )\n\n if not return_dict:\n return (sequence_output, pooled_output) + 
encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\nclass ChineseBertForMaskedLM(BertPreTrainedModel):\n def __init__(self, config):\n super(ChineseBertForMaskedLM, self).__init__(config)\n\n self.bert = ChineseBertModel(config)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def forward(\n self,\n input_ids=None,\n pinyin_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert (\n \"lm_labels\" not in kwargs\n ), \"Use `BertWithLMHead` for autoregressive language modeling task.\"\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n outputs = self.bert(\n input_ids,\n pinyin_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(\n prediction_scores.reshape(-1, self.config.vocab_size),\n labels.reshape(-1),\n )\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return (\n ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n )\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass ChineseBertForSequenceClassification(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = ChineseBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n pinyin_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n 
output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n outputs = self.bert(\n input_ids,\n pinyin_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.reshape(-1), labels.reshape(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.reshape(-1, self.num_labels), labels.reshape(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass ChineseBertForQuestionAnswering(BertPreTrainedModel):\n \"\"\"BERT model for Question Answering (span extraction).\n This module is composed of the BERT model with a linear layer on top of\n the sequence output that computes start_logits and end_logits\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n\n Outputs:\n if `start_positions` and `end_positions` are not `None`:\n Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.\n if `start_positions` or `end_positions` is `None`:\n Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end\n position tokens of shape [batch_size, sequence_length].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForQuestionAnswering(config)\n start_logits, end_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = ChineseBertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n pinyin_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n outputs = self.bert(\n input_ids,\n pinyin_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a 
dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass ChineseBertForTokenClassification(BertPreTrainedModel):\n def __init__(self, config, mlp=False):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = ChineseBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n if mlp:\n self.classifier = BertMLP(config)\n else:\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n pinyin_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n \"\"\"\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n outputs = self.bert(\n input_ids,\n pinyin_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep the active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.reshape(-1) == 1\n active_logits = logits.reshape(-1, self.num_labels)\n active_labels = torch.where(\n active_loss,\n labels.reshape(-1),\n torch.tensor(loss_fct.ignore_index).type_as(labels),\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.reshape(-1, self.num_labels), labels.reshape(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.ones", "torch.zeros", "torch.tensor", "torch.nn.Linear", "torch.nn.MSELoss" ] ]
patrickvonplaten/google-research
[ "2c0043ecd507e75e2df9973a3015daf9253e1467" ]
[ "tf3d/instance_segmentation/postprocessor_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for ...threed.instance_segmentation.postprocessor.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom tf3d import standard_fields\nfrom tf3d.instance_segmentation import postprocessor\n\n\nclass PostprocessorTest(tf.test.TestCase):\n\n def test_postprocess_without_nms(self):\n num_voxels = 10000\n outputs = {\n standard_fields.DetectionResultFields.object_semantic_voxels:\n tf.random.uniform([num_voxels, 10],\n minval=-1.0,\n maxval=1.0,\n dtype=tf.float32),\n standard_fields.DetectionResultFields.instance_embedding_voxels:\n tf.random.uniform([num_voxels, 64],\n minval=-1.0,\n maxval=1.0,\n dtype=tf.float32)\n }\n postprocessor.postprocess(\n outputs=outputs,\n num_furthest_voxel_samples=200,\n sampler_score_vs_distance_coef=0.5,\n embedding_similarity_strategy='distance',\n apply_nms=False,\n nms_score_threshold=0.1)\n self.assertAllEqual(\n outputs[standard_fields\n .DetectionResultFields.instance_segments_voxel_mask].shape,\n np.array([200, num_voxels]))\n self.assertAllEqual(\n outputs[standard_fields.DetectionResultFields.objects_class].shape,\n np.array([200, 1]))\n self.assertAllEqual(\n outputs[standard_fields.DetectionResultFields.objects_score].shape,\n np.array([200, 1]))\n\n def test_postprocess_with_nms(self):\n num_voxels = 10000\n outputs = {\n standard_fields.DetectionResultFields.object_semantic_voxels:\n tf.random.uniform([num_voxels, 10],\n minval=-1.0,\n maxval=1.0,\n dtype=tf.float32),\n standard_fields.DetectionResultFields.instance_embedding_voxels:\n tf.random.uniform([num_voxels, 64],\n minval=-1.0,\n maxval=1.0,\n dtype=tf.float32)\n }\n postprocessor.postprocess(\n outputs=outputs,\n num_furthest_voxel_samples=200,\n sampler_score_vs_distance_coef=0.5,\n embedding_similarity_strategy='distance',\n apply_nms=True,\n nms_score_threshold=0.1)\n num_instances = outputs[standard_fields.DetectionResultFields\n .instance_segments_voxel_mask].shape[0]\n self.assertAllEqual(\n outputs[standard_fields.DetectionResultFields\n .instance_segments_voxel_mask].shape,\n np.array([num_instances, num_voxels]))\n self.assertAllEqual(\n outputs[standard_fields.DetectionResultFields.objects_class].shape,\n np.array([num_instances, 1]))\n self.assertAllEqual(\n outputs[standard_fields.DetectionResultFields.objects_score].shape,\n np.array([num_instances, 1]))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.array", "tensorflow.random.uniform", "tensorflow.test.main" ] ]
chrisabbott/corenet
[ "1ba76ac7bad6b00d8b601eac83f105b83a6362a9" ]
[ "corenet/data/scene.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Routines for reading synthetic 3D scenes.\"\"\"\n\nimport dataclasses as d\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import Text\n\nimport PIL.Image\nimport io\nimport numpy as np\nimport torch as t\n\nfrom corenet import file_system as fs\nfrom corenet import misc_util as util\n\n\[email protected](frozen=True)\nclass Scene(util.TensorContainerMixin):\n \"\"\"A rendered synthetic scene.\"\"\"\n\n # The untransformed triangle vertices of each mesh,\n # List[float32[num_triangles, 3, 3]]\n mesh_vertices: List[t.Tensor]\n\n # The view transform applied to the scene, float32[4, 4]\n view_transform: t.Tensor\n\n # The object-to-world transforms, float32[num_meshes, 4, 4]\n o2w_transforms: t.Tensor\n\n # Camera transform for the contained geometry, float32[4, 4]\n camera_transform: t.Tensor\n\n # The mesh labels, string[num_meshes].\n mesh_labels: List[Text]\n\n # Visible fraction of each mesh in the image, float32[num_meshes].\n mesh_visible_fractions: t.Tensor\n\n # An eye-lit image rendered with OpenGL, uint8[height, width, 3]\n opengl_image: t.Tensor\n\n # Scene rendered with global illumination (using PBRT),\n # uint8[height, width, 3]\n pbrt_image: t.Tensor\n\n # The untransformed mesh normals, List[float32[num_triangles, 3, 3]]\n normals: List[t.Tensor] = d.field(default_factory=lambda: [])\n\n # The mesh texcoords, List[float32[num_triangles, 3, 2]]\n texcoords: List[t.Tensor] = d.field(default_factory=lambda: [])\n\n # The mesh material ids, List[int32[num_triangles]]\n material_ids: List[t.Tensor] = d.field(default_factory=lambda: [])\n\n # The mesh diffuse material colors, float32[num_materials, 3]\n diffuse_colors: List[t.Tensor] = d.field(default_factory=lambda: [])\n\n # The diffuse texture PNGs, List[string[num_materials]]. 
An empty string here\n # corresponds to a material without a texture\n diffuse_texture_pngs: List[List[bytes]] = d.field(default_factory=lambda: [])\n\n\ndef _load_image(i):\n return util.to_tensor(np.array(PIL.Image.open(io.BytesIO(i))), t.uint8)\n\n\nclass NpzReader:\n def __init__(self, path: str):\n # noinspection PyTypeChecker\n self.npz = np.load(io.BytesIO(fs.read_bytes(path)), allow_pickle=True)\n\n def tensor(self, item: str, dtype: Optional[t.dtype] = None) -> t.Tensor:\n result = self.npz[item] # type: np.ndarray\n if dtype:\n return util.to_tensor(result, dtype)\n else:\n return t.as_tensor(result)\n\n def list(self, item: str) -> List[Any]:\n result = self.npz[item] # type: np.ndarray\n assert len(result.shape) == 1\n return list(result)\n\n def scalar(self, item: str) -> Any:\n result = self.npz[item] # type: np.ndarray\n assert len(result.shape) == 0\n return result\n\n\ndef load_from_npz(path: Text, meshes_dir: Text,\n load_extra_fields=False) -> Scene:\n \"\"\"Loads an input example.\n\n Args:\n path: Path to NPZ with scene.\n meshes_dir: Path containing ShapeNet meshes.\n load_extra_fields: Whether to load extra fields that are not required for\n running the pipeline (e.g. texture coordinates)\n\n Returns:\n The loaded input example.\n\n \"\"\"\n scene_npz = NpzReader(path)\n mesh_paths = [\n fs.join(meshes_dir, *v) + \".npz\"\n for v in\n zip(scene_npz.list(\"mesh_labels\"), scene_npz.list(\"mesh_filenames\"))]\n\n result = Scene(\n mesh_vertices=[],\n view_transform=scene_npz.tensor(\"view_transform\", t.float32),\n o2w_transforms=scene_npz.tensor(\"mesh_object_to_world_transforms\",\n t.float32),\n camera_transform=scene_npz.tensor(\"camera_transform\", t.float32),\n mesh_labels=[v for v in scene_npz.list(\"mesh_labels\")],\n opengl_image=_load_image(scene_npz.scalar(\"opengl_image\")),\n pbrt_image=_load_image(scene_npz.scalar(\"pbrt_image\")),\n mesh_visible_fractions=scene_npz.tensor(\"mesh_visible_fractions\",\n t.float32),\n )\n\n for mesh_path in mesh_paths:\n # noinspection PyTypeChecker\n mesh_npz = NpzReader(mesh_path)\n result.mesh_vertices.append(mesh_npz.tensor(\"vertices\", t.float32))\n\n if load_extra_fields:\n result.normals.append(mesh_npz.tensor(\"normals\", t.float32))\n result.material_ids.append(mesh_npz.tensor(\"material_ids\", t.int32))\n result.texcoords.append(mesh_npz.tensor(\"texcoords\", t.float32))\n result.diffuse_colors.append(mesh_npz.tensor(\"diffuse_colors\", t.float32))\n result.diffuse_texture_pngs.append(\n mesh_npz.scalar(\"diffuse_texture_pngs\"))\n return result\n" ]
[ [ "torch.as_tensor" ] ]
voschezang/Holographic-Projector-Simulations
[ "e127cf86093f15ed68c36592591e2eee196b5705" ]
[ "py/surf.py" ]
[ "import numpy as np\nimport subprocess\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tck\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n# local\nimport plot\nimport animate\nimport util\nfrom util import DIMS\n\nANGLES = (30, 60)\n\n\ndef surf(x, y, z, Nx: int, Ny: int, ax=None, **kwargs):\n if 'cmap' not in kwargs:\n global cmap\n kwargs['cmap'] = plot.cmap\n\n if ax is None:\n # fig = plt.figure(figsize=(9, 7))\n fig = plt.figure(figsize=(6, 4))\n # fig = plt.figure(figsize=(7, 4))\n ax = fig.gca(projection='3d')\n X = x.reshape((Nx, Ny))\n Y = y.reshape((Nx, Ny))\n Z = z.reshape((Nx, Ny))\n\n surf = ax.plot_surface(X, Y, Z, linewidth=0, antialiased=True, **kwargs)\n ax.view_init(*ANGLES) # angle\n return ax, surf\n\n\ndef surf_multiple(phasor, position, Nx: int, Ny: int, prefix='', filename=None):\n labels = ['Amplitude', 'Amplitude', 'Irradiance'\n # , 'Log Irradiance'\n ]\n for i, label in enumerate(labels):\n amp = phasor[:, 0]\n if label == 'Amplitude':\n z = amp\n else:\n z = amp ** 2\n if i == 3:\n log_irradiance = np.log(np.clip(amp ** 2, 1e-9, None))\n # z = log_irradiance\n # log_irradiance = np.log(util.irradiance(\n # util.to_polar(a, phi), normalize=False))\n z = util.standardize(log_irradiance)\n assert abs(z.min()) < 1e-3\n assert abs(1 - z.max()) < 1e-3\n\n z_log = i in [1, 2]\n lower_bound = 1e-6 # assume max = 1\n if z_log:\n # manual log zscale\n z = np.clip(z, lower_bound, None)\n mini, maxi = z.min(), z.max()\n if mini == maxi or mini <= 0:\n continue\n else:\n mini = round(np.log10(mini))\n maxi = round(np.log10(maxi))\n if mini == maxi:\n continue\n z = np.log10(z)\n\n # ignore third dimension in position\n ax, _ = surf(position[:, 0], position[:, 1], z, Nx, Ny)\n if z_log and mini != maxi:\n n_ticks = int(maxi - mini) + 1\n if n_ticks > 8 and n_ticks % 2 == 1:\n n_ticks = round(n_ticks / 2.)\n\n assert(n_ticks > 1)\n ticks = np.linspace(mini, maxi, n_ticks,\n endpoint=True).round().astype(int)\n labels = [f'$10^{{{v}}}$' for v in ticks]\n # ax.set_zticks(ticks) # auto\n ax.set_zticklabels(labels)\n\n plt.xlabel('Space')\n plt.ylabel('Space')\n formatter = tck.EngFormatter(places=1, sep=u\"\\N{THIN SPACE}\", unit='m')\n ax.xaxis.set_major_formatter(formatter)\n ax.yaxis.set_major_formatter(formatter)\n plt.xticks(rotation=ANGLES[0] / 2, rotation_mode='anchor')\n plt.yticks(rotation=-ANGLES[1] / 4, rotation_mode='anchor')\n plot.set_num_xyticks(3)\n\n plt.title(f'{prefix}{label}')\n plt.tight_layout()\n if filename is None:\n plt.show()\n else:\n suffix = label.replace(' ', '') + f'-{i}'\n plot.save_fig(f'{filename}_{suffix}', ext='png')\n\n plt.close()\n\n\nif __name__ == '__main__':\n sequence_dir = util.get_arg('--sequence_dir', '', parse_func=str)\n dir = '../tmp'\n fn = 'out.zip'\n size = os.path.getsize(os.path.join(dir, fn))\n print(f'Input file size: {size * 1e-6:0.5f} MB')\n if size > 1e6:\n print(f'Warning, file too large: {size*1e-6:0.4f} MB')\n\n params, data = util.parse_file(dir, fn, 'out')\n # print('uu', data['u'])\n\n N = data['y'][0].shape[0]\n ratio = params['y'][0]['aspect_ratio']\n Nx, Ny = util.solve_xy_is_a(N, ratio)\n Nxy = Nx * Ny\n for k in 'yv':\n data[k] = data[k][:Nxy]\n\n print({'N': N, 'Nx': Nx, 'Ny': Ny, 'eq': Nx * Ny == N})\n N_sqrt = int(np.sqrt(N))\n print(f'N sqrt (y): {N_sqrt}')\n # max_n_plots = 2\n # m = len(data['y'])\n # args = (m,) if m <= max_n_plots else (\n # 0, m, 
np.ceil(m / max_n_plots).astype(int))\n # for i in range(*args):\n # surf_multiple(data['y'][i], data['v'][i], Nx, Ny, f'$y_{i} $')\n\n m = len(data['y'])\n args1 = (m,) if m <= 2 else (0, m, np.ceil(m / 2).astype(int))\n n_z_per_y = len(data['z']) // len(data['y'])\n m = n_z_per_y\n args2 = (m,) if m <= 1 else (0, m, np.ceil(m / 2).astype(int))\n for major in range(*args1):\n for minor in range(*args2):\n i = major * n_z_per_y + minor\n offset = params['z'][i]['z_offset']\n title = f\"$z_{{{major}, {minor}}}$ \" + \\\n f\"(offset: {round(offset, 2)} m)\"\n print(title)\n prefix = f'$z_{{{major},{minor}}}$ '\n surf_multiple(data['z'][i], data['w'][i], N_sqrt, N_sqrt, prefix)\n # surf_multiple(data['y'][i], data['v'][i], N_sqrt, N_sqrt, prefix)\n" ]
[ [ "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.clip", "numpy.linspace", "matplotlib.pyplot.figure", "numpy.ceil", "matplotlib.ticker.EngFormatter", "numpy.log10", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
Nas-virat/CPE101
[ "0d121540408fae12fb122876e3d6a7a3ad1b7503" ]
[ "augment.py" ]
[ "import cv2\nimport random\nimport numpy as np\nfrom tf_pose import common\nimport os\n\ndef fill(img, h, w):\n img = cv2.resize(img, (h, w), cv2.INTER_CUBIC)\n return img\n\ndef rotation(img, angle):\n angle = int(random.uniform(-angle, angle))\n h, w = img.shape[:2]\n M = cv2.getRotationMatrix2D((int(w/2), int(h/2)), angle, 1)\n img = cv2.warpAffine(img, M, (w, h))\n return img\n\ndef brightness(img, low, high):\n value = random.uniform(low, high)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n hsv = np.array(hsv, dtype = np.float64)\n hsv[:,:,1] = hsv[:,:,1]*value\n hsv[:,:,1][hsv[:,:,1]>255] = 255\n hsv[:,:,2] = hsv[:,:,2]*value \n hsv[:,:,2][hsv[:,:,2]>255] = 255\n hsv = np.array(hsv, dtype = np.uint8)\n img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n return img\n\ndef channel_shift(img, value):\n value = int(random.uniform(-value, value))\n img = img + value\n img[:,:,:][img[:,:,:]>255] = 255\n img[:,:,:][img[:,:,:]<0] = 0\n img = img.astype(np.uint8)\n return img\n\ndef zoom(img, value):\n if value > 1 or value < 0:\n print('Value for zoom should be less than 1 and greater than 0')\n return img\n value = random.uniform(value, 0.5)\n h, w = img.shape[:2]\n h_taken = int(value*h)\n w_taken = int(value*w)\n h_start = random.randint(0, h-h_taken)\n w_start = random.randint(0, w-w_taken)\n img = img[h_start:h_start+h_taken, w_start:w_start+w_taken, :]\n img = fill(img, h, w)\n return img\n\n\ndef horizontal_flip(img, flag):\n if flag:\n return cv2.flip(img, 1)\n else:\n return img\n\n\ndef get_files(directory):\n results = []\n for file_name in os.listdir(directory):\n tmp, ext = os.path.splitext(file_name)\n if ext == '.jpg':\n results.append(file_name)\n return results\n\nCLASS = 'squats_down'\n\nINPUT_DIRECTORY = 'traindata/' + CLASS\nOUTPUT_DIRECTORY = 'augment_image/' + CLASS\n\nANGLE = 5\nif __name__ == '__main__':\n\n for f in get_files(INPUT_DIRECTORY):\n\n print('Process image: %s...' % f)\n\n img = common.read_imgfile(os.path.join(INPUT_DIRECTORY, f), None, None)\n \n rows,cols, _ = img.shape\n minus = random.randint(0,1)\n if minus == 0:\n minus = -1\n per = random.randint(5,10) \n M = np.float32([[1,0,minus*rows*per/100],[0,1,0]])\n output_image = cv2.warpAffine(img,M,(cols,rows))\n\n\n cv2.imwrite(os.path.join(OUTPUT_DIRECTORY, 'shift_' + f), output_image)\n print('save file',os.path.join(OUTPUT_DIRECTORY, 'shift' + f))\n '''\n output_image = rotation(img,ANGLE)\n cv2.imwrite(os.path.join(OUTPUT_DIRECTORY, 'rota_' + f), output_image)\n print('save file',os.path.join(OUTPUT_DIRECTORY, 'rota' + f))\n\n \n output_image = horizontal_flip(img,1)\n cv2.imwrite(os.path.join(OUTPUT_DIRECTORY, 'flip_' + f), output_image)\n print('save file',os.path.join(OUTPUT_DIRECTORY, 'flip' + f))\n '''\n\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.float32" ] ]
StevenKe8080/recognition_gender
[ "1cb226edd2c8bb2c678fc022847312fe36c75194" ]
[ "image_train.py" ]
[ "import random\nimport numpy as np\nfrom handle_image import get_file\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.models import Sequential,load_model\nfrom keras.layers import Dense,Activation,Convolution2D,MaxPooling2D,Flatten,Dropout\nfrom keras.optimizers import Adam\n\n#建立数据\nclass DataSet(object):\n def __init__(self):\n self.nb_classes = None\n self.X_train = None\n self.X_test = None\n self.Y_train = None\n self.Y_test = None\n self.img_size = 128\n\n def extract_data(self,train_path):\n imgs, labels, counter = get_file(train_path)\n print(labels)\n # 避免过拟合,采用交叉验证,验证集占训练集30%,固定随机种子(random_state)\n X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.3,\n random_state=random.randint(0, 100))\n\n #数据预处理 keras backend 用的TensorFlow 黑白图片 channel 1\n X_train = X_train.reshape(X_train.shape[0], 1, self.img_size, self.img_size) / 255.\n X_test = X_test.reshape(X_test.shape[0], 1, self.img_size, self.img_size) / 255.\n\n #label 转为 one-hot 数据\n Y_train = np_utils.to_categorical(y_train, num_classes=counter)\n Y_test = np_utils.to_categorical(y_test, num_classes=counter)\n\n self.X_train = X_train\n self.X_test = X_test\n self.Y_train = Y_train\n self.Y_test = Y_test\n self.nb_classes = counter\n\n\n#建立model 使用CNN(卷积神经网络)\nclass Model(object):\n FILE_PATH = \"store/model.h5\"\n IMAGE_SIZE = 128\n def __init__(self):\n self.model = None\n\n def build_model(self,dataset):\n self.model = Sequential()\n #进行一层卷积 输出 shape (32,128,128)\n self.model.add(Convolution2D(filters=32,kernel_size=5,strides=1, padding='same',data_format='channels_first', input_shape=dataset.X_train.shape[1:]))\n #使用relu激励函数\n self.model.add(Activation('relu'))\n #池化,输出为shape (32,64,64)\n self.model.add(MaxPooling2D(pool_size=2,strides=2,padding='same',data_format='channels_first'))\n #dropout 防止过拟合\n self.model.add(Dropout(0.25))\n\n #进行一层卷积 输出为shape (64,32,32)\n self.model.add(Convolution2D(64, 5, strides=1, padding='same', data_format='channels_first'))\n # 使用relu激励函数\n self.model.add(Activation('relu'))\n # 池化,输出为原来的一半 shape (64,32,32)\n self.model.add(MaxPooling2D(2, 2, 'same', data_format='channels_first'))\n # dropout 防止过拟合\n self.model.add(Dropout(0.25))\n\n #全连接层\n self.model.add(Flatten())\n self.model.add(Dense(512))\n self.model.add(Activation('relu'))\n self.model.add(Dropout(0.5))\n self.model.add(Dense(dataset.nb_classes))\n self.model.add(Activation('softmax'))\n\n self.model.summary()\n\n def train(self,dataset):\n adam = Adam(lr=1e-4)\n self.model.compile(optimizer=adam,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # epochs 循环次数 batch_size 批处理大小\n self.model.fit(dataset.X_train, dataset.Y_train, epochs=25, batch_size=32, )\n\n def save(self, file_path=FILE_PATH):\n print('Model 保存.')\n self.model.save(file_path)\n\n def load(self, file_path=FILE_PATH):\n print('Model 读取.')\n self.model = load_model(file_path)\n\n #预测\n def predict(self,img):\n img = img.reshape((1, 1, self.IMAGE_SIZE, self.IMAGE_SIZE))\n img = img.astype('float32')\n img = img/255.0\n\n result = self.model.predict_proba(img) #预测图像结果\n max_index = np.argmax(result) #取平局值最大\n print(\"begin\")\n print(result)\n print(max_index)\n print(result[0][max_index])\n print(\"end\")\n return max_index,result[0][max_index] #第一个参数为概率最高的label的index,第二个参数为对应概率\n\n def evaluate(self, dataset):\n loss,score = self.model.evaluate(dataset.X_test, dataset.Y_test, verbose=0)\n # print(\"%s: %.2f%%\" % (self.model.metrics_names[1], score[1] * 100))\n print('\\ntest 
loss: ', loss)\n print('\\ntest accuracy: ', score)\n\nif __name__ == '__main__':\n dataset = DataSet()\n dataset.extract_data('gender_image')\n\n model = Model()\n model.build_model(dataset)\n model.train(dataset)\n model.save()\n\n model = Model()\n model.load()\n model.evaluate(dataset)\n" ]
[ [ "numpy.argmax" ] ]
lavanyashukla/ml-fairness-gym
[ "fb68b379d4284b7af746b2a051d518b3bd45ab00" ]
[ "agents/allocation_agents_test.py" ]
[ "# coding=utf-8\n# Copyright 2019 The ML Fairness Gym Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Tests for naive_probability_matching_allocator.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest\nimport core\nimport rewards\nimport test_util\nfrom agents import allocation_agents\nfrom environments import attention_allocation\nimport gym\nimport numpy as np\nfrom six.moves import range\n\n\nclass NaiveProbabilityMatchingAgentTest(absltest.TestCase):\n\n def test_update_counts(self):\n \"\"\"Check that counts are updated correctly given an observation.\"\"\"\n env = attention_allocation.LocationAllocationEnv()\n agent_params = allocation_agents.NaiveProbabilityMatchingAgentParams()\n agent_params.decay_prob = 0\n agent = allocation_agents.NaiveProbabilityMatchingAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'),\n params=agent_params)\n counts = [3, 6, 8]\n observation = np.array([1, 2, 0])\n updated_counts = agent._update_beliefs(observation, counts)\n self.assertTrue(np.all(np.equal(updated_counts, [4, 8, 8])))\n\n def test__allocate_by_counts(self):\n \"\"\"Check allocation proportions match probabilities from counts.\"\"\"\n env = attention_allocation.LocationAllocationEnv()\n agent = allocation_agents.NaiveProbabilityMatchingAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'))\n counts = [3, 6, 8]\n n_resource = 20\n n_samples = 100\n samples = [agent._allocate(n_resource, counts) for _ in range(n_samples)]\n counts_normalized = [(count / float(np.sum(counts))) for count in counts]\n samples_normalized = [\n (count / float(np.sum(samples))) for count in np.sum(samples, axis=0)\n ]\n self.assertTrue(\n np.all(np.isclose(counts_normalized, samples_normalized, atol=0.05)))\n\n def test_allocate_by_counts_zero(self):\n \"\"\"Check allocations are even when counts are zero.\"\"\"\n env = attention_allocation.LocationAllocationEnv()\n agent = allocation_agents.NaiveProbabilityMatchingAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'))\n counts = [0, 0, 0]\n n_resource = 15\n n_samples = 100\n samples = [agent._allocate(n_resource, counts) for _ in range(n_samples)]\n mean_samples = np.sum(samples, axis=0) / float(n_samples)\n expected_mean = n_resource / float(len(counts))\n std_dev = np.std(samples)\n means_close = [\n np.abs(mean - expected_mean) < std_dev for mean in mean_samples\n ]\n self.assertTrue(np.all(means_close))\n\n def test_can_interact_with_attention_env(self):\n env = attention_allocation.LocationAllocationEnv()\n agent = allocation_agents.NaiveProbabilityMatchingAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n 
reward_fn=rewards.VectorSumReward('incidents_seen'))\n test_util.run_test_simulation(env=env, agent=agent)\n\n def test_get_added_vector_features(self):\n action_space_len = 2\n observation = {\n 'incidents_seen': np.array([0, 1]),\n 'incidents_reported': np.array([3, 1])\n }\n features = allocation_agents._get_added_vector_features(\n observation, action_space_len)\n expected = [3.0, 2.0]\n self.assertSequenceAlmostEqual(features.tolist(), expected)\n features = allocation_agents._get_added_vector_features(\n observation, action_space_len, keys=['incidents_reported'])\n expected = [3.0, 1.0]\n self.assertSequenceAlmostEqual(features.tolist(), expected)\n\n def test_episode_done_raises_error(self):\n env = attention_allocation.LocationAllocationEnv()\n agent = allocation_agents.NaiveProbabilityMatchingAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'))\n observation = env.reset()\n with self.assertRaises(core.EpisodeDoneError):\n agent.act(observation, done=True)\n\n\nclass MLEProbabilityMatchingAgentTest(absltest.TestCase):\n\n def test_can_interact_with_attention_env(self):\n env = attention_allocation.LocationAllocationEnv()\n agent = allocation_agents.MLEProbabilityMatchingAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'),\n params=None)\n test_util.run_test_simulation(env=env, agent=agent)\n\n def test_MLE_rate_estimation(self):\n env_params = attention_allocation.Params()\n env_params.prior_incident_counts = (500, 500)\n env_params.n_attention_units = 5\n\n # pylint: disable=g-long-lambda\n agent_params = allocation_agents.MLEProbabilityMatchingAgentParams()\n\n agent_params.feature_selection_fn = lambda obs: allocation_agents._get_added_vector_features(\n obs, env_params.n_locations, keys=['incidents_seen'])\n agent_params.interval = 200\n agent_params.epsilon = 0\n\n env = attention_allocation.LocationAllocationEnv(env_params)\n agent = allocation_agents.MLEProbabilityMatchingAgent(\n action_space=env.action_space,\n reward_fn=lambda x: None,\n observation_space=env.observation_space,\n params=agent_params)\n seed = 0\n agent.rng.seed(seed)\n env.seed(seed)\n observation = env.reset()\n done = False\n steps = 200\n for _ in range(steps):\n action = agent.act(observation, done)\n observation, _, done, _ = env.step(action)\n\n self.assertTrue(\n np.all(\n np.isclose(\n list(agent.beliefs), list(env_params.incident_rates),\n atol=0.5)))\n\n\nclass MLEGreedyAgentTest(absltest.TestCase):\n\n def test_can_interact_with_attention_env(self):\n env = attention_allocation.LocationAllocationEnv()\n agent = allocation_agents.MLEGreedyAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'))\n test_util.run_test_simulation(env=env, agent=agent)\n\n def test_allocate_beliefs_fair_unsatisfiable(self):\n env_params = attention_allocation.Params(\n n_locations=4,\n prior_incident_counts=(10, 10, 10, 10),\n n_attention_units=5,\n incident_rates=[0, 0, 0, 0])\n env = attention_allocation.LocationAllocationEnv(params=env_params)\n agent_params = allocation_agents.MLEGreedyAgentParams(\n epsilon=0.0, alpha=0.25)\n agent = allocation_agents.MLEGreedyAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'),\n params=agent_params)\n with 
self.assertRaises(gym.error.InvalidAction):\n agent._allocate(5, [5, 2, 1, 1])\n\n def test_allocate_beliefs_fair(self):\n env_params = attention_allocation.Params(\n n_locations=4,\n prior_incident_counts=(10, 10, 10, 10),\n n_attention_units=6,\n incident_rates=[0, 0, 0, 0])\n env = attention_allocation.LocationAllocationEnv(params=env_params)\n agent_params = allocation_agents.MLEGreedyAgentParams(\n epsilon=0.0, alpha=0.25)\n agent = allocation_agents.MLEGreedyAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'),\n params=agent_params)\n allocation = agent._allocate(6, [5, 2, 1, 1])\n self.assertTrue(np.all(np.equal(allocation, [3, 1, 1, 1])))\n\n def test_allocate_beliefs_greedy(self):\n env_params = attention_allocation.Params(\n n_locations=4,\n prior_incident_counts=(10, 10, 10, 10),\n n_attention_units=5,\n incident_rates=[0, 0, 0, 0])\n env = attention_allocation.LocationAllocationEnv(params=env_params)\n agent_params = allocation_agents.MLEGreedyAgentParams(epsilon=0.0)\n agent = allocation_agents.MLEGreedyAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n reward_fn=rewards.VectorSumReward('incidents_seen'),\n params=agent_params)\n allocation = agent._allocate(5, [5, 2, 1, 1])\n self.assertTrue(np.all(np.equal(allocation, [4, 1, 0, 0])))\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.abs", "numpy.all", "numpy.std", "numpy.equal", "numpy.array", "numpy.sum", "numpy.isclose" ] ]
jhosoume/pymfe
[ "b454f8a5470ebd4dbbeb1a7e5570443dd65fdf9a" ]
[ "pymfe/statistical.py" ]
[ "\"\"\"A module dedicated to the extraction of statistical metafeatures.\"\"\"\nimport typing as t\nimport warnings\n\nimport numpy as np\nimport scipy\nimport sklearn.preprocessing\nimport sklearn.cross_decomposition\n\nimport pymfe._summary as _summary\n\n\nclass MFEStatistical:\n \"\"\"Keep methods for metafeatures of ``Statistical`` group.\n\n The convention adopted for metafeature-extraction related methods\n is to always start with ``ft_`` prefix in order to allow automatic\n method detection. This prefix is predefined within ``_internal``\n module.\n\n All method signature follows the conventions and restrictions listed\n below:\n\n 1. For independent attribute data, ``X`` means ``every type of attribute``,\n ``N`` means ``Numeric attributes only`` and ``C`` stands for\n ``Categorical attributes only``. It is important to note that the\n categorical attribute sets between ``X`` and ``C`` and the numerical\n attribute sets between ``X`` and ``N`` may differ due to data\n transformations, performed while fitting data into MFE model,\n enabled by, respectively, ``transform_num`` and ``transform_cat``\n arguments from ``fit`` (MFE method).\n\n 2. Only arguments in MFE ``_custom_args_ft`` attribute (set up inside\n ``fit`` method) are allowed to be required method arguments. All other\n arguments must be strictly optional (i.e., has a predefined\n default value).\n\n 3. It is assumed that the user can change any optional argument, without\n any previous verification for both type or value, via kwargs argument of\n ``extract`` method of MFE class.\n\n 4. The return value of all feature-extraction methods should be a single\n value or a generic List (preferably an np.ndarray)\n type with numeric values.\n\n There is another type of method adopted for automatic detection. It is\n adopted the prefix ``precompute_`` for automatic detection of these\n methods. These methods run while fitting some data into an MFE model\n automatically, and their objective is to precompute some common value\n shared between more than one feature extraction method. This strategy is a\n trade-off between more system memory consumption and speeds up of feature\n extraction. Their return value must always be a dictionary whose keys are\n possible extra arguments for both feature extraction methods and other\n precomputation methods. Note that there is a share of precomputed values\n between all valid feature-extraction modules (e.g., ``class_freqs``\n computed in module ``statistical`` can freely be used for any\n precomputation or feature extraction method of module ``landmarking``).\n \"\"\"\n\n @classmethod\n def precompute_statistical_class(\n cls, y: t.Optional[np.ndarray] = None, **kwargs\n ) -> t.Dict[str, t.Any]:\n \"\"\"Precompute distinct classes and its abs. frequencies from ``y``.\n\n Parameters\n ----------\n y : :obj:`np.ndarray`\n The target attribute from fitted data.\n\n kwargs:\n Additional arguments. 
May have previously precomputed before this\n method from other precomputed methods, so they can help speed up\n this precomputation.\n\n Returns\n -------\n :obj:`dict`\n With following precomputed items:\n * ``classes`` (:obj:`np.ndarray`): distinct classes of ``y``,\n if ``y`` is not :obj:`NoneType`.\n * ``class_freqs`` (:obj:`np.ndarray`): absolute class\n frequencies of ``y``, if ``y`` is not :obj:`NoneType`.\n \"\"\"\n precomp_vals = {} # type: t.Dict[str, t.Any]\n\n if y is not None and not {\"classes\", \"class_freqs\"}.issubset(kwargs):\n classes, class_freqs = np.unique(y, return_counts=True)\n\n precomp_vals[\"classes\"] = classes\n precomp_vals[\"class_freqs\"] = class_freqs\n\n return precomp_vals\n\n @classmethod\n def precompute_can_cors(\n cls,\n N: t.Optional[np.ndarray] = None,\n y: t.Optional[np.ndarray] = None,\n **kwargs\n ) -> t.Dict[str, t.Any]:\n \"\"\"Precompute canonical correlations and its eigenvalues.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`, optional\n Numerical fitted data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n kwargs:\n Additional arguments. May have previously precomputed before this\n method from other precomputed methods, so they can help speed up\n this precomputation.\n\n Returns\n -------\n :obj:`dict`\n With following precomputed items:\n - ``can_cors`` (:obj:`np.ndarray`): canonical correlations\n between ``N`` and the one-hot encoded version of ``y``.\n - ``can_cor_eigvals`` (:obj:`np.ndarray`): eigenvalues\n related to the canonical correlations.\n \"\"\"\n precomp_vals = {} # type: t.Dict[str, t.Any]\n\n if (\n y is not None\n and N is not None\n and N.size\n and not {\"can_cors\", \"can_cor_eigvals\"}.issubset(kwargs)\n ):\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n precomp_vals[\"can_cors\"] = can_cors\n precomp_vals[\"can_cor_eigvals\"] = cls._can_cor_to_eigval(can_cors)\n\n return precomp_vals\n\n @classmethod\n def precompute_statistical_cor_cov(\n cls, N: t.Optional[np.ndarray] = None, ddof: int = 1, **kwargs\n ) -> t.Dict[str, t.Any]:\n \"\"\"Precomputes the correlation and covariance matrix of numerical data.\n\n Be cautious in allowing this precomputation method on huge datasets, as\n this precomputation method may be very memory hungry.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`, optional\n Numerical fitted data.\n\n ddof : int, optional\n Degrees of freedom of covariance matrix.\n\n kwargs:\n Additional arguments. 
May have previously precomputed before this\n method from other precomputed methods, so they can help speed up\n this precomputation.\n\n Returns\n -------\n :obj:`dict`\n With following precomputed items:\n - ``cov_mat`` (:obj:`np.ndarray`): covariance matrix of\n ``N``, if ``N`` is not :obj:`NoneType`.\n - ``abs_corr_mat`` (:obj:`np.ndarray`): absolute\n correlation matrix of ``N``, if ``N`` is not\n :obj:`NoneType`.\n \"\"\"\n precomp_vals = {} # type: t.Dict[str, t.Any]\n\n if N is not None and N.size:\n if \"cov_mat\" not in kwargs:\n precomp_vals[\"cov_mat\"] = np.cov(N, rowvar=False, ddof=ddof)\n\n if \"abs_corr_mat\" not in kwargs:\n abs_corr_mat = np.abs(np.corrcoef(N, rowvar=False))\n\n if not isinstance(abs_corr_mat, np.ndarray) and np.isnan(\n abs_corr_mat\n ):\n abs_corr_mat = np.array([np.nan])\n\n precomp_vals[\"abs_corr_mat\"] = abs_corr_mat\n\n return precomp_vals\n\n @staticmethod\n def _can_cor_to_eigval(can_cors: np.ndarray) -> np.ndarray:\n \"\"\"Transform canonical correlations into corresponding eigenvalues.\n\n The transformation uses the following relationship:\n\n can_cor_i = sqrt(can_cor_eigval_i / (1 + can_cor_eigval_i))\n\n Or, equivalently:\n\n can_cor_eigval_i = can_cor_i**2 / (1 - can_cor_i**2)\n\n So, the option to return the eigenvalues is meant to simplify\n code that uses those values, not to generate extra information.\n \"\"\"\n sqr_can_cors = np.square(can_cors)\n can_cor_eig_vals = sqr_can_cors / (1 - sqr_can_cors)\n return can_cor_eig_vals\n\n @classmethod\n def _calc_can_cors(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n ) -> np.ndarray:\n \"\"\"Calculate the Canonical Correlations between ``N`` and ``y.``\n\n Note that the canonical correlations are calculated using the\n one-hot encoded version of ``y.``\n\n At most min(num_classes, num_attr) canonical correlations are\n kept.\n \"\"\"\n y_bin = sklearn.preprocessing.OneHotEncoder(\n sparse=False,\n drop=\"first\",\n ).fit_transform(y.reshape(-1, 1))\n\n num_classes, num_attr = y_bin.shape[1], N.shape[1]\n # Note: 'n_components' is a theoretical upper bound, so it is not\n # guaranteed that exactly 'n_components' will be returned.\n n_components = min(num_classes, num_attr)\n\n # Note: 'sklearn.cross_decomposition.CCA' issues UserWarnings\n # whenever less than 'n_components' are obtained. 
However, this is\n # already taken into account in this function, so no need for\n # those warnings.\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n\n cca_model = sklearn.cross_decomposition.CCA(\n n_components=n_components, max_iter=500\n )\n\n try:\n cca_model.fit(N, y_bin)\n\n except StopIteration:\n pass\n\n N_tf, y_tf = cca_model.transform(N, y_bin)\n\n warnings.filterwarnings(\"default\", category=UserWarning)\n\n ind = 0\n can_cors = np.zeros(n_components, dtype=float)\n\n while ind < n_components and np.any(np.flatnonzero(N_tf[:, ind])):\n can_cors[ind] = np.corrcoef(N_tf[:, ind], y_tf[:, ind])[0, 1]\n ind += 1\n\n can_cors = can_cors[:ind]\n\n return can_cors\n\n @classmethod\n def ft_can_cor(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n can_cors: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n \"\"\"Compute canonical correlations of data.\n\n The canonical correlations are calculated between the attributes\n in ``N`` and the binarized (one-hot encoded) version of ``y``.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n can_cors : :obj:`np.ndarray`, optional\n Canonical correlations between ``N`` and the one-hot encoded\n version of ``y``. Argument used to take advantage of\n precomputations.\n\n Returns\n -------\n :obj:`np.ndarray`\n Canonical correlations of the data.\n\n References\n ----------\n .. [1] Alexandros Kalousis. Algorithm Selection via Meta-Learning.\n PhD thesis, Faculty of Science of the University of Geneva, 2002.\n \"\"\"\n if can_cors is None:\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n return can_cors\n\n @classmethod\n def ft_gravity(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n norm_ord: t.Union[int, float] = 2,\n classes: t.Optional[np.ndarray] = None,\n class_freqs: t.Optional[np.ndarray] = None,\n cls_inds: t.Optional[np.ndarray] = None,\n ) -> float:\n \"\"\"Compute the distance between minority and majority classes center\n of mass.\n\n The center of mass of a class is the average value of each attribute\n between instances of the same class.\n\n The majority and minority classes cannot be the same, even if every\n class has the same number of instances.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n norm_ord : :obj:`numeric`, optional\n Minkowski Distance parameter. Minkowski Distance has the following\n popular cases for this argument value\n\n +-----------+---------------------------+\n |norm_ord | Distance name |\n +-----------+---------------------------+\n |-> -inf | Min value |\n +-----------+---------------------------+\n |1.0 | Manhattan/City Block |\n +-----------+---------------------------+\n |2.0 | Euclidean |\n +-----------+---------------------------+\n |-> +inf | Max value (infinite norm) |\n +-----------+---------------------------+\n\n classes : :obj:`np.ndarray`, optional\n Distinct classes of ``y``.\n\n class_freqs : :obj:`np.ndarray`, optional\n Absolute frequencies of each distinct class in target attribute\n ``y`` or ``classes``. If ``classes`` is given, then this argument\n must be paired with it by index.\n\n cls_inds : :obj:`np.ndarray`, optional\n Boolean array which indicates the examples of each class.\n The rows represents each distinct class, and the columns\n represents the instances. 
Used to take advantage of\n precomputations.\n\n Returns\n -------\n float\n Gravity of the numeric dataset.\n\n Raises\n ------\n :obj:`ValueError`\n If ``norm_ord`` is not numeric.\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith. On learning algorithm\n selection for classification. Applied Soft Computing,\n 6(2):119 – 138, 2006.\n \"\"\"\n if classes is None or class_freqs is None:\n classes, class_freqs = np.unique(y, return_counts=True)\n\n _classes = np.asarray(classes) # type: np.ndarray\n _class_freqs = np.asarray(class_freqs, dtype=int) # type: np.ndarray\n\n ind_cls_maj = np.argmax(_class_freqs)\n class_maj = _classes[ind_cls_maj]\n\n _classes = np.delete(_classes, ind_cls_maj)\n _class_freqs = np.delete(_class_freqs, ind_cls_maj)\n\n ind_cls_min = np.argmin(_class_freqs)\n\n if cls_inds is not None:\n insts_cls_maj = N[cls_inds[ind_cls_maj, :], :]\n # Adjusting minoritary class index due to 'delete' operation\n ind_cls_min += ind_cls_min >= ind_cls_maj\n insts_cls_min = N[cls_inds[ind_cls_min, :], :]\n\n else:\n class_min = _classes[ind_cls_min]\n insts_cls_maj = N[y == class_maj, :]\n insts_cls_min = N[y == class_min, :]\n\n gravity = np.linalg.norm(\n insts_cls_maj.mean(axis=0) - insts_cls_min.mean(axis=0),\n ord=norm_ord,\n )\n\n return gravity\n\n @classmethod\n def ft_cor(\n cls, N: np.ndarray, abs_corr_mat: t.Optional[np.ndarray] = None\n ) -> np.ndarray:\n \"\"\"Compute the absolute value of the correlation of distinct dataset\n column pairs.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n abs_corr_mat : :obj:`np.ndarray`, optional\n Absolute correlation matrix of ``N``. Argument used to exploit\n precomputations.\n\n Returns\n -------\n :obj:`np.ndarray`\n Absolute value of correlation between distinct attributes.\n\n References\n ----------\n .. [1] Ciro Castiello, Giovanna Castellano, and Anna Maria Fanelli.\n Meta-data: Characterization of input features for meta-learning.\n In 2nd International Conference on Modeling Decisions for\n Artificial Intelligence (MDAI), pages 457–468, 2005.\n .. [2] Matthias Reif, Faisal Shafait, Markus Goldstein, Thomas Breuel,\n and Andreas Dengel. Automatic classifier selection for non-experts.\n Pattern Analysis and Applications, 17(1):83–96, 2014.\n .. [3] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and\n John Campbell. Machine Learning, Neural and Statistical\n Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.\n \"\"\"\n if abs_corr_mat is None:\n abs_corr_mat = np.abs(np.corrcoef(N, rowvar=False))\n\n res_num_rows, _ = abs_corr_mat.shape\n\n inf_triang_vals = abs_corr_mat[np.tril_indices(res_num_rows, k=-1)]\n\n return np.abs(inf_triang_vals)\n\n @classmethod\n def ft_cov(\n cls,\n N: np.ndarray,\n ddof: int = 1,\n cov_mat: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n \"\"\"Compute the absolute value of the covariance of distinct dataset\n attribute pairs.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n ddof : int, optional\n Degrees of freedom for covariance matrix.\n\n cov_mat : :obj:`np.ndarray`, optional\n Covariance matrix of ``N``. Argument meant to exploit\n precomputations. Note that this argument value is not the same as\n this method return value, as it only returns the lower-triangle\n values from ``cov_mat``.\n\n Returns\n -------\n :obj:`np.ndarray`\n Absolute value of covariances between distinct attributes.\n\n References\n ----------\n .. 
[1] Ciro Castiello, Giovanna Castellano, and Anna Maria Fanelli.\n Meta-data: Characterization of input features for meta-learning.\n In 2nd International Conference on Modeling Decisions for\n Artificial Intelligence (MDAI), pages 457–468, 2005.\n .. [2] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and\n John Campbell. Machine Learning, Neural and Statistical\n Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.\n \"\"\"\n if cov_mat is None:\n cov_mat = np.cov(N, rowvar=False, ddof=ddof)\n\n res_num_rows, _ = cov_mat.shape\n\n inf_triang_vals = cov_mat[np.tril_indices(res_num_rows, k=-1)]\n\n return np.abs(inf_triang_vals)\n\n @classmethod\n def ft_nr_disc(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n can_cors: t.Optional[np.ndarray] = None,\n ) -> t.Union[int, float]:\n \"\"\"Compute the number of canonical correlations between each attribute\n and class.\n\n This method's return value is effectively the size of the return value\n of the ``ft_can_cor`` method. Check its documentation for more in-depth\n details.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n can_cors : :obj:`np.ndarray`, optional\n Canonical correlations between ``N`` and the one-hot encoded\n version of ``y``. Argument used to take advantage of\n precomputations.\n\n Returns\n -------\n int or float\n Number of canonical correlations between each attribute and\n class, if ``ft_can_cor`` is executed successfully. Returns\n :obj:`np.nan` otherwise.\n\n References\n ----------\n .. [1] Guido Lindner and Rudi Studer. AST: Support for algorithm\n selection with a CBR approach. In European Conference on\n Principles of Data Mining and Knowledge Discovery (PKDD),\n pages 418 – 423, 1999.\n \"\"\"\n if can_cors is None:\n can_cors = cls.ft_can_cor(N=N, y=y)\n\n return can_cors.size\n\n @classmethod\n def ft_eigenvalues(\n cls,\n N: np.ndarray,\n ddof: int = 1,\n cov_mat: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n \"\"\"Compute the eigenvalues of covariance matrix from dataset.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n ddof : int, optional\n Degrees of freedom for covariance matrix.\n\n cov_mat : :obj:`np.ndarray`, optional\n Covariance matrix of ``N``. Argument meant to exploit\n precomputations.\n\n Returns\n -------\n :obj:`np.ndarray`\n Eigenvalues of ``N`` covariance matrix.\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith. On learning algorithm\n selection for classification. Applied Soft Computing,\n 6(2):119 – 138, 2006.\n \"\"\"\n if cov_mat is None:\n cov_mat = np.cov(N, rowvar=False, ddof=ddof)\n\n return np.linalg.eigvalsh(cov_mat)\n\n @classmethod\n def ft_g_mean(\n cls, N: np.ndarray, allow_zeros: bool = True, epsilon: float = 1.0e-10\n ) -> np.ndarray:\n \"\"\"Compute the geometric mean of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n allow_zeros : :obj:`bool`, optional\n If True, then the geometric mean of all attributes with zero values\n is set to zero. Otherwise, these values are set to :obj:`np.nan`.\n\n epsilon : float, optional\n A small value below which (in absolute value) all values are\n considered zero-valued. Used only if ``allow_zeros`` is False.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute geometric means.\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith-Miles. 
A meta-learning approach\n to automatic kernel selection for support vector machines.\n Neurocomputing, 70(1):173 – 186, 2006.\n \"\"\"\n min_values = N.min(axis=0)\n\n if allow_zeros:\n cols_invalid = min_values < 0.0\n cols_zero = np.logical_and(min_values >= 0.0, min_values < epsilon)\n cols_valid = np.logical_not(np.logical_or(cols_invalid, cols_zero))\n\n else:\n cols_invalid = min_values <= epsilon\n cols_valid = np.logical_not(cols_invalid)\n\n _, num_col = N.shape\n g_mean = np.zeros(num_col, dtype=float)\n\n g_mean[cols_valid] = scipy.stats.gmean(N[:, cols_valid], axis=0)\n\n g_mean[cols_invalid] = np.nan\n\n # Note: the R MFE version can favor infinities over real values,\n # which is summarized as 'nan'. This version always tries to pick\n # a real value whenever it is available.\n return g_mean\n\n @classmethod\n def ft_h_mean(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the harmonic mean of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute harmonic means.\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith-Miles. A meta-learning approach\n to automatic kernel selection for support vector machines.\n Neurocomputing, 70(1):173 – 186, 2006.\n \"\"\"\n return scipy.stats.hmean(N, axis=0)\n\n @classmethod\n def ft_iq_range(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the interquartile range (IQR) of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute interquartile ranges.\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith-Miles. A meta-learning approach\n to automatic kernel selection for support vector machines.\n Neurocomputing, 70(1):173 – 186, 2006.\n \"\"\"\n return scipy.stats.iqr(N, axis=0)\n\n @classmethod\n def ft_kurtosis(\n cls, N: np.ndarray, method: int = 3, bias: bool = True\n ) -> np.ndarray:\n \"\"\"Compute the kurtosis of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n method : int, optional\n Defines the strategy used to estimate data kurtosis. Used for\n total compatibility with R package ``e1071``. The options must be\n one of the following:\n\n +--------+-----------------------------------------------+\n |Option | Formula |\n +--------+-----------------------------------------------+\n |1 | Kurt_1 = (m_4 / m_2**2 - 3) |\n | | (default of `scipy.stats` package) |\n +--------+-----------------------------------------------+\n |2 | Kurt_2 = (((n+1) * Kurt_1 + 6) * (n-1) / f_2),|\n | | f_2 = ((n-2)*(n-3)) |\n +--------+-----------------------------------------------+\n |3 | Kurt_3 = (m_4 / s**4 - 3) |\n | | = ((Kurt_1+3) * (1 - 1/n)**2 - 3) |\n +--------+-----------------------------------------------+\n\n Where `n` is the number of instances in ``N``, `s` is the standard\n deviation of each attribute in ``N``, and `m_i` is the ith\n statistical moment of each attribute in ``N``.\n\n Note that if the selected method is unable to be calculated due\n to division by zero, then the first method is used instead.\n\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute kurtosis.\n\n References\n ----------\n .. [1] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and\n John Campbell. Machine Learning, Neural and Statistical\n Classification, volume 37. 
Ellis Horwood Upper Saddle River, 1994.\n \"\"\"\n kurt_arr = np.apply_along_axis(\n func1d=_summary.sum_kurtosis,\n axis=0,\n arr=N,\n method=method,\n bias=bias,\n )\n\n return kurt_arr\n\n @classmethod\n def ft_mad(cls, N: np.ndarray, factor: float = 1.4826) -> np.ndarray:\n \"\"\"Compute the Median Absolute Deviation (MAD) adjusted by a factor.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n factor : float, optional\n Multiplication factor for output correction. The default ``factor``\n is 1.4826 since it is an approximated result of MAD of a normally\n distributed data (with any mean and standard deviation of 1.0), so\n it makes this method result comparable with this sort of data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute MAD (Median Absolute Deviation.)\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith. On learning algorithm\n selection for classification. Applied Soft Computing,\n 6(2):119 – 138, 2006.\n \"\"\"\n return scipy.stats.median_abs_deviation(\n x=N, axis=0, scale=1.0 / factor\n )\n\n @classmethod\n def ft_max(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the maximum value from each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute maximum values.\n\n References\n ----------\n .. [1] Robert Engels and Christiane Theusinger. Using a data metric for\n preprocessing advice for data mining applications. In 13th European\n Conference on on Artificial Intelligence (ECAI), pages 430 – 434,\n 1998.\n \"\"\"\n return np.asfarray(N.max(axis=0))\n\n @classmethod\n def ft_mean(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the mean value of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute mean values.\n\n References\n ----------\n .. [1] Robert Engels and Christiane Theusinger. Using a data metric for\n preprocessing advice for data mining applications. In 13th European\n Conference on on Artificial Intelligence (ECAI), pages 430 – 434,\n 1998.\n \"\"\"\n return np.asfarray(N.mean(axis=0))\n\n @classmethod\n def ft_median(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the median value from each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute median values.\n\n References\n ----------\n .. [1] Robert Engels and Christiane Theusinger. Using a data metric for\n preprocessing advice for data mining applications. In 13th European\n Conference on on Artificial Intelligence (ECAI), pages 430 – 434,\n 1998.\n \"\"\"\n return np.asfarray(np.median(N, axis=0))\n\n @classmethod\n def ft_min(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the minimum value from each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute minimum values.\n\n References\n ----------\n .. [1] Robert Engels and Christiane Theusinger. Using a data metric for\n preprocessing advice for data mining applications. 
In 13th European\n Conference on on Artificial Intelligence (ECAI), pages 430 – 434,\n 1998.\n \"\"\"\n return np.asfarray(N.min(axis=0))\n\n @classmethod\n def ft_nr_cor_attr(\n cls,\n N: np.ndarray,\n threshold: float = 0.5,\n normalize: bool = True,\n abs_corr_mat: t.Optional[np.ndarray] = None,\n ) -> t.Union[int, float]:\n \"\"\"Compute the number of distinct highly correlated pairs of attributes.\n\n A pair of attributes is considered highly correlated if the\n absolute value of its correlation is equal to or larger than a\n given ``threshold``.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n threshold : float, optional\n The threshold value: a correlation is assumed to be strong\n if its absolute value is equal to or greater than it.\n\n normalize : bool, optional\n If True, the result is normalized by a factor of 2/(d*(d-1)), where\n `d` is the number of attributes (columns) in ``N``.\n\n abs_corr_mat : :obj:`np.ndarray`, optional\n Absolute correlation matrix of ``N``. Argument used to exploit\n precomputations.\n\n Returns\n -------\n int | float\n If ``normalize`` is False, this method returns the number of\n highly correlated pairs of distinct attributes. Otherwise,\n return the proportion of highly correlated attributes.\n\n References\n ----------\n .. [1] Mostafa A. Salama, Aboul Ella Hassanien, and Kenneth Revett.\n Employment of neural network and rough set in meta-learning.\n Memetic Computing, 5(3):165 – 177, 2013.\n \"\"\"\n abs_corr_vals = cls.ft_cor(N, abs_corr_mat=abs_corr_mat)\n\n _, num_attr = N.shape\n\n norm_factor = 1.0\n\n if normalize:\n norm_factor = 2.0 / (num_attr * (num_attr - 1.0))\n\n return np.sum(abs_corr_vals >= threshold) * norm_factor\n\n @classmethod\n def ft_nr_norm(\n cls,\n N: np.ndarray,\n method: str = \"shapiro-wilk\",\n threshold: float = 0.05,\n failure: str = \"soft\",\n max_samples: int = 5000,\n ) -> t.Union[float, int]:\n \"\"\"Compute the number of attributes normally distributed based on a\n given method.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n method : str, optional\n Select the normality test to be executed. This argument must assume\n one of the options shown below:\n\n - shapiro-wilk: from `scipy.stats.shapiro` documentation: the\n Shapiro-Wilk test tests the null hypothesis that the data was\n drawn from a normal distribution.\n\n - dagostino-pearson: from `scipy.stats.normaltest` documentation:\n It is based on D'Agostino and Pearson's test that combines skew\n and kurtosis to produce an omnibus test of normality.\n\n - anderson-darling: from `scipy.stats.anderson` documentation: The\n Anderson-Darling test tests the null hypothesis that a sample is\n drawn from a population that follows a particular distribution.\n In this method context, that `particular distribution` is fixed\n to the normal/Gaussian.\n\n - all: perform all tests cited above. To consider an attribute\n normally distributed, all test results are taken into account with\n equal weight. Check ``failure`` argument for more information.\n\n threshold : float, optional\n Level of significance used to reject the null hypothesis of\n normality tests.\n\n failure : str, optional\n Used only if ``method`` argument value is `all`. This argument\n must assume one value between `soft` or `hard`. If `soft`, then if\n a single test has its null hypothesis (which all state that the data\n follows a Gaussian distribution) rejected for some attribute, then\n that attribute is already considered normally distributed. 
If value\n is `hard`, then is necessary the rejection of the null hypothesis\n of every single normality test to consider the attribute normally\n distributed.\n\n max_samples : int, optional\n Max samples used while performing the normality tests.\n Shapiro-Wilks test p-value may not be accurate when sample size is\n higher than 5000. Note that the instances are NOT shuffled before\n doing this cutoff. This means that the very first ``max_samples``\n instances of the dataset ``N`` will be considered in the\n statistical tests.\n\n Returns\n -------\n int\n The number of normally distributed attributes based on the\n ``method``. If ``max_samples`` is non-positive, :obj:`np.nan`\n is returned instead.\n\n Raises\n ------\n ValueError\n If ``method`` or ``failure`` is not a valid option.\n\n References\n ----------\n .. [1] Christian Kopf, Charles Taylor, and Jorg Keller. Meta-Analysis:\n From data characterisation for meta-learning to meta-regression. In\n PKDD Workshop on Data Mining, Decision Support, Meta-Learning and\n Inductive Logic Programming, pages 15 – 26, 2000.\n \"\"\"\n accepted_tests = (\n \"shapiro-wilk\",\n \"dagostino-pearson\",\n \"anderson-darling\",\n \"all\",\n )\n\n if method not in accepted_tests:\n raise ValueError(\n \"Unknown method {0}. Select one between {1}\".format(\n method, accepted_tests\n )\n )\n\n if failure not in (\"hard\", \"soft\"):\n raise ValueError(\n '\"failure\" argument must be either \"soft\" '\n 'or \"hard\" (got \"{}\").'.format(failure)\n )\n\n if max_samples <= 0:\n return np.nan\n\n num_inst, num_attr = N.shape\n\n max_row_index = min(max_samples, num_inst)\n\n test_results = []\n\n if method in (\"shapiro-wilk\", \"all\"):\n _, p_values_shapiro = np.apply_along_axis(\n func1d=scipy.stats.shapiro, axis=0, arr=N[:max_row_index, :]\n )\n\n test_results.append(p_values_shapiro > threshold)\n\n if method in (\"dagostino-pearson\", \"all\"):\n _, p_values_dagostino = scipy.stats.normaltest(\n N[:max_row_index, :], axis=0\n )\n\n test_results.append(p_values_dagostino > threshold)\n\n if method in (\"anderson-darling\", \"all\"):\n anderson_stats = np.repeat(False, num_attr)\n\n for attr_ind, attr_vals in enumerate(N[:max_row_index, :].T):\n stat_value, crit_values, signif_levels = scipy.stats.anderson(\n attr_vals, dist=\"norm\"\n )\n\n # As scipy.stats.anderson gives critical values for fixed\n # significance levels, then the strategy adopted is to use\n # the nearest possible from the given threshold as an esti-\n # mator.\n stat_index = np.argmin(abs(signif_levels - threshold))\n crit_val = crit_values[stat_index]\n\n anderson_stats[attr_ind] = stat_value <= crit_val\n\n test_results.append(anderson_stats)\n\n if failure == \"soft\":\n attr_is_normal = np.any(test_results, axis=0)\n\n else:\n attr_is_normal = np.all(test_results, axis=0)\n\n return float(np.sum(attr_is_normal))\n\n @classmethod\n def ft_nr_outliers(cls, N: np.ndarray, whis: float = 1.5) -> int:\n \"\"\"Compute the number of attributes with at least one outlier value.\n\n An attribute has outlier if some value is outside the closed interval\n [first_quartile - WHIS * IQR, third_quartile + WHIS * IQR], where IQR\n is the Interquartile Range (third_quartile - first_quartile), and WHIS\n value is typically `1.5`.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n whis : float, optional\n A factor to multiply IQR and set up non-outlier interval\n (as stated above). 
Higher values widen the non-outlier\n interval, increasing the tolerance for extreme values, while lower\n values narrow it and therefore flag possible outliers more readily.\n\n Returns\n -------\n int\n Number of attributes with at least one outlier.\n\n References\n ----------\n .. [1] Christian Kopf and Ioannis Iglezakis. Combination of task\n description strategies and case base properties for meta-learning.\n In 2nd ECML/PKDD International Workshop on Integration and\n Collaboration Aspects of Data Mining, Decision Support and\n Meta-Learning (IDDM), pages 65 – 76, 2002.\n .. [2] Peter J. Rousseeuw and Mia Hubert. Robust statistics for\n outlier detection. Wiley Interdisciplinary Reviews: Data Mining\n and Knowledge Discovery, 1(1):73 – 79, 2011.\n \"\"\"\n v_min, q_1, q_3, v_max = np.percentile(N, (0, 25, 75, 100), axis=0)\n\n whis_iqr = whis * (q_3 - q_1)\n\n cut_low = q_1 - whis_iqr\n cut_high = q_3 + whis_iqr\n\n return np.sum(np.logical_or(cut_low > v_min, cut_high < v_max))\n\n @classmethod\n def ft_range(cls, N: np.ndarray) -> np.ndarray:\n \"\"\"Compute the range (max - min) of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute ranges.\n\n References\n ----------\n .. [1] Shawkat Ali and Kate A. Smith-Miles. A meta-learning approach\n to automatic kernel selection for support vector machines.\n Neurocomputing, 70(1):173 – 186, 2006.\n \"\"\"\n return np.asfarray(np.ptp(N, axis=0))\n\n @classmethod\n def ft_sd(cls, N: np.ndarray, ddof: int = 1) -> np.ndarray:\n \"\"\"Compute the standard deviation of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n ddof : int, optional\n Degrees of freedom for the standard deviation.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute standard deviations.\n\n References\n ----------\n .. [1] Robert Engels and Christiane Theusinger. Using a data metric for\n preprocessing advice for data mining applications. In 13th European\n Conference on Artificial Intelligence (ECAI), pages 430 – 434,\n 1998.\n \"\"\"\n return np.asfarray(N.std(axis=0, ddof=ddof))\n\n @classmethod\n def ft_sd_ratio(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n ddof: int = 1,\n classes: t.Optional[np.ndarray] = None,\n class_freqs: t.Optional[np.ndarray] = None,\n ) -> float:\n \"\"\"Compute a statistical test for homogeneity of covariances.\n\n The test applied is Box's M test for equivalence of\n covariances.\n\n The null hypothesis of this test states that the covariance\n matrices of the instances of every class are equal.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n ddof : int, optional\n Degrees of freedom for the covariance matrix, calculated during\n this test.\n\n classes : :obj:`np.ndarray`, optional\n All distinct classes in target attribute ``y``. Used to exploit\n precomputations.\n\n class_freqs : :obj:`np.ndarray`, optional\n Absolute frequencies of each distinct class in target attribute\n ``y`` or ``classes``. If ``classes`` is given, then this argument\n must be paired with it by index.\n\n Returns\n -------\n float\n Homogeneity of covariances test result.\n\n Notes\n -----\n For details about how this test is applied, check out `Rivolli\n et al.`_ (page 32).\n\n .. _Rivolli et al.:\n \"Towards Reproducible Empirical Research in Meta-Learning,\"\n Rivolli et al. 
URL: https://arxiv.org/abs/1808.10406\n\n References\n ----------\n .. [1] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and\n John Campbell. Machine Learning, Neural and Statistical\n Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.\n\n \"\"\"\n\n def calc_sample_cov_mat(N, y, ddof):\n \"\"\"Calculate the sample covariance matrix for each class.\"\"\"\n sample_cov_matrices = np.array(\n [\n np.cov(N[y == cl, :], rowvar=False, ddof=ddof)\n for cl in classes\n ]\n )\n\n return np.flip(m=sample_cov_matrices, axis=(0, 1))\n\n def calc_pooled_cov_mat(\n sample_cov_matrices: np.ndarray,\n vec_weight: np.ndarray,\n num_inst: int,\n num_classes: int,\n ) -> np.ndarray:\n \"\"\"Calculate the pooled covariance matrix.\"\"\"\n pooled_cov_mat = np.array(\n [\n weight * S_i\n for weight, S_i in zip(vec_weight, sample_cov_matrices)\n ]\n ).sum(axis=0) / (num_inst - num_classes)\n\n return np.asfarray(pooled_cov_mat)\n\n def calc_gamma_factor(num_col, num_classes, num_inst):\n \"\"\"Calculate the gamma factor which adjusts the output.\"\"\"\n gamma = 1.0 - (\n (2.0 * num_col ** 2.0 + 3.0 * num_col - 1.0)\n / (6.0 * (num_col + 1.0) * (num_classes - 1.0))\n ) * (np.sum(1.0 / vec_weight) - 1.0 / (num_inst - num_classes))\n return gamma\n\n def calc_m_factor(\n sample_cov_matrices: np.ndarray,\n pooled_cov_mat: np.ndarray,\n num_inst: int,\n num_classes: int,\n gamma: float,\n vec_weight: np.ndarray,\n ) -> float:\n \"\"\"Calculate the M factor.\"\"\"\n vec_logdet = [\n np.log(np.linalg.det(S_i)) for S_i in sample_cov_matrices\n ]\n\n m_factor = gamma * (\n (num_inst - num_classes)\n * np.log(np.linalg.det(pooled_cov_mat))\n - np.dot(vec_weight, vec_logdet)\n )\n\n return m_factor\n\n num_inst, num_col = N.shape\n\n if classes is None or class_freqs is None:\n classes, class_freqs = np.unique(y, return_counts=True)\n\n _classes = np.asarray(classes)\n _class_freqs = np.asarray(class_freqs, dtype=int)\n\n num_classes = _classes.size\n sample_cov_matrices = calc_sample_cov_mat(N, y, ddof)\n vec_weight = _class_freqs - 1.0\n\n pooled_cov_mat = calc_pooled_cov_mat(\n sample_cov_matrices, vec_weight, num_inst, num_classes\n )\n\n gamma = calc_gamma_factor(num_col, num_classes, num_inst)\n\n m_factor = calc_m_factor(\n sample_cov_matrices,\n pooled_cov_mat,\n num_inst,\n num_classes,\n gamma,\n vec_weight,\n )\n\n if np.isinf(m_factor):\n return np.nan\n\n return float(np.exp(m_factor / (num_col * (num_inst - num_classes))))\n\n @classmethod\n def ft_skewness(\n cls, N: np.ndarray, method: int = 3, bias: bool = True\n ) -> np.ndarray:\n \"\"\"Compute the skewness for each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n method : int, optional\n Defines the strategy used to estimate data skewness. This argument\n is used for compatibility with the R package `e1071`. 
The options must\n be one of the following:\n\n +--------+-----------------------------------------------+\n |Option | Formula |\n +--------+-----------------------------------------------+\n |1 | Skew_1 = m_3 / m_2**(3/2) |\n | | (default of ``scipy.stats``) |\n +--------+-----------------------------------------------+\n |2 | Skew_2 = Skew_1 * sqrt(n(n-1)) / (n-2) |\n +--------+-----------------------------------------------+\n |3 | Skew_3 = m_3 / s**3 = Skew_1 ((n-1)/n)**(3/2) |\n +--------+-----------------------------------------------+\n\n Where `n` is the number of instances in ``N``, `s` is the standard\n deviation of each attribute in ``N``, and `m_i` is the ith\n statistical moment of each attribute in ``N``.\n\n Note that if the selected method is unable to be calculated due to\n division by zero, then the first method will be used instead.\n\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute skewness.\n\n References\n ----------\n .. [1] Donald Michie, David J. Spiegelhalter, Charles C. Taylor, and\n John Campbell. Machine Learning, Neural and Statistical\n Classification, volume 37. Ellis Horwood Upper Saddle River, 1994.\n \"\"\"\n skew_arr = np.apply_along_axis(\n func1d=_summary.sum_skewness,\n axis=0,\n arr=N,\n bias=bias,\n method=method,\n )\n\n return skew_arr\n\n @classmethod\n def ft_sparsity(cls, X: np.ndarray, normalize: bool = True) -> np.ndarray:\n \"\"\"Compute the (possibly normalized) sparsity metric for each attribute.\n\n Sparsity `S` of a vector `v` of numeric values is defined as\n\n S(v) = (1.0 / (n - 1.0)) * ((n / phi(v)) - 1.0),\n\n where\n - `n` is the number of instances in dataset ``X``.\n - `phi(v)` is the number of distinct values in `v`.\n\n Parameters\n ----------\n X : :obj:`np.ndarray`\n Fitted numerical data.\n\n normalize : bool, optional\n If True, then the output will be S(v) as shown above. Otherwise,\n the output is not multiplied by the `(1.0 / (n - 1.0))` factor\n (i.e., the new output is defined as S'(v) = ((n / phi(v)) - 1.0)).\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute sparsities.\n\n References\n ----------\n .. [1] Mostafa A. Salama, Aboul Ella Hassanien, and Kenneth Revett.\n Employment of neural network and rough set in meta-learning.\n Memetic Computing, 5(3):165 – 177, 2013.\n \"\"\"\n ans = np.array([attr.size / np.unique(attr).size for attr in X.T])\n\n num_inst, _ = X.shape\n\n norm_factor = 1.0\n if normalize:\n norm_factor = 1.0 / (num_inst - 1.0)\n\n return (ans - 1.0) * norm_factor\n\n @classmethod\n def ft_t_mean(cls, N: np.ndarray, pcut: float = 0.2) -> np.ndarray:\n \"\"\"Compute the trimmed mean of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n pcut : float, optional\n Proportion cut from both the `lower` and the `higher` values.\n This value should be in the interval [0.0, 0.5); if 0.0, the\n return value is the default mean calculation.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute trimmed means.\n\n References\n ----------\n .. [1] Robert Engels and Christiane Theusinger. Using a data metric for\n preprocessing advice for data mining applications. 
In 13th European\n Conference on Artificial Intelligence (ECAI), pages 430 – 434,\n 1998.\n \"\"\"\n return scipy.stats.trim_mean(N, proportiontocut=pcut)\n\n @classmethod\n def ft_var(cls, N: np.ndarray, ddof: int = 1) -> np.ndarray:\n \"\"\"Compute the variance of each attribute.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n ddof : int, optional\n Degrees of freedom for the variance.\n\n Returns\n -------\n :obj:`np.ndarray`\n Attribute variances.\n\n References\n ----------\n .. [1] Ciro Castiello, Giovanna Castellano, and Anna Maria Fanelli.\n Meta-data: Characterization of input features for meta-learning.\n In 2nd International Conference on Modeling Decisions for\n Artificial Intelligence (MDAI), pages 457–468, 2005.\n \"\"\"\n return np.asfarray(N.var(axis=0, ddof=ddof))\n\n @classmethod\n def ft_w_lambda(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n can_cor_eigvals: t.Optional[np.ndarray] = None,\n can_cors: t.Optional[np.ndarray] = None,\n ) -> float:\n \"\"\"Compute the Wilks' Lambda value.\n\n Wilks' Lambda L is calculated as:\n\n L = prod(1.0 / (1.0 + can_cor_eig_i))\n\n Where `can_cor_eig_i` is the ith eigenvalue related to the ith\n canonical correlation `can_cor_i` between the attributes in ``N``\n and the binarized (one-hot encoded) version of ``y``.\n\n The relationship between `can_cor_eig_i` and `can_cor_i` is\n given by:\n\n can_cor_i = sqrt(can_cor_eig_i / (1 + can_cor_eig_i))\n\n Or, equivalently:\n\n can_cor_eig_i = can_cor_i**2 / (1 - can_cor_i**2)\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Fitted numerical data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n can_cor_eigvals : :obj:`np.ndarray`, optional\n Eigenvalues associated with the canonical correlations of\n ``N`` and one-hot encoded ``y``. This argument is used to\n exploit precomputations. The relationship between the ith\n canonical correlation `can_cor_i` and its eigenvalue is:\n\n can_cor_i = sqrt(can_cor_eigval_i / (1 + can_cor_eigval_i))\n\n Or, equivalently:\n\n can_cor_eigval_i = can_cor_i**2 / (1 - can_cor_i**2)\n\n can_cors : :obj:`np.ndarray`, optional\n Canonical correlations between ``N`` and the one-hot encoded\n version of ``y``. Argument used to take advantage of\n precomputations. Used only if ``can_cor_eigvals`` is None.\n\n Returns\n -------\n float\n Wilks' Lambda value.\n\n References\n ----------\n .. [1] Guido Lindner and Rudi Studer. AST: Support for algorithm\n selection with a CBR approach. In European Conference on\n Principles of Data Mining and Knowledge Discovery (PKDD),\n pages 418 – 423, 1999.\n \"\"\"\n if can_cor_eigvals is None:\n if can_cors is None:\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n can_cor_eigvals = cls._can_cor_to_eigval(can_cors)\n\n if can_cor_eigvals.size == 0:\n return np.nan\n\n # return float(np.prod(1 / (1 + can_cor_eigvals)))\n return float(np.exp(-np.sum(np.log1p(can_cor_eigvals))))\n\n @classmethod\n def ft_p_trace(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n can_cors: t.Optional[np.ndarray] = None,\n ) -> float:\n \"\"\"Compute Pillai's trace.\n\n Pillai's trace is the sum of the squared canonical\n correlations of ``N`` and the one-hot encoded version of ``y``.\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Numerical fitted data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n can_cors : :obj:`np.ndarray`, optional\n Canonical correlations between ``N`` and the one-hot encoded\n version of ``y``. 
Argument used to take advantage of\n precomputations.\n\n Returns\n -------\n float\n Pillai's trace value.\n\n References\n ----------\n .. [1] Pillai K.C.S (1955). Some new test criteria in multivariate\n analysis. Ann Math Stat: 26(1):117–21. Seber, G.A.F. (1984).\n Multivariate Observations. New York: John Wiley and Sons.\n \"\"\"\n if can_cors is None:\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n if can_cors.size == 0: # type: ignore\n return np.nan\n\n return float(np.sum(np.square(can_cors)))\n\n @classmethod\n def ft_lh_trace(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n can_cor_eigvals: t.Optional[np.ndarray] = None,\n can_cors: t.Optional[np.ndarray] = None,\n ) -> float:\n \"\"\"Compute the Lawley-Hotelling trace.\n\n The Lawley-Hotelling trace LH is given by:\n\n LH = sum_{i} can_cor_i**2 / (1 - can_cor_i**2)\n\n Where `can_cor_i` is the ith canonical correlation of\n ``N`` and the one-hot encoded version of ``y``.\n\n Equivalently, LH can be calculated from the eigenvalues\n related to each canonical correlation due to the relationship:\n\n can_cor_eigval_i = can_cor_i**2 / (1 - can_cor_i**2)\n\n Therefore, LH is given simply by:\n\n LH = sum_{i} can_cor_eigval_i\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Numerical fitted data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n can_cor_eigvals : :obj:`np.ndarray`, optional\n Eigenvalues associated with the canonical correlations of\n ``N`` and one-hot encoded ``y``. This argument is used to\n exploit precomputations. The relationship between the ith\n canonical correlation ``can_cor_i`` and its eigenvalue is:\n\n can_cor_i = sqrt(can_cor_eigval_i / (1 + can_cor_eigval_i))\n\n Or, equivalently:\n\n can_cor_eigval_i = can_cor_i**2 / (1 - can_cor_i**2)\n\n can_cors : :obj:`np.ndarray`, optional\n Canonical correlations between ``N`` and the one-hot encoded\n version of ``y``. Argument used to take advantage of\n precomputations. Used only if ``can_cor_eigvals`` is None.\n\n Returns\n -------\n float\n Lawley-Hotelling trace value.\n\n References\n ----------\n .. [1] Lawley D. A Generalization of Fisher's z Test. Biometrika.\n 1938;30(1):180-187.\n .. [2] Hotelling H. A generalized T test and measure of multivariate\n dispersion. In: Neyman J, ed. Proceedings of the Second Berkeley\n Symposium on Mathematical Statistics and Probability. Berkeley:\n University of California Press; 1951:23-41.\n \"\"\"\n if can_cor_eigvals is None:\n if can_cors is None:\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n can_cor_eigvals = cls._can_cor_to_eigval(can_cors)\n\n if can_cor_eigvals.size == 0: # type: ignore\n return np.nan\n\n return float(np.sum(can_cor_eigvals))\n\n @classmethod\n def ft_roy_root(\n cls,\n N: np.ndarray,\n y: np.ndarray,\n criterion: str = \"eigval\",\n can_cors: t.Optional[np.ndarray] = None,\n can_cor_eigvals: t.Optional[np.ndarray] = None,\n ) -> float:\n \"\"\"Compute Roy's largest root.\n\n Roy's largest root RLR can be computed using two distinct\n approaches (see the references for further explanation).\n\n 1. Based on Roy's (ii) original hypothesis: formulated using the\n largest eigenvalue associated with the canonical correlations\n between ``N`` and the one-hot encoded version of ``y``. That\n is, Roy's largest root RLR_a can be defined as:\n\n RLR_a = max_{i} can_cor_eigval_i\n\n It is in range [0, +inf).\n\n 2. Based on Roy's (iii) original hypothesis: formulated using\n the largest squared canonical correlation of ``N`` and the one-\n hot encoded version of ``y``. 
Therefore, Roy's largest root\n RLR_b can be defined as:\n\n RLR_b = max_{i} can_cor_i**2\n\n It is in range [0, 1].\n\n Note that both statistics have different meanings and, therefore,\n will assume distinct values.\n\n Which formulation is used can be controlled using the ``criterion``\n argument (see below for more information).\n\n Parameters\n ----------\n N : :obj:`np.ndarray`\n Numerical fitted data.\n\n y : :obj:`np.ndarray`\n Target attribute.\n\n criterion : str, optional\n If `eigval`, calculate Roy's largest root as the largest\n eigenvalue associated with each canonical correlation.\n This is the first formulation described above. If `cancor`,\n calculate Roy's largest root as the largest squared\n canonical correlation. This is the second formulation above.\n\n can_cors : :obj:`np.ndarray`, optional\n Canonical correlations between ``N`` and the one-hot encoded\n version of ``y``. Argument used to take advantage of\n precomputations. Used only if ``criterion`` is `cancor` or,\n if otherwise, the ``can_cor_eigvals`` argument is None.\n\n can_cor_eigvals : :obj:`np.ndarray`, optional\n Eigenvalues associated with the canonical correlations of\n ``N`` and one-hot encoded ``y``. This argument is used to\n exploit precomputations. The relationship between the ith\n canonical correlation ``can_cor_i`` and its eigenvalue is:\n\n can_cor_i = sqrt(can_cor_eigval_i / (1 + can_cor_eigval_i))\n\n Or, equivalently:\n\n can_cor_eigval_i = can_cor_i**2 / (1 - can_cor_i**2)\n\n This argument is used only if the ``criterion`` argument is\n `eigval`.\n\n Returns\n -------\n float\n Roy's largest root calculated based on the criterion defined by\n the ``criterion`` argument.\n\n References\n ----------\n .. [1] Roy SN. On a Heuristic Method of Test Construction and its\n use in Multivariate Analysis. Ann Math Stat. 1953;24(2):220-238.\n .. [2] A note on Roy's largest root. Kuhfeld, W.F. Psychometrika (1986)\n 51: 479. https://doi.org/10.1007/BF02294069\n \"\"\"\n VALID_CRITERIA = (\"eigval\", \"cancor\")\n\n if criterion not in VALID_CRITERIA:\n raise ValueError(\n \"Roy's largest root 'criterion' must be in {}.\".format(\n VALID_CRITERIA\n )\n )\n\n if criterion == \"eigval\":\n if can_cor_eigvals is None:\n if can_cors is None:\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n can_cor_eigvals = cls._can_cor_to_eigval(can_cors)\n\n values = can_cor_eigvals\n\n else:\n if can_cors is None:\n can_cors = cls._calc_can_cors(N=N, y=y)\n\n values = np.square(can_cors)\n\n if values.size == 0: # type: ignore\n return np.nan\n\n return float(np.max(values))\n" ]
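The canonical-correlation features in the code above (ft_w_lambda, ft_p_trace, ft_lh_trace, ft_roy_root) all hinge on the identity can_cor_eigval_i = can_cor_i**2 / (1 - can_cor_i**2). A minimal numpy sketch, using made-up canonical correlations, showing that the exp(-sum(log1p(...))) form returned by ft_w_lambda is a numerically safer rewrite of prod(1 / (1 + eig)), which in turn collapses to prod(1 - rho**2):

import numpy as np

rho = np.array([0.9, 0.5, 0.1])  # hypothetical canonical correlations
eig = rho**2 / (1.0 - rho**2)    # eigenvalue form used throughout the class
lam_direct = np.prod(1.0 / (1.0 + eig))
lam_stable = np.exp(-np.sum(np.log1p(eig)))   # the form ft_w_lambda returns
print(lam_direct, lam_stable, np.prod(1.0 - rho**2))  # all three agree (~0.141075)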
[ [ "numpy.dot", "numpy.asarray", "numpy.all", "numpy.max", "numpy.argmin", "numpy.any", "numpy.exp", "numpy.square", "scipy.stats.trim_mean", "numpy.unique", "numpy.tril_indices", "numpy.flatnonzero", "numpy.asfarray", "numpy.linalg.det", "numpy.argmax", "numpy.apply_along_axis", "numpy.log1p", "numpy.repeat", "numpy.zeros", "scipy.stats.gmean", "numpy.logical_not", "numpy.isnan", "numpy.median", "scipy.stats.anderson", "numpy.logical_or", "numpy.delete", "numpy.cov", "numpy.corrcoef", "numpy.logical_and", "numpy.flip", "numpy.sum", "numpy.array", "numpy.abs", "scipy.stats.normaltest", "scipy.stats.hmean", "numpy.percentile", "numpy.ptp", "scipy.stats.median_abs_deviation", "scipy.stats.iqr", "numpy.linalg.eigvalsh", "numpy.isinf" ] ]
liulhdarks/code2vec
[ "e9be5bd56990f47a0c4d27c61b317a06d85c470d" ]
[ "model/main.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n__author__ = 'lihua.llh'\n\nimport tensorflow as tf\nimport numpy as np\nfrom model.datas import *\nfrom model.model import *\nfrom model.iterator import *\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score\nimport math\n\nimport sys\nfrom os import path\n\nroot_dir = path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\nsys.path.append(root_dir)\nsys.path.append('.')\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_integer('random_seed', 123, \"random_seed\")\nflags.DEFINE_float('lr_min', 0.00001, \"lr_min\")\nflags.DEFINE_float('lr_max', 0.003, \"lr_max\")\nflags.DEFINE_float('move_avg_decay', 0.999, \"move_avg_decay\")\nflags.DEFINE_string('optimize_algo', 'adam', \"optimize_algo\")\nflags.DEFINE_float('keep_prob', 0.5, \"keep_prob\")\nflags.DEFINE_integer('max_grad_norm', 4, \"max_grad_norm\")\nflags.DEFINE_integer('max_path_length', 80, \"max_path_length\")\nflags.DEFINE_integer('batch_size', 32, \"batch_size\")\nflags.DEFINE_integer('terminal_embed_size', 100, \"terminal_embed_size\")\nflags.DEFINE_integer('path_embed_size', 100, \"path_embed_size\")\nflags.DEFINE_integer('encode_size', 100, \"encode_size\")\nflags.DEFINE_integer('attention_size', 100, \"attention_size\")\nflags.DEFINE_integer('num_sampled', 32, \"num_sampled\")\nflags.DEFINE_string('model_path', \"./model\", \"model_path\")\nflags.DEFINE_string('summary_path', \"./summary\", \"summary_path\")\nflags.DEFINE_string('corpus_path', \"../dataset/corpus.txt\", \"corpus_path\")\nflags.DEFINE_string('path_idx_path', \"../dataset/path_idxs.txt\", \"path_idx_path\")\nflags.DEFINE_string('terminal_idx_path', \"../dataset/terminal_idxs.txt\", \"terminal_idx_path\")\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\nclass Option(object):\n def __init__(self, reader):\n self.max_path_length = FLAGS.max_path_length\n self.terminal_embed_size = FLAGS.terminal_embed_size\n self.path_embed_size = FLAGS.path_embed_size\n self.encode_size = FLAGS.encode_size\n self.attention_size = FLAGS.attention_size\n self.terminal_count = reader.terminal_idxs.count()\n self.path_count = reader.path_idxs.count()\n self.label_count = reader.label_idx.count()\n self.num_sampled = FLAGS.num_sampled\n self.training = True\n self.keep_prob = FLAGS.keep_prob\n self.test = False\n\n\ndef build_metric():\n with tf.variable_scope('metric'):\n precision_holder = tf.placeholder(dtype=tf.float32, name='precision_holder')\n recall_holder = tf.placeholder(dtype=tf.float32, name='recall_holder')\n f1_holder = tf.placeholder(dtype=tf.float32, name='f1_holder')\n acc_holder = tf.placeholder(dtype=tf.float32, name='accuracy_holder')\n precision_var = tf.Variable(0, dtype=tf.float32, name=\"precision\", trainable=False)\n recall_var = tf.Variable(0, dtype=tf.float32, name=\"recall\", trainable=False)\n f1_var = tf.Variable(0, dtype=tf.float32, name=\"f1\", trainable=False)\n acc_var = tf.Variable(0, dtype=tf.float32, name=\"accuracy\", trainable=False)\n p_update = tf.assign(precision_var, precision_holder)\n r_update = tf.assign(recall_var, recall_holder)\n f1_update = tf.assign(f1_var, f1_holder)\n acc_update = tf.assign(acc_var, acc_holder)\n metric_update = tf.group(p_update, r_update, f1_update, acc_update)\n 
tf.summary.scalar('precision', precision_var)\n tf.summary.scalar('recall', recall_var)\n tf.summary.scalar('f1', f1_var)\n tf.summary.scalar('accuracy', acc_var)\n return metric_update, {'p': precision_holder, 'r': recall_holder, 'f1': f1_holder, 'acc': acc_holder}\n\n\ndef train():\n reader = DataReader(FLAGS.corpus_path, FLAGS.path_idx_path, FLAGS.terminal_idx_path)\n opt = Option(reader)\n\n builder = DatasetBuilder(reader, opt)\n train_dataset = builder.train_dataset\n test_dataset = builder.test_dataset\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types,\n train_dataset.output_shapes)\n train_init_op = iterator.make_initializer(train_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n batch_datas = iterator.get_next()\n inputs_start = batch_datas['inputs_start']\n inputs_path = batch_datas['inputs_path']\n inputs_end = batch_datas['inputs_end']\n labels = batch_datas['labels']\n\n metric_update, metric = build_metric()\n\n with tf.variable_scope('model'):\n train_opt = Option(reader)\n train_opt.training = True\n lr = tf.placeholder(dtype=tf.float32, name='lr')\n train_model = Code2vecModel(inputs_start, inputs_path, inputs_end, labels, train_opt)\n train_op, global_step = utils.optimize_loss_batch_norm(train_model.loss, FLAGS.optimize_algo, lr, max_grad_norm=FLAGS.max_grad_norm,\n move_avg_decay=FLAGS.move_avg_decay, momentum=0.9, opt_decay=0.9,\n global_step_val=0)\n with tf.variable_scope('model', reuse=True):\n eval_opt = Option(reader)\n eval_opt.training = False\n eval_model = Code2vecModel(inputs_start, inputs_path, inputs_end, labels, eval_opt)\n\n with tf.variable_scope('model', reuse=True):\n test_opt = Option(reader)\n test_opt.training = False\n test_opt.test = True\n start_holder = tf.placeholder(dtype=tf.int32, shape=[None, opt.max_path_length], name='start_holder')\n path_holder = tf.placeholder(dtype=tf.int32, shape=[None, opt.max_path_length], name='path_holder')\n end_holder = tf.placeholder(dtype=tf.int32, shape=[None, opt.max_path_length], name='end_holder')\n test_model = Code2vecModel(start_holder, path_holder, end_holder, None, test_opt)\n\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n with tf.Session(config=session_conf) as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(train_init_op)\n\n merged_summary = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(FLAGS.summary_path, sess.graph)\n\n saver = tf.train.Saver()\n # saver.restore(sess, '/Users/lihua.llh/Desktop/studio/oss/force/model/summary/embeddings.bin-55909')\n\n last_loss = None\n last_accuracy = None\n bad_count = 0\n lr_min = FLAGS.lr_min\n lr_max = FLAGS.lr_max\n lr_decay = 10000\n iter_num = 0\n _lr = lr_max\n lr_iter_num = 0\n for i in range(10000):\n train_loss = 0\n count = 0\n while True:\n iter_num += 1\n lr_iter_num += 1\n try:\n _lr = lr_min + (lr_max - lr_min) * math.exp(-float(lr_iter_num + 1) / lr_decay)\n if iter_num > 1 and iter_num % 200 == 0:\n _, summary_str, loss, step = sess.run([train_op, merged_summary, train_model.loss, global_step], feed_dict={lr: _lr})\n summary_writer.add_summary(summary_str, step)\n else:\n _, loss, step = sess.run([train_op, train_model.loss, global_step], feed_dict={lr: _lr})\n train_loss += loss\n count += 1\n except tf.errors.OutOfRangeError:\n sess.run(train_init_op)\n break\n test_loss, accuracy, p, r, f1 = eval(sess, eval_model, batch_datas, test_init_op)\n sess.run(metric_update, 
feed_dict={metric['p']: p, metric['r']: r, metric['f1']: f1, metric['acc']: accuracy})\n sess.run(train_init_op)\n if i % 1 == 0:\n print(i, ' loss:', train_loss, ' test_loss:', test_loss, ' acc:', accuracy, ' p:', p,\n ' r:', r, ' f1:', f1, ' best:', last_loss,\n ' bad:', bad_count, ' lr:', _lr, ' step:', step)\n if i > 1 and i % 40 == 0:\n test(reader, sess, eval_model, batch_datas, test_init_op)\n sess.run(train_init_op)\n if last_loss is None or train_loss < last_loss or last_accuracy is None or last_accuracy < accuracy:\n if last_accuracy is None or last_accuracy < accuracy:\n export_graph(sess)\n last_loss = train_loss\n last_accuracy = accuracy\n bad_count = 0\n output_file = path.join(FLAGS.summary_path, \"embeddings.bin\")\n saver.save(sess, output_file, global_step=step)\n else:\n bad_count += 1\n if bad_count % 2 == 0 and lr_decay > 1000:\n lr_decay -= 200\n if bad_count > 10:\n print('early stop loss:', train_loss, ' bad:', bad_count)\n test(reader, sess, eval_model, batch_datas, test_init_op)\n break\n\n\ndef eval(sess, model, batch_datas, test_init_op):\n labels = batch_datas['labels']\n sess.run(test_init_op)\n sum_loss = 0\n cl_labels_idx = []\n cl_preds_idx = []\n while True:\n try:\n loss, labels_idx, outputs = sess.run([model.loss, labels, model.outputs])\n sum_loss += loss\n preds_idx = np.argmax(outputs, axis=1)\n cl_labels_idx.extend(labels_idx)\n cl_preds_idx.extend(preds_idx)\n except tf.errors.OutOfRangeError:\n break\n p = precision_score(cl_labels_idx, cl_preds_idx, average='weighted')\n r = recall_score(cl_labels_idx, cl_preds_idx, average='weighted')\n f1score = f1_score(cl_labels_idx, cl_preds_idx, average='weighted')\n accuracy = accuracy_score(cl_labels_idx, cl_preds_idx)\n return sum_loss, accuracy, p, r, f1score\n\n\ndef test(reader, sess, model, batch_datas, test_init_op):\n inputs_start = batch_datas['inputs_start']\n inputs_path = batch_datas['inputs_path']\n inputs_end = batch_datas['inputs_end']\n labels = batch_datas['labels']\n sess.run(test_init_op)\n start, path, end, tags, outputs, probs = sess.run([inputs_start, inputs_path, inputs_end, labels, model.outputs, model.attn_probs])\n for i in range(len(start)):\n start_datas = [reader.terminal_idxs.i2t[v] for v in start[i]]\n path_datas = [reader.path_idxs.i2t[v] for v in path[i]]\n end_datas = [reader.terminal_idxs.i2t[v] for v in end[i]]\n tag_datas = reader.label_idx.get_label(tags[i])\n pred_idx = np.argmax(outputs[i])\n pred_data = reader.label_idx.get_label(pred_idx)\n prob = probs[i]\n datas = zip(start_datas, path_datas, end_datas, prob[:len(start_datas)])\n if tag_datas == pred_data or i == len(start) - 1:\n for s, p, e, pb in datas:\n print(s, ' ', p, ' ', e, ' p:', pb)\n print('tag_datas:', tag_datas)\n print('pred_data:', pred_data)\n break\n\n\ndef export_graph(sess):\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,\n output_node_names=[\n 'model_2/train_classify/outputs',\n 'model_2/attention/attn_probs'])\n for node in output_graph_def.node:\n node.device = \"\"\n output_file = path.join(FLAGS.model_path, \"code2vec_model.pb\")\n with tf.gfile.FastGFile(output_file, mode='wb') as f:\n f.write(output_graph_def.SerializeToString())\n\n\ndef main(_):\n train()\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n tf.set_random_seed(123)\n random.seed(123)\n tf.app.run()\n" ]
[ [ "sklearn.metrics.f1_score", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.graph_util.convert_variables_to_constants", "tensorflow.Variable", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.app.run", "tensorflow.flags.DEFINE_boolean", "sklearn.metrics.precision_score", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.set_random_seed", "sklearn.metrics.recall_score", "tensorflow.summary.FileWriter", "numpy.random.seed", "tensorflow.assign", "tensorflow.data.Iterator.from_structure", "tensorflow.variable_scope", "tensorflow.gfile.FastGFile", "sklearn.metrics.accuracy_score" ] ]
NicEscobar/InertialNavigation
[ "85dffed6cf5c4063a097c3c9305f4ec92ce53623" ]
[ "main.py" ]
[ "#python3 main.py --device=/dev/ttyACM1 --baudrate=115200\n#https://www.youtube.com/watch?v=UGjjP45wrKQ\n\nfrom filtros import Filtros\nfrom sensores import Sensores\nfrom gps import GPS\nfrom calc_algebra import Calc_Algebra\n\nimport time\nimport RPi.GPIO as gpio\nimport threading\nimport sys, os\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\nimport pandas as pd\nfrom tkinter import *\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\n\n# tell python where to find mavlink so we can import it\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../mavlink'))\nfrom pymavlink import mavutil\n\n\ngpio.setmode(gpio.BCM)\n\ngpio.setup(18,gpio.OUT)\ngpio.setup(25,gpio.OUT)\n\npwm = gpio.PWM(18, 50)\nmotor = gpio.PWM(25, 50)\n\ngpio.output(18,gpio.LOW)\ngpio.output(25,gpio.LOW)\n\npwm.start(0)\nmotor.start(0)\n\npwm.ChangeDutyCycle(6.6)\nmotor.ChangeDutyCycle(7.6)\ntime.sleep(2)\n\nobj_sensor = Sensores();\nobj_filtros = Filtros();\nobj_gps = GPS();\nobj_calc_algebra = Calc_Algebra();\n\nanguloGiro = 0\n \ngps_xy_anterior = obj_gps.Conversao_Geograficas_UTM(-22.25815,-45.69586)\ngps_xy_atual = obj_gps.Conversao_Geograficas_UTM(-22.25819,-45.69580)\ngps_xy_final = obj_gps.get_PontoFinal()\n\n\n\nn_satelites = 0\n\nsair = False\npausar = False\n\ndf = pd.DataFrame({\n 'gps_x_anterior': [],\n 'gps_y_anterior': [],\n 'gps_x_atual': [],\n 'gps_y_atual': [],\n 'gps_x_final': [],\n 'gps_y_final': [],\n 'angulo(erro)': 0,\n 'sinal(erro)': 0,\n 'corr': 0,\n 'servo': 0,\n 'gps_x_atual_real':[],\n 'gps_y_atual_real':[] \n })\n\n\nkp = 4 #proporcional. O quanto agrecivo o carro vai virar. Muda o servo. Menor: varia mais suave\nkd = 0.5 #derivativo. 
Speeds up the proportional response when the derivative term is large\nki = 0 #integral term\n\nerro = 0\nerro_ant = 0\nsinalGiro = 0\nder_erro = 0\ncorr = 0\n\n\nmotorComand = True\n\nfig = plt.Figure(figsize=(5,4), dpi=100)\nax = fig.add_subplot(111)\n\n#---------------------------Threads\n\n \n \n \ndef thread_MPU():\n \n global anguloGiro, sair\n \n while sair == False:\n try:\n anguloGiro = obj_sensor.get_angulos_MPU6050();\n except:\n anguloGiro = 0\n \ndef thread_Principal():\n \n global sair\n global anguloGiro, gps_xy_anterior, gps_xy_atual, gps_xy_final, n_satelites\n global kp, kd, ki, erro, erro_ant, sinalGiro,der_erro,corr,depoisDasPrimeirasAmostras \n\n contGPS = 0\n depoisDasPrimeirasAmostras = 0\n erroVetores = 0\n anguloFinal = 0\n\n \n # create a mavlink serial instance\n master = mavutil.mavlink_connection('/dev/ttyACM1', baud=115200)\n \n # wait for the heartbeat msg to find the system ID\n master.wait_heartbeat()\n\n # request data to be sent at the given rate\n master.mav.request_data_stream_send(master.target_system, master.target_component, \n mavutil.mavlink.MAV_DATA_STREAM_ALL, 4, 1)\n \n while sair == False:\n\n while pausar == False: \n # grab a mavlink message\n msg = master.recv_match(blocking=True)\n \n if msg: \n # handle the message based on its type\n msg_type = msg.get_type()\n \n \n if msg_type == \"BAD_DATA\":\n if mavutil.all_printable(msg.data):\n sys.stdout.write(msg.data)\n sys.stdout.flush()\n elif msg_type == \"RC_CHANNELS_RAW\": \n #handle_rc_raw(msg)\n pass\n elif msg_type == \"HEARTBEAT\":\n #handle_heartbeat(msg)\n #print('heart',msg)\n pass\n elif msg_type == \"GPS_RAW_INT\":\n n_satelites = msg.satellites_visible\n pass\n \n elif msg_type == \"GLOBAL_POSITION_INT\":\n \n \n lat = float(msg.lat)/10000000\n lon = float(msg.lon)/10000000\n \n gps_xy_atual_bruto = obj_gps.Conversao_Geograficas_UTM(lat, lon)\n \n gps_xy_atual_real = obj_filtros.Filtro_IIR(0.3, gps_xy_anterior, gps_xy_atual_bruto) \n \n sinalGiro = obj_calc_algebra.get_ProdutoVetorial(gps_xy_anterior, gps_xy_atual, gps_xy_final)\n \n #Get the tolerance band around the final point\n gps_xy_final_Faixa = obj_gps.get_FaixaErroPontoFinal_PN(gps_xy_atual)\n \n # NOTE: the guard that wrapped the next line compared the filtered\n # point against the raw point on both sides (a < b and a > b), which\n # can never hold and raises on numpy arrays; recomputing the heading\n # error on every GPS update is an assumed fix.\n erroVetores = obj_calc_algebra.get_AnguloVetores_graus(gps_xy_anterior, gps_xy_atual, gps_xy_final)\n \n if(sinalGiro > 0):\n erroVetores = erroVetores*(-1)\n \n erro = anguloGiro - erroVetores\n \n if erro > 180:\n erro = erro - 360\n elif erro < -180:\n erro = erro + 360\n \n der_erro = erro - erro_ant\n \n corr = erro*kp + der_erro*kd\n \n if sinalGiro < 0:\n corr = corr * (-1)\n \n #clamp so the servo pulse stays between 1000 and 2000\n if corr > 500:\n corr = 500\n elif corr < -500:\n corr = -500\n \n servo = 1500.0 + corr\n servo = servo - 1000\n servo = 5.0*servo/1000\n servo = servo + 5\n \n ControleServo(servo)\n motor.ChangeDutyCycle(7.8)\n \n print(\"anguloGiro: {:.2f} sinal: {:.2f} erro: {:.2f} servo: {:.2f} GPS_ATUAL_x: {:.2f} GPS_ATUAL_y: {:.2f}\"\n .format(anguloGiro, sinalGiro, erro, servo, gps_xy_atual_real[0], gps_xy_atual_real[1]))\n \n \n #print('ponto final', gps_xy_final, 'ponto atual', gps_xy_atual, 'ponto_anterior', gps_xy_anterior)\n #If we get close to the final point, stop the motor\n \n \n #Append the new values to the dataframe\n Adicionar_Dataframe(gps_xy_anterior,gps_xy_atual,gps_xy_final,erro,sinalGiro,corr,servo,gps_xy_atual_real)\n \n #time.sleep(0.2)\n \n erro_ant = erro \n contGPS -= 1\n 
\n#------------------------------------sinal-------Main-------------------------- \ndef animate(i):\n\n global anguloGiro, gps_xy_anterior,gps_xy_atual,gps_xy_final\n \n ax.clear()\n \n x = [gps_xy_anterior[0],gps_xy_atual[0],gps_xy_final[0]]\n y = [gps_xy_anterior[1],gps_xy_atual[1],gps_xy_final[1]]\n \n x2 = [gps_xy_atual[0],gps_xy_final[0]]\n y2 = [gps_xy_atual[1],gps_xy_final[1]]\n \n ax.plot(x, y, color='blue')\n ax.plot(x2, y2, color='orange')\n \ndef SalvarNovoPontoFinal():\n \n global gps_xy_final\n \n campo_lat = float(GPS_Final_input_lat.get())\n campo_lon = float(GPS_Final_input_lon.get())\n \n if(campo_lat!='' and campo_lon!=''):\n obj_gps.set_PontoFinal(campo_lat,campo_lon)\n \n l1.config(text=gps_xy_final)\n else:\n print('Preencha os campos lat e lon')\n \ndef Adicionar_Dataframe(gps_xy_anterior,gps_xy_atual,gps_xy_final,erro,sinalGiro,corr,servo,gps_xy_atual_2):\n \n global df\n \n #Adicionando valores no dataframe \n novaEntrada_df = [gps_xy_anterior[0],\n gps_xy_anterior[1],\n gps_xy_atual[0],\n gps_xy_atual[1],\n gps_xy_final[0],\n gps_xy_final[1],\n erro,\n sinalGiro,\n corr,\n servo,\n gps_xy_atual_2[0],\n gps_xy_atual_2[1]] \n \n df.loc[len(df)] = novaEntrada_df\n \n \ndef GerarCSV():\n \n global df\n \n \n ax = plt.gca()\n \n if df.index.isin([('0')]).any():\n df = df.drop(0)\n \n '''\n #Rota do ponto anterior\n df.plot(kind='line',x='gps_x_anterior',y='gps_y_anterior',color='red',ax=ax,marker=\"o\")\n df.plot(kind='line',x='gps_x_final',y='gps_y_final',color='magenta',ax=ax,marker=\"o\")\n '''\n #Reta primeiro ponto ao final\n #df.plot(kind='scatter',x='gps_x_atual',y='gps_y_atual',color='blue',ax=ax,marker=\"o\")\n \n #Pontos do GPS \n df.plot(kind='scatter',x='gps_x_atual_real',y='gps_y_atual_real',color='blue',ax=ax,marker=\"o\")\n df.plot(kind='line',x='gps_x_final',y='gps_y_final',color='magenta',ax=ax,marker=\"o\")\n plt.show()\n \n #plt.xticks(values,x)\n \n\ndef Pausar():\n \n global pausar\n \n \n \n if pausar == False:\n pausar = True\n print('Parado')\n else:\n pausar = False\n print('Rodando')\n \n \ndef Sair():\n \n global sair,tk, pausar, df\n \n file = 'log_GPS_Fixo.csv'\n print('Pronto para finalizar')\n \n #Desligo o motor\n pausar = True\n \n time.sleep(1)\n \n #Sai das threads\n sair = True\n \n df.to_csv(file)\n \n #Destroi a interface\n tk.destroy()\n\ndef ControleServo(servo):\n global pwm\n \n pwm.ChangeDutyCycle(servo)\n \ndef ControleMotor(comando):\n \n \n \n #COMANDO:\n #0 = PARAR\n #1 = LIGAR\n \n if comando == 0:\n \n print('Motor parado')\n elif comando == 1:\n \n print('Motor ligado')\n \n \ndef PararMotor():\n \n \n print('Motor parado') \n motorComand = False\n \ntry:\n \n thread1 = threading.Thread(target=thread_MPU).start()\n thread2 = threading.Thread(target=thread_Principal).start()\n #thread3 = threading.Thread(target=thread_MOTOR).start() \n \n \n tk = Tk()\n \n tk.title('Navegação')\n tk.geometry(\"700x600\")\n tk.configure(bg='#FFF')\n \n titulo_GPS = Label(tk, text='Coordenadas GSP em UTM',bg='#FFF',font='Helvetica 12 bold')\n titulo_GPS.grid(column=1,row=1,padx=10,pady=10)\n \n texto_GPS_Anterior = Label(tk, text='GPS Ponto 1 [x,y]: ',bg='#d5f5e3',padx=5,pady=5,font='Helvetica 10 bold')\n texto_GPS_Anterior.grid(column=1,row=2,padx=10,pady=10)\n GPS_Anteriorx = Label(tk, text=gps_xy_anterior[0],bg='#e8f8f5',padx=5,pady=5)\n GPS_Anteriorx.grid(column=1,row=3)\n GPS_Anteriory = Label(tk, text=gps_xy_anterior[1],bg='#e8f8f5',padx=5,pady=5)\n GPS_Anteriory.grid(column=1,row=4)\n \n texto_GPS_Atual = Label(tk, text='GPS Ponto 2 
[x,y]: ',bg='#fcf3cf',padx=5,pady=5,font='Helvetica 10 bold')\n texto_GPS_Atual.grid(column=2,row=2,padx=10,pady=10)\n GPS_Atual = Label(tk, text=gps_xy_atual[0],bg='#fef9e7',padx=5,pady=5)\n GPS_Atual.grid(column=2,row=3)\n GPS_Atual = Label(tk, text=gps_xy_atual[1],bg='#fef9e7',padx=5,pady=5)\n GPS_Atual.grid(column=2,row=4)\n \n texto_GPS_Final = Label(tk, text='GPS Ponto 3 [x,y]: ',bg='#fae5d3',padx=5,pady=5,font='Helvetica 10 bold')\n texto_GPS_Final.grid(column=3,row=2,padx=10,pady=10)\n GPS_Final = Label(tk, text=gps_xy_final[0],bg='#fdf2e9')\n GPS_Final.grid(column=3,row=3)\n GPS_Final = Label(tk, text=gps_xy_final[1],bg='#fdf2e9')\n GPS_Final.grid(column=3,row=4)\n \n texto_GPS_Satelites = Label(tk, text='Satélites disponíveis: ',bg='#f8f9f9',padx=5,pady=5,font='Helvetica 10 bold')\n texto_GPS_Satelites.grid(column=1,row=5,padx=10,pady=10)\n GPS_Satelites = Label(tk, text=n_satelites,bg='#FFF')\n GPS_Satelites.grid(column=1,row=6)\n \n texto_correcaoErro = Label(tk, text='Correção em Angulo (Erro): ',bg='#fff',padx=5,pady=5,font='Helvetica 10 bold')\n texto_correcaoErro.grid(column=1,row=7,padx=10,pady=10)\n correcaoErro = Label(tk, text=erro,bg='#FFF')\n correcaoErro.grid(column=1,row=8)\n \n texto_correcaoGiro = Label(tk, text='Sinal de Giro (esq/dir = +/-): ',bg='#fff',padx=5,pady=5,font='Helvetica 10 bold')\n texto_correcaoGiro.grid(column=2,row=7,padx=10,pady=10)\n \n if sinalGiro > 0:\n correcaoGiro = Label(tk, text='esquerda (+)',bg='#FFF')\n correcaoGiro.grid(column=2,row=8)\n else:\n correcaoGiro = Label(tk, text='direita (-)',bg='#FFF')\n correcaoGiro.grid(column=2,row=8)\n \n texto_GPS_Final_input = Label(tk, text='Final (Latitude): ',bg='#e59866',fg=\"#fff\",padx=5,pady=5,font='Helvetica 10 bold')\n texto_GPS_Final_input.grid(column=1,row=9,padx=10,pady=10)\n GPS_Final_input_lat = Entry(tk)\n GPS_Final_input_lat.grid(column=1,row=10)\n \n texto_GPS_Final_input = Label(tk, text='Final (Longitude): ',bg='#e59866',fg=\"#fff\",padx=5,pady=5,font='Helvetica 10 bold')\n texto_GPS_Final_input.grid(column=2,row=9,padx=10,pady=10)\n GPS_Final_input_lon = Entry(tk)\n GPS_Final_input_lon.grid(column=2,row=10)\n \n botaoPonto = Button(tk, text=\"Novo Ponto Final\", command=SalvarNovoPontoFinal,bg='#dc7633',fg=\"#fff\")\n botaoPonto.grid(column=3,row=10,padx=10,pady=10)\n \n botaoSair = Button(tk, text=\"Gráfico\", command=GerarCSV,bg='#7fb3d5',fg=\"#fff\")\n botaoSair.grid(column=2,row=11,padx=10,pady=10)\n \n botaoGrafico = Button(tk, text=\"Parar Motor\", command=PararMotor,bg='#f1948a',fg=\"#fff\")\n botaoGrafico.grid(column=2,row=12,padx=10,pady=10)\n \n botaoSair = Button(tk, text=\"PAUSAR\", command=Pausar,bg='#c0392b',fg=\"#fff\")\n botaoSair.grid(column=1,row=11,padx=10,pady=10)\n \n botaoSair = Button(tk, text=\"SAIR\", command=Sair,bg='#c0392b',fg=\"#fff\")\n botaoSair.grid(column=1,row=12,padx=10,pady=10)\n \n '''\n canvas = FigureCanvasTkAgg(fig, master=tk)\n canvas.get_tk_widget().grid(column=2,row=10,columnspan=4, rowspan=11,padx=10,pady=10)\n \n ani = animation.FuncAnimation(fig, animate, interval=20)\n '''\n tk.mainloop()\n \n \nexcept(KeyboardInterrupt, SystemExit):\n #gpio.cleanup()\n \n thread.__stop()\n thread.join()\n time.sleep(5)\n pass\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.Figure", "matplotlib.use", "pandas.DataFrame", "matplotlib.pyplot.show" ] ]
SGyutan/streamlit_aggrid
[ "3459b39c9f6109975fc99be2818738009fd039f1" ]
[ "tut_2.py" ]
[ "# tut1.py\r\nimport pandas as pd\r\nimport streamlit as st\r\nfrom st_aggrid import AgGrid\r\nfrom st_aggrid.grid_options_builder import GridOptionsBuilder\r\n\r\nst.set_page_config(page_title=\"Netflix Shows\", layout=\"wide\") \r\nst.title(\"Netlix shows analysis\")\r\n\r\nshows = pd.read_csv(\"netflix_titles.csv\")\r\n\r\n# ---\r\ngb = GridOptionsBuilder.from_dataframe(shows)\r\ngb.configure_pagination()\r\ngridOptions = gb.build()\r\n\r\nAgGrid(shows, gridOptions=gridOptions)\r\n\r\n# streamlit run tut_2.py\r\n# stop: ctrl + c " ]
[ [ "pandas.read_csv" ] ]
jimmycdunn/transition-risk-tool
[ "64dfcfdb84f24ae3cccc58ad966170223c9f0dfc" ]
[ "ffequity/utils/dataframefile.py" ]
[ "import pandas as pd\nfrom datetime import datetime, date\n\n\nclass DataFrameFileException(Exception):\n pass\n\n\nclass DataFrameFile:\n \"\"\" Wrapper around dataframe supporting file operations\"\"\"\n def __init__(self, data=None):\n self.data = data\n\n def read(self, fileName):\n \"\"\"Read in filename, store in self.data\"\"\"\n self.data = pd.read_csv(fileName, encoding=\"ISO-8859-1\") # make sure fileName is correct\n return self.data\n\n def write(self, fileName, path=None):\n \"\"\"Write current dataframe to fileName\"\"\"\n # if data is none raise exception\n if self.data is None:\n raise DataFrameFileException(\"No data to write.\")\n if not path.endswith('/'):\n path += '/'\n\n self.data.to_csv(path + fileName + '.csv') # make sure fileName is correct\n # commenting out file prefixes with run date for readability\n #self.data.to_csv(path + self.get_file_prefix() + fileName + '.csv') # make sure fileName is correct\n\n @staticmethod\n def get_file_prefix(today=date.today()):\n #return ''.join([str(i) for i in today.timetuple()[0:3]])\n return datetime.strftime(today, \"%Y%m%d\")\n" ]
[ [ "pandas.read_csv" ] ]
benpetit/cs379c
[ "81a67d49188bc7358653bd24dd9d8bb2e9264aba" ]
[ "network.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nfrom rlsaber.tf_util import lstm, batch_to_seq, seq_to_batch\n\n\ndef make_cnn(convs, padding, inpt, initializer=None):\n if initializer is None:\n initializer = tf.orthogonal_initializer(np.sqrt(2.0))\n out = inpt\n with tf.variable_scope('convnet'):\n for num_outputs, kernel_size, stride in convs:\n out = layers.convolution2d(\n out,\n num_outputs=num_outputs,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n activation_fn=tf.nn.relu,\n weights_initializer=initializer\n )\n return out\n\ndef make_fcs(fcs, inpt, activation=tf.nn.relu, initializer=None):\n if initializer is None:\n initializer = tf.orthogonal_initializer(np.sqrt(2.0))\n out = inpt\n with tf.variable_scope('hiddens'):\n for hidden in fcs:\n out = layers.fully_connected(out, hidden, activation_fn=activation,\n weights_initializer=initializer)\n return out\n\ndef make_lstm(lstm_unit, nenvs, step_size, inpt, masks, rnn_state):\n with tf.variable_scope('rnn'):\n rnn_in = batch_to_seq(inpt, nenvs, step_size)\n masks = batch_to_seq(masks, nenvs, step_size)\n rnn_out, rnn_state = lstm(\n rnn_in, masks, rnn_state, lstm_unit, np.sqrt(2.0))\n rnn_out = seq_to_batch(rnn_out, nenvs, step_size)\n return rnn_out, rnn_state\n\ndef cnn_network(convs,\n fcs,\n use_lstm,\n padding,\n inpt,\n masks,\n rnn_state,\n num_actions,\n lstm_unit,\n nenvs,\n step_size,\n scope):\n out = make_cnn(convs, padding, inpt)\n out = layers.flatten(out)\n out = make_fcs(fcs, out)\n rnn_out, rnn_state = make_lstm(\n lstm_unit, nenvs, step_size, out, masks, rnn_state)\n\n if use_lstm:\n out = rnn_out\n\n policy = layers.fully_connected(\n out, num_actions, activation_fn=None,\n weights_initializer=tf.orthogonal_initializer(0.1))\n dist = tf.distributions.Categorical(probs=tf.nn.softmax(policy))\n\n value = layers.fully_connected(\n out, 1, activation_fn=None,\n weights_initializer=tf.orthogonal_initializer(1.0))\n\n return dist, value, rnn_state\n\ndef mlp_network(fcs,\n use_lstm,\n inpt,\n masks,\n rnn_state,\n num_actions,\n lstm_unit,\n nenvs,\n step_size,\n scope):\n policy_rnn_state, value_rnn_state = tf.split(rnn_state, 2, axis=-1)\n\n inpt = layers.flatten(inpt)\n input_dim = inpt.get_shape().as_list()[1] + 1\n def initializer(scale):\n return tf.random_normal_initializer(stddev=np.sqrt(scale / input_dim))\n\n with tf.variable_scope('policy'):\n out = make_fcs(\n fcs, inpt, activation=tf.nn.tanh, initializer=initializer(1.0))\n rnn_out, policy_rnn_state = make_lstm(\n lstm_unit//2, nenvs, step_size, out, masks, policy_rnn_state)\n\n if use_lstm:\n out = rnn_out\n\n policy = layers.fully_connected(out, num_actions, activation_fn=None,\n weights_initializer=initializer(0.01))\n logstd = tf.get_variable(name='logstd', shape=[1, num_actions],\n initializer=tf.zeros_initializer())\n std = tf.zeros_like(policy) + tf.exp(logstd)\n dist = tf.distributions.Normal(loc=policy, scale=std)\n\n with tf.variable_scope('value'):\n out = make_fcs(\n fcs, inpt, activation=tf.nn.tanh, initializer=initializer(1.0))\n rnn_out, value_rnn_state = make_lstm(\n lstm_unit//2, nenvs, step_size, out, masks, value_rnn_state)\n\n if use_lstm:\n out = rnn_out\n\n value = layers.fully_connected(\n out, 1, activation_fn=None, weights_initializer=initializer(1.0))\n\n rnn_state = tf.concat([policy_rnn_state, value_rnn_state], axis=-1)\n\n return dist, value, rnn_state\n\n\ndef _make_network(convs,\n fcs,\n use_lstm,\n padding,\n continuous,\n inpt,\n masks,\n rnn_state,\n 
num_actions,\n lstm_unit,\n nenvs,\n step_size,\n scope):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n if continuous:\n return mlp_network(fcs, use_lstm, inpt, masks, rnn_state,\n num_actions, lstm_unit, nenvs, step_size, scope)\n else:\n return cnn_network(convs, fcs, use_lstm, padding, inpt, masks,\n rnn_state, num_actions, lstm_unit, nenvs,\n step_size, scope)\n\ndef make_network(convs, fcs, use_lstm=True, padding='VALID', continuous=False):\n return lambda *args, **kwargs: _make_network(convs, fcs, use_lstm, padding,\\\n continuous, *args, **kwargs)\n" ]
[ [ "tensorflow.contrib.layers.convolution2d", "tensorflow.nn.softmax", "tensorflow.concat", "tensorflow.distributions.Normal", "numpy.sqrt", "tensorflow.zeros_initializer", "tensorflow.exp", "tensorflow.contrib.layers.fully_connected", "tensorflow.orthogonal_initializer", "tensorflow.zeros_like", "tensorflow.contrib.layers.flatten", "tensorflow.variable_scope", "tensorflow.split" ] ]
omjawade/Medidoc
[ "ff36c8404b9cb943398f9aae3eec18ed1bf29f49" ]
[ "DiabetesLinearReg.py" ]
[ "import pandas as pd\r\nfrom sklearn.linear_model import LinearRegression\r\nimport pickle\r\nfrom sklearn import svm\r\ndf = pd.read_csv(\"data/diabetes.csv\")\r\n#use required features\r\n# cdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB','CO2EMISSIONS']]\r\n\r\n#Training Data and Predictor Variable\r\n# Use all data for training (tarin-test-split not used)\r\nx = df.iloc[:, :-1]\r\ny = df.iloc[:, -1]\r\nregressordiabetes = svm.SVC(kernel='linear')\r\n\r\n#Fitting model with trainig data\r\nregressordiabetes.fit(x, y)\r\n\r\n# Saving model to current directory\r\n# Pickle serializes objects so they can be saved to a file, and loaded in a program again later on.\r\npickle.dump(regressordiabetes, open('model.pkl','wb'))\r\n\r\n\r\n#Loading model to compare the results\r\nmodel = pickle.load(open('model.pkl','rb'))\r\nprint(model.predict([[5,166,72,19,175,25.8,0.587,51]]))\r\n" ]
[ [ "pandas.read_csv", "sklearn.svm.SVC" ] ]
yashchandak/GNN
[ "818d1aa25bd50a65bff3577758306d2e6c591100", "818d1aa25bd50a65bff3577758306d2e6c591100", "818d1aa25bd50a65bff3577758306d2e6c591100" ]
[ "Sample_Run/Seq_att/cells.py", "Sample_Run/Dynamic_Bi/__main__.py", "Sample_Run/Seq_Dynamic/Eval_utils.py" ]
[ "import tensorflow as tf\nfrom tensorflow.python.ops.rnn_cell import RNNCell\nimport numpy as np\n\n\nclass MyLSTMCell(RNNCell):\n '''Vanilla LSTM implemented with same initializations as BN-LSTM'''\n\n def __init__(self, num_units):\n self.num_units = num_units\n\n @property\n def state_size(self):\n return (self.num_units, self.num_units)\n\n @property\n def output_size(self):\n return self.num_units\n\n def __call__(self, x, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n c, h = state\n\n # Keep W_xh and W_hh separate here as well to reuse initialization methods\n x_size = x.get_shape().as_list()[1]\n W_xh = tf.get_variable('W_xh',\n [x_size, 4 * self.num_units],\n initializer=orthogonal_initializer())\n W_hh = tf.get_variable('W_hh',\n [self.num_units, 4 * self.num_units],\n initializer=bn_lstm_identity_initializer(0.95))\n bias = tf.get_variable('bias', [4 * self.num_units])\n\n # hidden = tf.matmul(x, W_xh) + tf.matmul(h, W_hh) + bias\n # improve speed by concat.\n concat = tf.concat(1, [x, h])\n W_both = tf.concat(0, [W_xh, W_hh])\n hidden = tf.matmul(concat, W_both) + bias\n\n i, j, f, o = tf.split(1, 4, hidden)\n\n new_c = c * tf.sigmoid(f) + tf.sigmoid(i) * tf.tanh(j)\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n\n return new_h, (new_c, new_h)\n\n\nclass LSTMgated(RNNCell):\n '''Vanilla LSTM implemented with same initializations as BN-LSTM'''\n\n def __init__(self, num_units):\n self.num_units = num_units\n\n @property\n def state_size(self):\n return (self.num_units, self.num_units)\n\n @property\n def output_size(self):\n return self.num_units\n\n def __call__(self, x, state, context, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n c, h = state\n\n # Keep W_xh and W_hh separate here as well to reuse initialization methods\n x_size = x.get_shape().as_list()[1]\n context_size = context.get_shape().as_list()[1]\n W_xh = tf.get_variable('W_xh',\n [x_size, 4 * self.num_units])#,\n # initializer=orthogonal_initializer())\n W_ch = tf.get_variable('W_ch',\n [context_size, 4 * self.num_units])#,\n # initializer=orthogonal_initializer())\n W_hh = tf.get_variable('W_hh',\n [self.num_units, 4 * self.num_units],\n initializer=bn_lstm_identity_initializer(0.95))\n bias = tf.get_variable('bias', [4 * self.num_units])\n bias_c = tf.get_variable('bias_c', [self.num_units])\n\n # hidden = tf.matmul(x, W_xh) + tf.matmul(h, W_hh) + bias\n # improve speed by concat.\n concat = tf.concat(1, [x, h, context])\n W_both = tf.concat(0, [W_xh, W_hh, W_ch])\n hidden = tf.matmul(concat, W_both) + bias\n\n i, j, f, o = tf.split(1, 4, hidden)\n\n #context_gate = tf.matmul(context, W_ch) + bias_c\n\n new_c = c * tf.sigmoid(f) + tf.sigmoid(i) * tf.tanh(j)\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n\n return new_h, (new_c, new_h)\n\n\n\nclass RNN(RNNCell):\n def __init__(self, num_units):\n self.num_units = num_units\n\n @property\n def state_size(self):\n return (self.num_units, self.num_units)\n\n @property\n def output_size(self):\n return self.num_units\n\n def __call__(self, x, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n h = state\n\n # Keep W_xh and W_hh separate here as well to reuse initialization methods\n x_size = x.get_shape().as_list()[1]\n W_xh = tf.get_variable('W_xh', [x_size, self.num_units])\n W_hh = tf.get_variable('W_hh', [self.num_units, self.num_units])\n bias = tf.get_variable('bias', [self.num_units])\n\n hidden = tf.matmul(x, W_xh) + bias # + tf.matmul(h, W_hh)\n new_h = tf.tanh(hidden)\n\n return new_h, 
(new_h)\n\n\ndef orthogonal(shape):\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v\n return q.reshape(shape)\n\ndef bn_lstm_identity_initializer(scale):\n def _initializer(shape, dtype=tf.float32, partition_info=None):\n '''Ugly cause LSTM params calculated in one matrix multiply'''\n size = shape[0]\n # gate (j) is identity\n t = np.zeros(shape)\n t[:, size:size * 2] = np.identity(size) * scale\n t[:, :size] = orthogonal([size, size])\n t[:, size * 2:size * 3] = orthogonal([size, size])\n t[:, size * 3:] = orthogonal([size, size])\n return tf.constant(t, dtype)\n\n return _initializer\n\ndef orthogonal_initializer():\n def _initializer(shape, dtype=tf.float32, partition_info=None):\n return tf.constant(orthogonal(shape), dtype)\n return _initializer", "from __future__ import print_function\nimport os.path\nimport time, math, sys\nfrom copy import deepcopy\nimport scipy.sparse as sps\nfrom scipy.io import loadmat\nimport numpy as np\nfrom sklearn.preprocessing import normalize\n\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport blogDWdata as input_data\nimport network as architecture\nimport Config as conf\nimport Eval_Calculate_Performance as perf\nfrom Utils import labels_to_onehot, sample\nfrom copy import deepcopy\n\n#import Eval_MLP as NN\nimport Eval_linear as liblinear\nimport Eval_Config\n\ncfg = conf.Config() \n\n#Code structure inspired from Stanford's cs224d assignment starter codes\n#class DNN(Model):\nclass RNNLM_v1(object):\n def __init__(self, config):\n self.config = config\n # Generate placeholders for the images and labels.\n self.load_data()\n self.add_placeholders()\n #self.add_metrics()\n\n # Build model\n self.arch = self.add_network(config)\n self.inputs = self.arch.embedding(self.data_placeholder)\n self.rnn_outputs = self.arch.predict(self.inputs,self.keep_prob, self.seq_len)\n self.outputs = self.arch.projection(self.rnn_outputs)\n\n # casting to handle numerical stability\n self.predictions_next = [tf.nn.softmax(tf.cast(o, 'float64')) for o in self.outputs[0]]\n # Reshape the output into len(vocab) sized chunks - the -1 says as many as\n # needed to evenly divide\n output_next = tf.reshape(tf.concat(1, self.outputs[0]), [-1, self.config.data_sets._len_vocab])\n #output_label = tf.reshape(tf.concat(1, self.outputs[1]), [-1, self.config.data_sets._len_labels])\n output_label = self.outputs[1]\n \n self.loss = self.arch.loss([output_next, output_label], self.label_placeholder, self.label_2_placeholder, self.inputs, self.data_placeholder)\n self.optimizer = self.config.solver._parameters['optimizer']\n self.train = self.arch.training(self.loss,self.optimizer)\n\n self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)\n self.summary = tf.summary.merge_all()\n self.step_incr_op = self.arch.global_step.assign(self.arch.global_step+1)\n #local variable initialization required for metrics operation, otherwise throws error\n # self.init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())\n self.init = tf.global_variables_initializer()#tf.initialize_all_variables()\n \n def predict_results(self,sess, all_labels, return_labels = False):\n labels_orig, data = [], []\n for k,v in all_labels.items():\n labels_orig.append(v)\n data.append([k])\n\n #Replicate data on 2nd axis to meet the dimensions of data placeholder\n #But since dynamic RNNs are used, only lengths of 
'seq_length' are evaluated :)\n data = np.tile(data, (1, self.config.num_steps))\n feed_dict = {self.data_placeholder: data, self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval(), self.seq_len: [1]*len(data)}\n labels_pred = sess.run(self.arch.label_sigmoid, feed_dict=feed_dict)[0]\n if return_labels:\n return labels_pred\n else:\n return perf.evaluate(labels_pred, labels_orig, 0)\n\n def load_data(self):\n # Get the 'encoded data'\n self.data_sets = input_data.read_data_sets(self.config)\n debug = self.config.debug\n if debug:\n print('##############--------- Debug mode ')\n num_debug = (self.config.num_steps+1)*128\n self.data_sets.train._x = self.data_sets.train._x[:num_debug]\n self.data_sets.validation._x = self.data_sets.validation._x[:num_debug]\n #self.data_sets.test_x = self.data_sets.test_x[:num_debug]\n \n self.config.data_sets._len_vocab = self.data_sets.train.vocab.__len__()\n\n l = len(list(self.data_sets.train.labels.values())[0])\n self.config.data_sets._len_labels= l\n\n print('--------- Project Path: '+self.config.codebase_root_path+self.config.project_name)\n print('--------- Vocabulary Length: '+str(self.config.data_sets._len_vocab))\n print('--------- Label Length: '+str(self.config.data_sets._len_labels))\n print('--------- No. of Labelled nodes: ' + str(len(self.data_sets.train.labels.keys())))\n\n def add_placeholders(self):\n self.data_placeholder = tf.placeholder(tf.int32,shape=[None,self.config.num_steps], name='Input')\n self.label_placeholder = tf.placeholder(tf.int32,name='Target')\n self.label_2_placeholder = tf.placeholder(tf.int32,name='Target_label')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n self.seq_len = tf.placeholder(tf.int32, shape=[None], name='Seq_len')\n \t#self.metrics = tf.placeholder(tf.float32,shape=(len(self.config.metrics),))\n\n def create_feed_dict(self, input_batch, label_batch, label_batch_2, seq_len):\n feed_dict = {\n self.data_placeholder: input_batch,\n self.label_placeholder: label_batch,\n self.label_2_placeholder: label_batch_2,\n self.seq_len: seq_len\n }\n return feed_dict\n\n def add_network(self, config):\n return architecture.Network(config)\n\n def add_metrics(self, metrics):\n \"\"\"assign and add summary to a metric tensor\"\"\"\n for i,metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])\n\n def add_summaries(self,sess):\n # Instantiate a SummaryWriter to output summaries and the Graph.\n self.summary_writer_train = tf.train.SummaryWriter(self.config.logs_dir+\"train\", sess.graph)\n self.summary_writer_val = tf.train.SummaryWriter(self.config.logs_dir+\"val\", sess.graph)\n \n def write_summary(self,sess,summary_writer, metric_values, step, feed_dict):\n summary = self.summary.merged_summary\n #feed_dict[self.loss]=loss\n feed_dict[self.metrics]=metric_values\n summary_str = sess.run(summary, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, step)\n summary_writer.flush()\n\n\n def run_epoch(self, sess, dataset, train_op=None, summary_writer=None,verbose=1000):\n if not train_op :\n train_op = tf.no_op()\n keep_prob = 1\n else:\n keep_prob = self.config.architecture._dropout\n # And then after everything is built, start the training loop.\n total_loss = []\n next_loss = []\n label_loss = []\n sim_loss = []\n emb_loss = []\n grads = []\n f1_micro, f1_macro = [], []\n total_steps = sum(1 for x in dataset.next_batch(self.config.batch_size,self.config.num_steps))\t\n\t#Sets to state to zero for a new epoch\n state = 
self.arch.initial_state.eval()\n for step, (input_batch, label_batch, label_batch_2, seq_len) in enumerate(\n dataset.next_batch(self.config.batch_size,self.config.num_steps)):\n\n #print(\"\\n\\n\\nActualLabelCount: \", input_batch, label_batch, label_batch_2, seq_len, np.sum(label_batch_2, axis=2))\n feed_dict = self.create_feed_dict(input_batch, label_batch, label_batch_2, seq_len)\n feed_dict[self.keep_prob] = keep_prob\n\t #Set's the initial_state temporarily to the previous final state for the session \"AWESOME\" -- verified\n\t #feed_dict[self.arch.initial_state] = state \n\t \n\t #Writes loss summary @last step of the epoch\n if (step+1) < total_steps:\n _, loss_value, state, pred_labels = sess.run([train_op, self.loss, self.arch.final_state, self.arch.label_sigmoid], feed_dict=feed_dict)\n else:\n _, loss_value, state, summary, pred_labels = sess.run([train_op, self.loss, self.arch.final_state,self.summary,self.arch.label_sigmoid], feed_dict=feed_dict)\n if summary_writer != None:\n summary_writer.add_summary(summary,self.arch.global_step.eval(session=sess))\n summary_writer.flush()\n #print(loss_value)\n total_loss.append(loss_value[0])\n next_loss.append(loss_value[1])\n label_loss.append(loss_value[2])\n sim_loss.append(loss_value[3])\n emb_loss.append(loss_value[4])\n #print(loss_value[5])\n grads.append(np.mean(loss_value[5][0]))\n \n\n #print(\"\\n\\n\\nPredLabels:\", pred_labels)\n\n if verbose and step % verbose == 0:\n metrics = [0]*20\n if self.config.solver._curr_label_loss:\n # metrics = perf.evaluate(pred_labels, label_batch_2, 0)\n metrics = self.predict_results(sess, dataset.labels)\n self.add_metrics(metrics)\n f1_micro.append(metrics[3])\n f1_macro.append(metrics[4])\n print('%d/%d : pp = %0.3f : next = %0.3f : label = %0.3f : micro-F1 = %0.3f : macro-F1 = %0.3f : sim = %0.3f : emb = %0.3f : grads = %0.12f'%(step, total_steps, np.exp(np.mean(total_loss)), np.mean(next_loss), np.mean(label_loss), np.mean(f1_micro), np.mean(f1_macro), np.mean(sim_loss), np.mean(emb_loss), np.mean(grads)), end=\"\\r\")\n sys.stdout.flush()\n \n if verbose:\n sys.stdout.write('\\r')\n return np.exp(np.mean(total_loss)),np.mean(total_loss), np.mean(f1_micro), np.mean(f1_macro)\n\n def fit(self, sess):\n #define parametrs for early stopping early stopping\n max_epochs = self.config.max_epochs\n patience = self.config.patience # look as this many examples regardless\n patience_increase = self.config.patience_increase # wait this much longer when a new best is found\n improvement_threshold = self.config.improvement_threshold # a relative improvement of this much is\n # considered significant\n \n # go through this many minibatches before checking the network on the validation set\n # Here we check every epoch\n validation_loss = 1e6\n done_looping = False\n step = 1\n best_step = -1\n losses = []\n learning_rate = self.config.solver._parameters['learning_rate']\n #sess.run(self.init) #DO NOT DO THIS!! 
Doesn't restart from checkpoint\n while (step <= self.config.max_epochs) and (not done_looping):\n #print 'Epoch {}'.format(epoch)\n\t #step_incr_op = tf.assign_add(self.global_step,1)\n sess.run([self.step_incr_op])\n epoch = self.arch.global_step.eval(session=sess)\n\n start_time = time.time()\n tr_pp, average_loss, tr_micro, tr_macro = self.run_epoch(sess,self.data_sets.train,train_op=self.train,summary_writer=self.summary_writer_train)\n duration = time.time() - start_time\n\n if (epoch % self.config.val_epochs_freq == 0):\n val_pp,val_loss, val_micro, val_macro = self.run_epoch(sess,self.data_sets.validation,summary_writer=self.summary_writer_val)\n\n print('\\nEpoch %d: tr_loss = %.2f, val_loss = %.2f || tr_pp = %.2f, val_pp = %.2f || tr_micro = %.2f, val_micro = %.2f || tr_macro = %.2f, val_macro = %.2f (%.3f sec)'\n % (epoch, average_loss, val_loss, tr_pp, val_pp, tr_micro, val_micro, tr_macro, val_macro, duration))\n \t\n # Save model only if the improvement is significant\n if (val_loss < validation_loss * improvement_threshold) and (epoch > self.config.save_epochs_after):\n patience = max(patience, epoch * patience_increase)\n validation_loss = val_loss\n checkpoint_file = self.config.ckpt_dir + 'checkpoint'\n self.saver.save(sess, checkpoint_file, global_step=epoch)\n best_step = epoch\n patience = epoch + max(self.config.val_epochs_freq,self.config.patience_increase)\n #print('best step %d'%(best_step))\n\t\t\n elif val_loss > validation_loss * improvement_threshold:\n patience = epoch - 1\n\n else:\n\t\t # Print status to stdout.\n print('Epoch %d: loss = %.2f pp = %.2f (%.3f sec)' % (epoch, average_loss, tr_pp, duration))\n\n if (patience <= epoch):\n\t\t#config.val_epochs_freq = 2\n learning_rate = learning_rate / 10\n self.optimizer = tf.train.AdamOptimizer(learning_rate)\n patience = epoch + max(self.config.val_epochs_freq,self.config.patience_increase)\n print('--------- Learning rate dropped to: %f'%(learning_rate))\t\t\n if learning_rate <= 0.0000001:\n print('Stopping by patience method')\n done_looping = True\n\n losses.append(average_loss) \n step += 1\n\n return losses, best_step\n\n def get_embedding(self,sess,data, layer = 0):\n if layer == 0:\n feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval()}\n return sess.run(self.inputs,feed_dict=feed_dict)[0]\n\t\n if layer == 1:\n feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval(), self.seq_len:[1]}\n return sess.run(self.rnn_outputs, feed_dict=feed_dict)[0]\n\n else:\n print(\"Undefined layer\")\n return\n\n def get_hidden_state(self,sess,data,eos_embed=None):\n if eos_embed is None:\n eos_embed = self.arch.initial_state.eval()\n feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: eos_embed, self.seq_len:[1]}\n return sess.run(self.rnn_outputs,feed_dict=feed_dict)[0]\n\n def generate_text(self,session, starting_text='<eos>',stop_length=100, stop_tokens=None, temp=1.0 ):\n \"\"\"Generate text from the model.\n\t Args:\n\t session: tf.Session() object\n\t starting_text: Initial text passed to model.\n\t Returns:\n\t output: List of word idxs\n\t\"\"\"\n state = self.arch.initial_state.eval()\n\t# Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [self.data_sets.train.vocab.encode(word) for word in starting_text.split()]\n all_labels = []\n for i in range(stop_length):\n feed = {self.data_placeholder: [tokens[-1:]], 
self.arch.initial_state: state, self.keep_prob: 1}\n state, y_pred, embed, pred_labels = session.run([self.arch.final_state, self.predictions_next[-1],self.inputs, self.arch.label_sigmoid], feed_dict=feed)\n state = state[0]\n all_labels.append(pred_labels[0][0]) #batch-0, seq number-0\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and self.data_sets.train.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [self.data_sets.train.vocab.decode(word_idx) for word_idx in tokens]\n\n #Print out the next nodes and corresponding labels\n\n #print(\"labels and nodes are both incremented by 1 as compared to original dataset\")\n #for step, labels in enumerate(all_labels):\n # temp = []\n # for idx, val in enumerate(labels):\n # if val>0.25:\n # temp.append(idx)\n # print(output[step], \": \", temp)\n\n return output\n \n #def generate_sentence(self,session,starting_text,temp): \n def generate_sentence(self,session,*args, **kwargs):\n \"\"\"Convenice to generate a sentence from the model.\"\"\"\n return self.generate_text(session, *args, stop_tokens=['<eos>'], **kwargs)\n\n\n\n########END OF CLASS MODEL#############################################################################################################\n\ndef init_Model(config):\n tf.reset_default_graph()\n with tf.variable_scope('RNNLM',reuse=None) as scope:\n model = RNNLM_v1(config)\n \n tfconfig = tf.ConfigProto( allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n sm = tf.train.SessionManager()\n\n if config.retrain:\n load_ckpt_dir = config.ckpt_dir\n print('--------- Loading variables from checkpoint if available')\n else:\n load_ckpt_dir = ''\n print('--------- Training from scratch')\n sess = sm.prepare_session(\"\", init_op=model.init, saver=model.saver, checkpoint_dir=load_ckpt_dir,config=tfconfig)\n return model, sess\n\ndef train_DNNModel():\n #global cfg\n print('############## Training Module ')\n config = deepcopy(cfg)\n model,sess = init_Model(config)\n with sess:\n\t model.add_summaries(sess)\n\t losses, best_step = model.fit(sess)\n return losses\n\ndef test_DNNModel():\n #global cfg\n print('############## Test Module ')\n config = deepcopy(cfg)\n model,sess = init_Model(config) \n with sess:\n test_pp = model.run_epoch(sess,model.data_sets.validation)\n print('=-=' * 5)\n print('Test perplexity: {}'.format(test_pp))\n print('=-=' * 5)\n\ndef interactive_generate_text_DNNModel():\n #global cfg\n print('############## Generate Text Module ')\n config = deepcopy(cfg)\n config.batch_size = config.num_steps = 1\n model,sess = init_Model(config)\n with sess:\n starting_text = '2'\n while starting_text:\n print(' '.join(model.generate_sentence(sess, starting_text=starting_text, temp=1.0)))\n starting_text = input('> ')\n\ndef dump_generate_text_DNNModel():\n global cfg\n print('############## Generate sentences for all words in dictionary and Dump ')\n config = deepcopy(cfg)\n config.batch_size = config.num_steps = 1\n model,sess = init_Model(config)\n num_sentences = 2\n with sess:\n ignore_list = ['0','<eos>','<unk>'] \n keys = [int(word) for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list] \n keys.sort()\n vocab_len = len(keys)\n f_id = config.dataset_name+'/_data.sentences','w'\n\n for starting_text in keys:\n for n in range(num_sentences):\n words = model.generate_sentence(sess, starting_text=str(starting_text), temp=1.0)\n f_id.write((' '.join(words[:-1])+'\\n'))\n\n\n\ndef save_Embeddings_DNNModel():\n 
#global cfg\n print('############## Save Embeddings Module ')\n config = deepcopy(cfg)\n config.batch_size = config.num_steps = 1\n model,sess = init_Model(config)\n with sess:\n model.add_summaries(sess)\n ignore_list = ['0','<eos>','<unk>'] \n keys = [int(word) for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list] \n keys.sort()\n vocab_len = len(keys)\n enc_words = np.array([model.data_sets.train.vocab.encode(str(word)) for word in keys])\n #embed = np.zeros([vocab_len,model.config.mRNN._embed_size])\n embed = np.zeros([vocab_len,model.config.mRNN._hidden_size])\n\n #eos_embed = model.get_embedding(sess,['<eos>'])\n eos_embed = model.get_hidden_state(sess,[model.data_sets.train.vocab.encode('<eos>')],None)\n\n for i,word in enumerate(enc_words):\n embed[i] = model.get_embedding(sess,[word],)\n #embed[i] = model.get_hidden_state(sess,[word],eos_embed)\n\n fn = config.embed_dir+config.dataset_name+'_data.embd'\n np.savetxt(fn,embed, delimiter=',')\n #np.savetxt(fn,normalize(embed,norm='l2',axis=1), delimiter=',')\n print('--------- Embeddings are saved to '+fn)\n\n \ndef save_embed(path, embed): #UNUSED\n\tf = open(path, 'w')\n\tfor idx, item in enumerate(embed):\n\t\tf.write(str(idx))\n\t\tfor val in item:\n\t\t\tf.write(' ' + str(val))\n\t\tf. write('\\n')\n\tf.close()\n\ndef visualize_Embeddings_DNNModel():\n #global cfg\n print('############## Visualize Embeddings Module ')\n config = deepcopy(cfg)\n tf.reset_default_graph()\n sess = tf.Session()\n fn = config.embed_dir+config.dataset_name+'_data.embd'\n #fn = config.embed_dir+'karate_structure_features'\n print('--------- Embeddings are loaded from dir: '+fn)\n embed = np.loadtxt(fn,delimiter=',')\n embed_var = tf.Variable(embed,name='embed_var')\n init = tf.initialize_all_variables()\n sess.run(init)\n\n checkpoint_file = config.logs_dir, 'Embedding'\n saver = tf.train.Saver({\"embedding\": embed_var},write_version=tf.train.SaverDef.V2)\n fn = config.embed_dir+'embedding_ckpt'\n saver.save(sess,fn, global_step=1)\n print('--------- To Visualize Embeddings load tf:0.12v tensorboard in directory: '+fn)\n\n\ndef generate_and_reconstruct():\n print('############## Reconstruct Text Module ')\n config = deepcopy(cfg)\n config.batch_size = config.num_steps = 1\n model,sess = init_Model(config)\n\n ignore_list = ['0','<eos>','<unk>'] \n keys = [word for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]\n nodes = len(keys)\n #adj_mat = np.zeros((nodes, nodes), dtype=int)\n adj_list = {}\n walk_count = 10\n \n with sess:\n for idx, node in enumerate(keys):\n if idx%100 == 0:\n print(\"Reconstructing for node: \",idx)\n for i in range(walk_count): \n walk = model.generate_sentence(sess, starting_text=node, temp=1.0)\n for n1, n2 in zip(walk[:-2], walk[1:-1]):\n #Subtracting one to start node count from 0\n n1, n2 = int(n1)-1, int(n2)-1\n weight = adj_list.get((n1, n2), 0)\n adj_list[(n1,n2)] = weight+1\n #adj_mat[int(n1)-1][int(n2)-1] += 1\n\n adj_mat = sps.lil_matrix((nodes, nodes))\n for k, v in adj_list.items():\n i,j = k\n adj_mat[i,j] = v\n\n #adj_mat = scipy.sparse.coo_matrix(adj_mat)\n savemat(config.results_dir+'reconstructed_'+cfg.dataset_name, adj_mat)\n print('------------ Reconstruction file saved: ', 'reconstructed_'+cfg.dataset_name )\n\ndef classify_and_save():\n print('############## Classify and save Module ')\n config = deepcopy(cfg)\n fn = config.embed_dir+config.dataset_name+'_data.embd'\n\n e_conf = Eval_Config.Config(config.dataset_name+'/', fn)\n 
#NN.evaluate(e_conf)\n liblinear.evaluate(e_conf)\n print(\"------------ Results saved to: \", e_conf.results_folder) \n\ndef predict_and_save():\n print('############## Save Label Prediction Module ')\n config = deepcopy(cfg)\n model,sess = init_Model(config)\n vocab = model.data_sets.train.vocab\n\n all_labels = loadmat(config.label_dir)['labels']\n nodes = all_labels.shape[0]\n all_labels = input_data.get_labels(all_labels, [True]*nodes, vocab)\n\n pred_labels = model.predict_results(sess, all_labels, return_labels=True)\n ordered_labels = np.zeros(all_labels.shape)\\\n\n #Re-order the predictions based on actual node number \n #pred_labels are in order of keys sequence of all_labels\n for idx, k in enumerate(all_labels.keys()):\n ordered_labels[int(vocab.decode(k)) - 1] = pred_labels[idx]\n\n #Ignore the first column of label prediction (It is used for marking <EOS> and unlabeled data)\n ordered_labels = ordered_labels[:,1:]\n\n fn = config.result_dir+config.dataset_name+'_predicted_labels.csv'\n np.savetxt(fn, ordered_labels, delimiter=',')\n\ndef execute():\n with tf.device('/gpu:0'):\n err = train_DNNModel() \n #test_DNNModel() \n #interactive_generate_text_DNNModel()\n save_Embeddings_DNNModel()\n visualize_Embeddings_DNNModel() \n #generate_and_reconstruct()\n classify_and_save() \n predict_and_save() \n return err\n \nif __name__ == \"__main__\":\n #remove parameter dictionary\n \n meta_param = {#('dataset_name',):['blogcatalog_ncc'],\n #('solver', 'learning_rate'): [0.001],\n #('retrain',): [False],\n ('debug',): [False],\n ('max_epochs',): [1000]\n }\n\n variations = len(meta_param[('debug',)])\n\n #Make sure number of variants are equal\n for k,v in meta_param.items():\n assert len(v) == variations \n \n \n for idx in range(variations): \n for k,vals in meta_param.items():\n x = cfg\n if len(k) > 1:\n x = getattr(x, k[0])\n setattr(x, k[-1], vals[idx])\n print(k[-1], vals[idx])\n\n cfg.create(cfg.dataset_name)#\"run-\"+str(idx))\n cfg.init2()\n\n #All set... GO! \n execute()\n print('\\n\\n ===================== \\n\\n')\n", "import numpy as np\n\n \ndef write_results(cfg, all_results):\n for percent, results in all_results.items():\n f = open(cfg.results_folder+str(percent)+'.txt','w')\n\n for metric in cfg.metrics:\n f.write(metric+ '\\t')\n f.write('\\n')\n\n arr = np.zeros((len(results.values()[0]), len(results)))#[[]]*len(results.values()[0])\n for shuff, vals in results.items():\n for idx, val in enumerate(vals):\n arr[idx][shuff-1] = val\n f.write(str(val) + '\\t')\n f.write('\\n') \n\n #f.write('\\n')\n for v in range(arr.shape[0]):\n f.write(str(np.mean(arr[v][:]))+ '\\t')\n\n f.write('\\n')\n for v in range(arr.shape[0]):\n f.write(str(np.var(arr[v][:]))+ '\\t')\n \n f.close()\n\n" ]
[ [ "numpy.linalg.svd", "tensorflow.get_variable", "tensorflow.constant", "tensorflow.concat", "tensorflow.matmul", "tensorflow.sigmoid", "numpy.random.normal", "tensorflow.tanh", "numpy.identity", "numpy.prod", "tensorflow.split", "numpy.zeros" ], [ "tensorflow.device", "tensorflow.concat", "tensorflow.cast", "tensorflow.train.SessionManager", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.Variable", "scipy.io.loadmat", "tensorflow.ConfigProto", "tensorflow.initialize_all_variables", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.train.Saver", "numpy.zeros", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.no_op", "numpy.savetxt", "numpy.tile", "tensorflow.train.SummaryWriter", "tensorflow.variable_scope", "numpy.loadtxt", "scipy.sparse.lil_matrix" ], [ "numpy.var", "numpy.mean" ] ]
franprix/intake-excel
[ "b2c56d4513bef66240f476f31d7fdb72911da53d" ]
[ "tests/test_intake_excel.py" ]
[ "import pandas as pd\n\nimport intake\nfrom intake_excel import ExcelSource\n\nfrom .utils import temp_plain_excel, df\n\ndef test_fixture(temp_plain_excel):\n d2 = pd.read_excel('temp_plain_excel.xlsx')\n assert df.equals(d2)\n\ndef test_simple(temp_plain_excel):\n d2 = ExcelSource('temp_plain_excel.xlsx').read()\n assert df.equals(d2)\n" ]
[ [ "pandas.read_excel" ] ]
aureguerrero/sahi
[ "fe3e8521d9ac91d7c2f431a8907bb4b8af668d30" ]
[ "sahi/prediction.py" ]
[ "# OBSS SAHI Tool\n# Code written by Fatih C Akyon, 2020.\n\nimport copy\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\nfrom PIL import Image\nfrom osgeo import gdal, ogr\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import HuberRegressor\nfrom sklearn.linear_model import TheilSenRegressor\nimport os\nimport PIL\nimport cv2\nimport scipy.spatial\nfrom skimage.morphology import skeletonize\nfrom skimage import measure\nfrom scipy.ndimage import rotate\n\nfrom sahi.annotation import ObjectAnnotation\nfrom sahi.utils.coco import CocoAnnotation, CocoPrediction\nfrom sahi.utils.cv import read_image_as_pil, visualize_object_predictions\nfrom sahi.utils.file import Path\n\n\ndef centroide(mask,shift_amount=[0,0]):\n if mask is not None:\n c=np.array(np.where(skeletonize(mask))).transpose()\n aux=np.sum(scipy.spatial.distance_matrix(c,c,p=2),axis=1)\n minimo=np.min(aux)\n aux=c[np.where(aux==minimo)[0][0],:]\n c=aux\n return [c[1]+shift_amount[0],c[0]+shift_amount[1]]\n\n\nclass PredictionScore:\n def __init__(self, value: float):\n \"\"\"\n Arguments:\n score: prediction score between 0 and 1\n \"\"\"\n # if score is a numpy object, convert it to python variable\n if type(value).__module__ == \"numpy\":\n value = copy.deepcopy(value).tolist()\n # set score\n self.value = value\n\n def is_greater_than_threshold(self, threshold):\n \"\"\"\n Check if score is greater than threshold\n \"\"\"\n return self.value > threshold\n\n def __repr__(self):\n return f\"PredictionScore: <value: {self.value}>\"\n\n\nclass ObjectPrediction(ObjectAnnotation):\n \"\"\"\n Class for handling detection model predictions.\n \"\"\"\n\n def __init__(\n self,\n bbox: Optional[List[int]] = None,\n category_id: Optional[int] = None,\n category_name: Optional[str] = None,\n bool_mask: Optional[np.ndarray] = None,\n score: Optional[float] = 0,\n shift_amount: Optional[List[int]] = [0, 0],\n full_shape: Optional[List[int]] = None,\n ):\n \"\"\"\n Creates ObjectPrediction from bbox, score, category_id, category_name, bool_mask.\n\n Arguments:\n bbox: list\n [minx, miny, maxx, maxy]\n score: float\n Prediction score between 0 and 1\n category_id: int\n ID of the object category\n category_name: str\n Name of the object category\n bool_mask: np.ndarray\n 2D boolean mask array. 
Should be None if model doesn't output segmentation mask.\n shift_amount: list\n To shift the box and mask predictions from sliced image\n to full sized image, should be in the form of [shift_x, shift_y]\n full_shape: list\n Size of the full image after shifting, should be in\n the form of [height, width]\n \"\"\"\n self.score = PredictionScore(score) \n \n # self.bbox.to_voc_bbox()=bbox.to_voc_bbox()\n super().__init__(\n bbox=bbox,\n category_id=category_id,\n bool_mask=bool_mask,\n category_name=category_name,\n shift_amount=shift_amount,\n full_shape=full_shape,\n )\n\n def centroide(self):\n if self.mask:\n return centroide(self.mask.bool_mask,[self.bbox.to_voc_bbox()[0],self.bbox.to_voc_bbox()[1]])\n \n def get_shifted_object_prediction(self):\n \"\"\"\n Returns shifted version ObjectPrediction.\n Shifts bbox and mask coords.\n Used for mapping sliced predictions over full image.\n \"\"\"\n if self.mask:\n return ObjectPrediction(\n bbox=self.bbox.get_shifted_box().to_voc_bbox(),\n category_id=self.category.id,\n score=self.score.value,\n bool_mask=self.mask.get_shifted_mask().bool_mask,\n category_name=self.category.name,\n shift_amount= self.mask.get_shifted_mask().shift_amount,#[0, 0],\n full_shape=self.mask.get_shifted_mask().full_shape,\n )\n else:\n return ObjectPrediction(\n bbox=self.bbox.get_shifted_box().to_voc_bbox(),\n category_id=self.category.id,\n score=self.score.value,\n bool_mask=None,\n category_name=self.category.name,\n shift_amount=[0, 0],\n full_shape=None,\n )\n\n def to_coco_prediction(self, image_id=None):\n \"\"\"\n Returns sahi.utils.coco.CocoPrediction representation of ObjectAnnotation.\n \"\"\"\n if self.mask:\n coco_prediction = CocoPrediction.from_coco_segmentation(\n segmentation=self.mask.to_coco_segmentation(),\n category_id=self.category.id,\n category_name=self.category.name,\n score=self.score.value,\n image_id=image_id,\n )\n else:\n coco_prediction = CocoPrediction.from_coco_bbox(\n bbox=self.bbox.to_coco_bbox(),\n category_id=self.category.id,\n category_name=self.category.name,\n score=self.score.value,\n image_id=image_id,\n )\n return coco_prediction\n\n def to_fiftyone_detection(self, image_height: int, image_width: int):\n \"\"\"\n Returns fiftyone.Detection representation of ObjectPrediction.\n \"\"\"\n try:\n import fiftyone as fo\n except ImportError:\n raise ImportError('Please run \"pip install -U fiftyone\" to install fiftyone first for fiftyone conversion.')\n\n x1, y1, x2, y2 = self.bbox.to_voc_bbox()\n rel_box = [x1 / image_width, y1 / image_height, (x2 - x1) / image_width, (y2 - y1) / image_height]\n fiftyone_detection = fo.Detection(label=self.category.name, bounding_box=rel_box, confidence=self.score.value)\n return fiftyone_detection\n\n def __repr__(self):\n return f\"\"\"ObjectPrediction<\n bbox: {self.bbox},\n mask: {self.mask},\n score: {self.score},\n category: {self.category}>\"\"\"\n\n\nclass PredictionResult:\n def __init__(\n self,\n object_prediction_list: List[ObjectPrediction],\n image: Union[Image.Image, str, np.ndarray],\n durations_in_seconds: Optional[Dict] = None,\n ):\n self.image: Image.Image = read_image_as_pil(image)\n self.image_width, self.image_height = self.image.size\n self.object_prediction_list: List[ObjectPrediction] = object_prediction_list\n self.durations_in_seconds = durations_in_seconds\n self.centroides=[i.centroide() for i in object_prediction_list]\n \n \n def clases(self):\n clases=[]\n for objeto in self.object_prediction_list:\n clases.append(objeto.category.id)\n return clases\n \n 
def mascaras(self):\n mask=np.zeros((self.image_height,self.image_width),dtype=np.uint8)\n for objeto in self.object_prediction_list:\n mask1 = objeto.mask.bool_mask*1#(objeto.category.id+1)\n mask[objeto.bbox.to_voc_bbox()[1]:objeto.bbox.to_voc_bbox()[1]+np.shape(mask1)[0],\n objeto.bbox.to_voc_bbox()[0]:objeto.bbox.to_voc_bbox()[0]+np.shape(mask1)[1]]=mask[objeto.bbox.to_voc_bbox()[1]:\n objeto.bbox.to_voc_bbox()[1]+np.shape(mask1)[0],\n objeto.bbox.to_voc_bbox()[0]:objeto.bbox.to_voc_bbox()[0]\n +np.shape(mask1)[1]]+mask1\n mask[np.where(mask>0)]=objeto.category.id+1\n return mask\n \n def lineas(self, fft_threshold=0.93,nminppl=10,clear =None):\n image=self.mascaras().copy()*1\n centros=np.array(self.centroides.copy())\n transf = np.fft.fft2(image-np.mean(image))\n transf_abs = np.abs(transf)\n transf_max = transf_abs.max()\n mascara=self.mascaras().copy()\n transf_abs[transf_abs<transf_max*fft_threshold]=0\n ifft = np.fft.ifft2(transf_abs*transf)\n ifft = (ifft / np.max(ifft))+1\n img_lines_aux = np.abs(ifft)\n img_lines_aux_norm=img_lines_aux/img_lines_aux.max()\n img_lines = np.zeros_like(img_lines_aux_norm)\n img_lines [ img_lines_aux_norm < 0.2] = 1\n lineas_entre_siembra = skeletonize(img_lines)\n extrem_izq=np.percentile(np.where(lineas_entre_siembra==True)[1],5)\n extrem_derec=np.percentile(np.where(lineas_entre_siembra==True)[1],95)\n lineas2=np.array([np.where(lineas_entre_siembra[:,int(extrem_izq)]==True),np.where(lineas_entre_siembra[:,int(extrem_derec)]==True)]).squeeze()\n rectas=[np.poly1d([(lineas2[1,i]-lineas2[0,i])/(extrem_derec-extrem_izq),-(lineas2[1,i]-lineas2[0,i])/(extrem_derec-extrem_izq)*extrem_izq+lineas2[0,i]]) for i in range(len(lineas2[0]))]\n lineas_d_surcos=[]\n object_prediction_list=[]\n centro2=[]\n \n if len(np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== True)[0])>1:\n ubica=np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== True)[0]\n if len(np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== True)[0])<nminppl:\n u=[p for p in range(len(self.object_prediction_list)) if p not in ubica]\n self.centroides=[self.centroides[t] for t in u]\n centros=centros[u]\n self.object_prediction_list=[self.object_prediction_list[t] for t in u]\n else:\n datos=centros[np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== True),:].squeeze()\n # huber = HuberRegressor().fit(np.expand_dims(datos[:,0],axis=1),datos[:,1])\n # ubica2=np.where(np.abs(datos[:,1]-huber.predict(np.expand_dims(datos[:,0],axis=-1)))>0.5*np.mean(np.array([self.object_prediction_list[l].mask.shape[0] for l in ubica])))\n theilsen=TheilSenRegressor().fit(np.expand_dims(datos[:,0],axis=1),datos[:,1])\n ubica2=np.where(np.abs(datos[:,1]-theilsen.predict(np.expand_dims(datos[:,0],axis=-1)))>0.5*np.mean(np.array([self.object_prediction_list[l].mask.shape[0] for l in ubica])))\n\n\n if clear is not None:\n u=[p for p in range(len(self.object_prediction_list)) if p not in ubica[ubica2[0]]]\n self.centroides=[self.centroides[t] for t in u]\n centros=centros[u]\n self.object_prediction_list=[self.object_prediction_list[t] for t in u]\n# object_prediction_list.extend([self.object_prediction_list[t] for t in u])\n\n # lineas_d_surcos.append(np.poly1d([huber.coef_[0],huber.intercept_]))\n lineas_d_surcos.append(np.poly1d([theilsen.coef_[0],theilsen.intercept_]))\n \n for i in range(len(rectas)-1):\n if len(np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True)[0])>1:\n 
ubica=np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True)[0]\n if len(np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True)[0])<nminppl:\n u=[p for p in range(len(self.object_prediction_list)) if p not in ubica]\n self.centroides=[self.centroides[t] for t in u]\n centros=centros[u]\n self.object_prediction_list=[self.object_prediction_list[t] for t in u]\n else:\n datos=centros[np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True),:].squeeze()\n # huber = HuberRegressor().fit(np.expand_dims(datos[:,0],axis=1),datos[:,1])\n # ubica2=np.where(np.abs(datos[:,1]-huber.predict(np.expand_dims(datos[:,0],axis=-1)))>0.5*np.mean(np.array([self.object_prediction_list[l].mask.shape[0] for l in ubica])))\n theilsen=TheilSenRegressor().fit(np.expand_dims(datos[:,0],axis=1),datos[:,1])\n ubica2=np.where(np.abs(datos[:,1]-theilsen.predict(np.expand_dims(datos[:,0],axis=-1)))>0.5*np.mean(np.array([self.object_prediction_list[l].mask.shape[0] for l in ubica])))\n\n\n if clear is not None:\n u=[p for p in range(len(self.object_prediction_list)) if p not in ubica[ubica2[0]]]\n self.centroides=[self.centroides[t] for t in u]\n# object_prediction_list=[self.object_prediction_list[t] for t in u]\n centros=centros[u]\n self.object_prediction_list=[self.object_prediction_list[t] for t in u]\n \n\n # lineas_d_surcos.append(np.poly1d([huber.coef_[0],huber.intercept_]))\n lineas_d_surcos.append(np.poly1d([theilsen.coef_[0],theilsen.intercept_]))\n \n if len(np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True)[0])>1:\n ubica=np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True)[0]\n if len(np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True)[0])<nminppl:\n u=[p for p in range(len(self.object_prediction_list)) if p not in ubica]\n self.centroides=[self.centroides[t] for t in u]\n centros=centros[u]\n self.object_prediction_list=[self.object_prediction_list[t] for t in u]\n else:\n datos=centros[np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True),:].squeeze()\n # huber = HuberRegressor().fit(np.expand_dims(datos[:,0],axis=1),datos[:,1])\n # ubica2=np.where(np.abs(datos[:,1]-huber.predict(np.expand_dims(datos[:,0],axis=-1)))>0.5*np.mean(np.array([self.object_prediction_list[l].mask.shape[0] for l in ubica])))\n theilsen=TheilSenRegressor().fit(np.expand_dims(datos[:,0],axis=1),datos[:,1])\n ubica2=np.where(np.abs(datos[:,1]-theilsen.predict(np.expand_dims(datos[:,0],axis=-1)))>0.5*np.mean(np.array([self.object_prediction_list[l].mask.shape[0] for l in ubica])))\n\n\n if clear is not None:\n u=[p for p in range(len(self.object_prediction_list)) if p not in ubica[ubica2[0]]]\n self.centroides=[self.centroides[t] for t in u]\n# object_prediction_list=[self.object_prediction_list[t] for t in u]\n centros=centros[u]\n self.object_prediction_list=[self.object_prediction_list[t] for t in u]\n \n\n # lineas_d_surcos.append(np.poly1d([huber.coef_[0],huber.intercept_]))\n lineas_d_surcos.append(np.poly1d([theilsen.coef_[0],theilsen.intercept_]))\n \n #----------\n \n id_surco=0\n info_d_surcos=[]\n if len(np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== True)[0])>nminppl:\n ubica=np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== True)[0]\n datos=centros[np.where((centros[:,1]<rectas[0](centros[:,0]))*(centros[:,1]>0)== 
True),:].squeeze()\n orden=np.sort(datos[:,0])\n\n info_d_surcos.append([id_surco,ubica[[np.where(datos[:,0]==np.sort(datos[:,0])[i])[0][0] for i in range(len(datos[:,0]))]]])\n id_surco=id_surco+1\n object_prediction_list.extend([self.object_prediction_list[u] for u in ubica])\n centro2.extend([self.centroides[u] for u in ubica])\n \n \n for i in range(len(rectas)-1):\n if len(np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True)[0])>nminppl:\n ubica=np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True)[0]\n datos=centros[np.where((centros[:,1]<rectas[i+1](centros[:,0]))*(centros[:,1]>rectas[i](centros[:,0]))== True),:].squeeze()\n orden=np.sort(datos[:,0])\n\n info_d_surcos.append([id_surco,ubica[[np.where(datos[:,0]==np.sort(datos[:,0])[i])[0][0] for i in range(len(datos[:,0]))]]])\n id_surco=id_surco+1\n object_prediction_list.extend([self.object_prediction_list[u] for u in ubica])\n centro2.extend([self.centroides[u] for u in ubica])\n \n if len(np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True)[0])>nminppl:\n ubica=np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True)[0]\n datos=centros[np.where((centros[:,1]>rectas[-1](centros[:,0]))*(centros[:,1]<mascara.shape[0])== True),:].squeeze()\n orden=np.sort(datos[:,0])\n\n info_d_surcos.append([id_surco,ubica[[np.where(datos[:,0]==np.sort(datos[:,0])[i])[0][0] for i in range(len(datos[:,0]))]]])\n id_surco=id_surco+1\n object_prediction_list.extend([self.object_prediction_list[u] for u in ubica])\n centro2.extend([self.centroides[u] for u in ubica])\n \n self.object_prediction_list=object_prediction_list\n self.centroides=centro2\n \n return lineas_d_surcos,info_d_surcos\n def info(self,proporcion=0.5,d_surco_metros=0.52):\n lineas,info_d_surcos=self.lineas()\n centros=self.centroides\n rotacion=np.arctan(np.mean(np.array([lineas[i][1] for i in range(len(lineas))])))\n siembra=np.zeros((self.image_height,self.image_width),np.uint8)\n for i in range(len(lineas)):\n cv2.line(siembra,(0,int(lineas[i](0))),(self.image_width-1,int(lineas[i](self.image_width-1))),(255,255,255),2)\n siembra_rotada= rotate(siembra, rotacion*180/np.pi, reshape=False, mode='nearest')\n height,width = siembra_rotada.shape\n\n y_crop_top = int(height*(proporcion/2))\n y_crop_bottom = -y_crop_top\n x_crop_left = int(width*(proporcion/2))\n x_crop_rigth = -x_crop_left\n \n skele_new = skeletonize(siembra_rotada/255)\n \n transecta = skele_new[y_crop_top:y_crop_bottom,int(width*0.5)]\n entreLineas = np.where(transecta==1)\n if len(entreLineas[0])<2:\n transecta=skele_new[:,int(width*0.5)]\n entreLineas = np.where(transecta==1)\n\n y_crop_top_modified = y_crop_top+entreLineas[0][0]\n y_crop_bottom_modified = y_crop_top+entreLineas[0][-1]\n \n Nsurcos = len(entreLineas[0])\n if Nsurcos>1:\n pix_surco = ( entreLineas[0][-1] - entreLineas[0][0] ) / (Nsurcos-1)\n else:\n pix_surco = entreLineas[0][0]\n \n resumen=[]\n for p,t in zip(lineas,info_d_surcos):\n area=[len(np.where(self.object_prediction_list[l].mask.bool_mask==True)[0])*(d_surco_metros*100/(pix_surco))**2 for l in t[1]]\n dist=[np.sqrt((p(centros[t[1][l]][0])-p(centros[t[1][l+1]][0]))**2\n +(centros[t[1][l]][0]-centros[t[1][l+1]][0])**2)*(d_surco_metros*100/(pix_surco)) for l in range(len(t[1])-1)]\n dist_real=[np.sqrt((centros[t[1][l]][1]-centros[t[1][l+1]][1])**2\n +(centros[t[1][l]][0]-centros[t[1][l+1]][0])**2)*(d_surco_metros*100/(pix_surco)) for l in 
range(len(t[1])-1)]\n resumen.append({'id':t[0],'recta': p,\n 'largo':np.sqrt((p(centros[t[1][0]][0])-p(centros[t[1][-1]][0]))**2\n +(centros[t[1][0]][0]-centros[t[1][-1]][0])**2)*(d_surco_metros*100/(pix_surco))\n ,'plantas':t[1],\n 'area':area,\n 'stadist_area':{'cant_plt':len(t[1]),'min':np.min(np.array(area)), 'max':np.max(np.array(area)),\n 'promedio':np.mean(np.array(area)),'desv_std':np.std(np.array(area)),\n 'CV':np.std(np.array(area))/np.mean(np.array(area))},\n 'distancias':dist,\n 'stadist_dist':{'min':np.min(np.array(dist)), 'max':np.max(np.array(dist)),\n 'promedio':np.mean(np.array(dist)),'desv_std':np.std(np.array(dist)),\n 'CV':np.std(np.array(dist))/np.mean(np.array(dist))},\n 'distancias_real':dist_real,\n 'stadist_dist_real':{'min':np.min(np.array(dist_real)), 'max':np.max(np.array(dist_real)),\n 'promedio':np.mean(np.array(dist_real)),'desv_std':np.std(np.array(dist_real)),\n 'CV':np.std(np.array(dist_real))/np.mean(np.array(dist_real))}})\n resumen.append({'id':'total','plantas':[],'area':[],'distancias':[],'distancias_real':[],'stadist_area':{}, 'stadist_dist':{},'stadist_dist_real':{}})\n for i in range(len(lineas)):\n resumen[-1]['plantas'].extend(resumen[i]['plantas'])\n resumen[-1]['area'].extend(resumen[i]['area'])\n resumen[-1]['distancias'].extend(resumen[i]['distancias'])\n resumen[-1]['distancias_real'].extend(resumen[i]['distancias_real'])\n \n# resumen[-1]['plantas']=list(set().union(resumen[-1]['plantas'],resumen[i]['plantas']))\n# resumen[-1]['area']=list(set().union(resumen[-1]['area'],resumen[i]['area']))\n# resumen[-1]['distancias']=list(set().union(resumen[-1]['distancias'],resumen[i]['distancias']))\n# resumen[-1]['distancias_real']=list(set().union(resumen[-1]['distancias'],resumen[i]['distancias_real']))\n area=np.array(resumen[-1]['area'])\n dist=np.array(resumen[-1]['distancias'])\n dist_real=np.array(resumen[-1]['distancias_real'])\n resumen[-1]['stadist_area']={'cant_plt':len(resumen[-1]['plantas']),'min':np.min(np.array(area)),\n 'max':np.max(np.array(area)),\n 'promedio':np.mean(np.array(area)),'desv_std':np.std(np.array(area)),\n 'CV':np.std(np.array(area))/np.mean(np.array(area))}\n\n resumen[-1]['stadist_dist']={'min':np.min(np.array(dist)), 'max':np.max(np.array(dist)),\n 'promedio':np.mean(np.array(dist)),'desv_std':np.std(np.array(dist)),\n 'CV':np.std(np.array(dist))/np.mean(np.array(dist))}\n \n resumen[-1]['stadist_dist_real']={'min':np.min(np.array(dist_real)), 'max':np.max(np.array(dist_real)),\n 'promedio':np.mean(np.array(dist_real)),'desv_std':np.std(np.array(dist_real)),\n 'CV':np.std(np.array(dist_real))/np.mean(np.array(dist_real))}\n \n return {'rotacion': rotacion*180/np.pi,'resolucion_rotacion' : d_surco_metros/pix_surco,'resolucion_orig': d_surco_metros/(pix_surco*np.cos(rotacion)),'resumen':resumen}\n \n def export_visuals(self, export_dir: str = \"demo_data/\", export_file: str = \"prediction_visual\", text_size: float = None, text_th: float = None, rect_th: int = None, \n etiqueta: int =None, centro: int = None, lineas: int =None, clear=None, export_format: str = \"png\"):\n \n Path(export_dir).mkdir(parents=True, exist_ok=True)\n image=np.array(self.image)\n mascara=self.mascaras()\n r = np.zeros_like(mascara).astype(np.uint8)\n g = np.zeros_like(mascara).astype(np.uint8)\n b = np.zeros_like(mascara).astype(np.uint8)\n colors = Colors()\n color = colors(self.object_prediction_list[0].category.id)\n (r[mascara > 0], g[mascara > 0], b[mascara >0]) = color\n \n rgb_mask = np.stack([r, g, b], axis=2)\n image = 
cv2.addWeighted(image, 1, rgb_mask, 0.4, 0)\n \n if centro is not None or centro !=0:\n centro=self.centroides\n ptos=np.zeros_like(image,dtype=np.uint8)\n centro=np.array(centro)\n ptos[centro[:,1],centro[:,0],:]=255\n kernel = np.ones((7,7),np.uint8)\n image = cv2.addWeighted(image, 1, cv2.dilate(ptos,kernel,iterations = 1), 0.8, 0)\n# for i in centro:\n# cv2.circle(image, i, 7, (255, 255, 255), -1)\n if lineas is not None or lineas !=0:\n lineas,info_d_surcos=self.lineas()\n for i in lineas:\n cv2.line(image,(0,int(i(0))),(self.image_width-1,int(i(self.image_width-1))),(255,255,255),5)\n \n if etiqueta is not None or etiqueta !=0:\n rect_th = rect_th or max(round(sum(image.shape) / 2 * 0.001), 1)\n # set text_th for category names\n text_th = text_th or max(rect_th - 1, 1)\n # set text_size for category names\n text_size = text_size or rect_th / 3\n # add bbox and mask to image if present\n for object_prediction in self.object_prediction_list:\n bbox = object_prediction.bbox.to_voc_bbox()\n category_name = object_prediction.category.name\n score = object_prediction.score.value\n # set bbox points\n p1, p2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3]))\n # visualize boxes\n cv2.rectangle(\n image,\n p1,\n p2,\n color=color,\n thickness=rect_th,\n )\n # arange bounding box text location\n label = f\"{category_name} {score:.2f}\"\n w, h = cv2.getTextSize(label, 0, fontScale=text_size, thickness=text_th)[0] # label width, height\n outside = p1[1] - h - 3 >= 0 # label fits outside box\n p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3\n # add bounding box text\n cv2.rectangle(image, p1, p2, color, -1, cv2.LINE_AA) # filled\n cv2.putText(\n image,\n label,\n (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),\n 0,\n text_size,\n (255, 255, 255),\n thickness=text_th,\n )\n \n if export_dir:\n # save inference result\n save_path = os.path.join(export_dir, export_file + \".\" + export_format)\n cv2.imwrite(save_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\n \n# visualize_object_predictions(\n# image=np.ascontiguousarray(image),\n# etiqueta=etiqueta,\n# object_prediction_list=self.object_prediction_list,\n# rect_th=rect_th,\n# text_size=text_size,\n# text_th=None,\n# color=None,\n# output_dir=export_dir,\n# file_name=export_file,\n# export_format=\"png\",\n# )\n \n \n def to_coco_annotations(self):\n coco_annotation_list = []\n for object_prediction in self.object_prediction_list:\n coco_annotation_list.append(object_prediction.to_coco_prediction().json)\n return coco_annotation_list\n\n def to_coco_predictions(self, image_id: Optional[int] = None):\n coco_prediction_list = []\n for object_prediction in self.object_prediction_list:\n coco_prediction_list.append(object_prediction.to_coco_prediction(image_id=image_id).json)\n return coco_prediction_list\n\n def to_imantics_annotations(self):\n imantics_annotation_list = []\n for object_prediction in self.object_prediction_list:\n imantics_annotation_list.append(object_prediction.to_imantics_annotation())\n return imantics_annotation_list\n\n def to_fiftyone_detections(self):\n try:\n import fiftyone as fo\n except ImportError:\n raise ImportError('Please run \"pip install -U fiftyone\" to install fiftyone first for fiftyone conversion.')\n\n fiftyone_detection_list: List[fo.Detection] = []\n for object_prediction in self.object_prediction_list:\n fiftyone_detection_list.append(\n object_prediction.to_fiftyone_detection(image_height=self.image_height, image_width=self.image_width)\n )\n return fiftyone_detection_list\n\n 
\nclass Colors:\n # color palette\n def __init__(self):\n hex = (\n \"FF3838\",\n \"2C99A8\",\n \"FF701F\",\n \"6473FF\",\n \"CFD231\",\n \"48F90A\",\n \"92CC17\",\n \"3DDB86\",\n \"1A9334\",\n \"00D4BB\",\n \"FF9D97\",\n \"00C2FF\",\n \"344593\",\n \"FFB21D\",\n \"0018EC\",\n \"8438FF\",\n \"520085\",\n \"CB38FF\",\n \"FF95C8\",\n \"FF37C7\",\n )\n self.palette = [self.hex2rgb(\"#\" + c) for c in hex]\n self.n = len(self.palette)\n\n def __call__(self, i, bgr=False):\n c = self.palette[int(i) % self.n]\n return (c[2], c[1], c[0]) if bgr else c\n\n @staticmethod\n def hex2rgb(h): # rgb order\n return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))\n\ndef select_random_color():\n \"\"\"\n Selects random color.\n \"\"\"\n colors = [\n [0, 255, 0],\n [0, 0, 255],\n [255, 0, 0],\n [0, 255, 255],\n [255, 255, 0],\n [255, 0, 255],\n [80, 70, 180],\n [250, 80, 190],\n [245, 145, 50],\n [70, 150, 250],\n [50, 190, 190],\n ]\n return colors[random.randrange(0, 10)]\n" ]
[ [ "numpy.poly1d", "numpy.expand_dims", "numpy.sqrt", "numpy.max", "numpy.zeros_like", "numpy.mean", "numpy.where", "numpy.stack", "numpy.zeros", "sklearn.linear_model.TheilSenRegressor", "numpy.min", "scipy.ndimage.rotate", "numpy.array", "numpy.fft.ifft2", "numpy.abs", "numpy.cos", "numpy.sort", "numpy.ones", "numpy.shape" ] ]
joshuahigginson1/Python_Colour_Tools
[ "ded69c0ab92e62c0b5472bb3c9cf63089b1b844b" ]
[ "application/modules/average_colour.py" ]
[ "\"\"\"Task: This module finds the average colour of a polygon selection.\"\"\"\n\n# Imports --------------------------------------------------------------\nfrom skimage import draw\nimport numpy\n\n\n# Functions ------------------------------------------------------------\n\ndef ave_colour_from_selection(image, poly):\n \"\"\" This function gets the average pixel colour from a polygon selection.\n Credit to user Malibuoooo from StackOverflow.\n :param image: Our image file.\n :param poly: Numpy array of coordinates in which we are averaging.\n :return: Returns the average colour within our polygon.\n \"\"\"\n\n # Generates a list of pixels that match in our polygon.\n\n pixels = image[draw.polygon(poly[:, 1], poly[:, 0])]\n\n # Use the channels of each pixel to get averages and convert them to ints.\n # Return the average colour of every pixel.\n\n return numpy.average(pixels, 0).astype(int)\n" ]
[ [ "numpy.average" ] ]
gaugup/DiCE
[ "41dfde376ec3e5471d8e04899e639d2621b987f3" ]
[ "tests/test_dice_interface/test_dice_pytorch.py" ]
[ "import numpy as np\nimport pytest\n\nimport dice_ml\nfrom dice_ml.utils import helpers\n\ntorch = pytest.importorskip(\"torch\")\n\[email protected]\ndef pyt_exp_object():\n backend = 'PYT'\n dataset = helpers.load_adult_income_dataset()\n d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')\n ML_modelpath = helpers.get_adult_income_modelpath(backend=backend)\n m = dice_ml.Model(model_path= ML_modelpath, backend=backend)\n exp = dice_ml.Dice(d, m)\n return exp\n\nclass TestDiceTorchMethods:\n @pytest.fixture(autouse=True)\n def _initiate_exp_object(self, pyt_exp_object, sample_adultincome_query):\n self.exp = pyt_exp_object # explainer object\n self.exp.do_cf_initializations(total_CFs=4, algorithm=\"DiverseCF\", features_to_vary=\"all\") # initialize required params for CF computations\n\n # prepare query isntance for CF optimization\n query_instance = self.exp.data_interface.prepare_query_instance(query_instance=sample_adultincome_query, encoding='one-hot')\n self.query_instance = query_instance.iloc[0].values\n\n self.exp.initialize_CFs(self.query_instance, init_near_query_instance=True) # initialize CFs\n self.exp.target_cf_class = torch.tensor(1).float() # set desired class to 1\n\n # setting random feature weights\n np.random.seed(42)\n weights = np.random.rand(len(self.exp.data_interface.encoded_feature_names))\n self.exp.feature_weights_list = torch.tensor(weights)\n\n @pytest.mark.parametrize(\"yloss, output\",[(\"hinge_loss\", 10.8443), (\"l2_loss\", 0.9999), (\"log_loss\", 9.8443)])\n def test_yloss(self, yloss, output):\n self.exp.yloss_type = yloss\n loss1 = self.exp.compute_yloss()\n assert pytest.approx(loss1.data.detach().numpy(), abs=1e-4) == output\n\n def test_proximity_loss(self):\n self.exp.x1 = torch.tensor(self.query_instance)\n loss2 = self.exp.compute_proximity_loss()\n assert pytest.approx(loss2.data.detach().numpy(), abs=1e-4) == 0.0068 # proximity loss computed for given query instance and feature weights.\n\n @pytest.mark.parametrize(\"diversity_loss, output\",[(\"dpp_style:inverse_dist\", 0.0104), (\"avg_dist\", 0.1743)])\n def test_diversity_loss(self, diversity_loss, output):\n self.exp.diversity_loss_type = diversity_loss\n loss3 = self.exp.compute_diversity_loss()\n assert pytest.approx(loss3.data.detach().numpy(), abs=1e-4) == output\n\n def test_regularization_loss(self):\n loss4 = self.exp.compute_regularization_loss()\n assert pytest.approx(loss4.data.detach().numpy(), abs=1e-4) == 0.2086 # regularization loss computed for given query instance and feature weights.\n\n def test_final_cfs_and_preds(self, sample_adultincome_query):\n \"\"\"\n Tets correctness of final CFs and their predictions for sample query instance.\n \"\"\"\n dice_exp = self.exp.generate_counterfactuals(sample_adultincome_query, total_CFs=4, desired_class=\"opposite\")\n test_cfs = [[72.0, 'Private', 'HS-grad', 'Married', 'White-Collar', 'White', 'Female', 45.0, 0.691], [29.0, 'Private', 'Prof-school', 'Married', 'Service', 'White', 'Male', 42.0, 0.943], [52.0, 'Private', 'Doctorate', 'Married', 'Service', 'White', 'Female', 44.0, 0.97], [47.0, 'Private', 'Masters', 'Married', 'Service', 'White', 'Female', 73.0, 0.971]]\n assert dice_exp.final_cfs_list == test_cfs\n\n preds = [np.round(preds.flatten().tolist(), 3)[0] for preds in dice_exp.final_cfs_preds]\n assert pytest.approx(preds, abs=1e-3) == [0.691, 0.943, 0.97, 0.971]\n" ]
[ [ "numpy.random.seed" ] ]
billgoo/Rutgers-CS596-Topics-in-the-Foundations-of-Computer-Science
[ "ba4dbe59ef3a4ab625cee47e6a21886785dc19d5" ]
[ "Assignments/hw4_solution/problem3.py" ]
[ "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.scale as sc\n\n# numpy.random.normal(loc=0.0, scale=1.0, size=(m, n, k))\nLENGTH = 5\nTHETA_STAR = np.array([25, 22, 16, 132, 66])\nMU = 0.01\n\ndef generate_samples(t, length):\n X = np.random.normal(size=(t, length))\n W = np.random.normal(loc=0.0, scale=0.1, size=t)\n # print(X)\n # print(W)\n Y = np.array([np.dot(THETA_STAR.transpose(), X[i]) + W[i] for i in range(t)])\n\n return Y, X, W\n\n\ndef lms(Y, X, mu):\n theta = np.array([0.0 for _ in range(5)])\n e = []\n\n for t in range(Y.size):\n temp = theta\n theta = temp + np.dot((mu * (Y[t] - np.dot(temp.transpose(), X[t]))), X[t])\n # theta = theta + np.dot((MU * (Y[t] - np.dot(theta.transpose(), X[t]))), X[t])\n e.append(np.linalg.norm(theta - THETA_STAR) ** 2)\n\n return e, theta\n\n\n\nif __name__ == \"__main__\":\n t = 2600\n Y, X, W = generate_samples(t, LENGTH)\n iter_ = [i for i in range(Y.size)]\n\n # c is for question c) with higher mu and d with mu / 2 in the question d)\n error_c, theta_c = lms(Y, X, MU)\n error_d, theta_d = lms(Y, X, MU/2)\n\n print(theta_c)\n # print(min(error_c), error_c[-1])\n print(theta_d)\n # print(min(error_d), error_d[-1])\n \n plt.figure(figsize=(13,8))\n plt.plot(iter_, error_c, color='r', linestyle='-', label=\"mu = 0.01\")\n plt.plot(iter_, error_d, color='b', linestyle='-.', label=\"mu = 0.005\")\n plt.ylabel('||theta_T - theta_star||^2')\n plt.xlabel('No. of iterations')\n plt.semilogy(error_c)\n plt.semilogy(error_d)\n plt.legend()\n plt.show()\n\n print(\"end\")\n" ]
[ [ "matplotlib.pyplot.semilogy", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.linalg.norm", "matplotlib.pyplot.plot", "numpy.random.normal", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
mike-holcomb/tensorflow
[ "a331eaf593bb91eaafd8953f1bd7d502eaeee53a" ]
[ "tensorflow/contrib/distribute/python/keras_correctness_test_base.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Correctness tests for tf.keras using DistributionStrategy.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.contrib.distribute.python import combinations\nfrom tensorflow.contrib.distribute.python import mirrored_strategy\nfrom tensorflow.contrib.distribute.python import tpu_strategy\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.keras.engine import distributed_training_utils\n\n_RANDOM_SEED = 1337\n_EVAL_STEPS = 20\n_GLOBAL_BATCH_SIZE = 64\n\n# Note: Please make sure the tests in this file are also covered in\n# keras_backward_compat_test for features that are supported with both APIs.\n\n\nall_strategies = [\n combinations.default_strategy,\n combinations.one_device_strategy,\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.mirrored_strategy_with_two_gpus,\n combinations.core_mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_two_gpus,\n combinations.tpu_strategy, # steps_per_run=2\n combinations.tpu_strategy_one_step,\n]\n\n\ndef eager_mode_test_configuration():\n return combinations.combine(mode='eager',\n use_numpy=False,\n use_validation_data=False)\n\n\ndef graph_mode_test_configuration():\n return combinations.combine(mode='graph',\n use_numpy=[True, False],\n use_validation_data=[True, False])\n\n\ndef all_strategy_and_input_config_combinations():\n return (\n combinations.times(\n combinations.combine(distribution=all_strategies),\n eager_mode_test_configuration() + graph_mode_test_configuration()))\n\n\ndef strategies_for_embedding_models():\n \"\"\"Returns distribution strategies to test for embedding models.\n\n Since embedding models take longer to train, we disregard OneDeviceStrategy\n and DefaultStrategy in order to prevent testing timeouts.\n \"\"\"\n\n strategies = [s for s in all_strategies\n if not s.required_tpu and s.required_gpus is not None]\n strategies.append(combinations.tpu_strategy_loop_on_device)\n strategies.append(combinations.tpu_strategy_one_step_loop_on_device)\n return strategies\n\n\ndef test_combinations_for_embedding_model():\n return (\n combinations.times(\n combinations.combine(distribution=\n strategies_for_embedding_models()),\n (graph_mode_test_configuration() +\n eager_mode_test_configuration())))\n\n\nclass MaybeDistributionScope(object):\n \"\"\"Provides a context allowing no distribution strategy.\"\"\"\n\n def __init__(self, distribution):\n self._distribution = distribution\n self._scope = 
None\n\n  def __enter__(self):\n    if self._distribution:\n      self._scope = self._distribution.scope()\n      self._scope.__enter__()\n\n  def __exit__(self, exc_type, value, traceback):\n    if self._distribution:\n      self._scope.__exit__(exc_type, value, traceback)\n      self._scope = None\n\n\ndef batch_wrapper(dataset, batch_size, distribution, repeat=None):\n  if repeat:\n    dataset = dataset.repeat(repeat)\n  # TPUs currently require fully defined input shapes, drop_remainder ensures\n  # the input will have fully defined shapes.\n  if isinstance(distribution, tpu_strategy.TPUStrategy):\n    return dataset.batch(batch_size, drop_remainder=True)\n  else:\n    return dataset.batch(batch_size)\n\n\ndef get_batch_size(global_batch_size, distribution):\n  batch_size = global_batch_size\n  # TODO(b/118776054): Use global batch size for Keras/DS support.\n  use_per_core_batch_size = (\n      distribution and\n      not distributed_training_utils.global_batch_size_supported(distribution))\n  if use_per_core_batch_size:\n    batch_size //= distribution.num_replicas_in_sync\n  return batch_size\n\n\ndef get_correctness_test_inputs(use_numpy, use_validation_data,\n                                with_distribution, x_train, y_train, x_predict):\n  \"\"\"Generates the inputs for correctness check when enabling Keras with DS.\"\"\"\n  training_epochs = 2\n  global_batch_size = _GLOBAL_BATCH_SIZE\n  batch_size = get_batch_size(global_batch_size, with_distribution)\n\n  if use_numpy:\n    training_inputs = {\n        'batch_size': batch_size,\n        'x': x_train,\n        'y': y_train,\n        'epochs': training_epochs,\n        'shuffle': False,\n    }\n\n    if use_validation_data:\n      eval_inputs = None\n      training_inputs['validation_data'] = (x_train, y_train)\n    else:\n      eval_inputs = {\n          'batch_size': batch_size,\n          'x': x_train,\n          'y': y_train,\n      }\n    predict_inputs = {\n        'x': np.array(x_predict, dtype=np.float32),\n    }\n  else:\n    if len(x_train) < _GLOBAL_BATCH_SIZE * _EVAL_STEPS:\n      # Currently, we cannot detect the size of a dataset. So, the eval steps are\n      # hard-coded.\n      raise ValueError('x_train must have at least '\n                       '_GLOBAL_BATCH_SIZE * _EVAL_STEPS samples')\n    # For dataset inputs, we do not pass batch_size to\n    # keras.fit/evaluate/predict. 
The batch size is part of the dataset.\n train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))\n x = batch_wrapper(train_dataset, batch_size, with_distribution,\n repeat=training_epochs)\n\n training_inputs = {\n 'batch_size': None,\n 'x': x,\n 'y': None,\n 'epochs': training_epochs,\n 'shuffle': False,\n 'steps_per_epoch': len(x_train) // global_batch_size,\n }\n if use_validation_data:\n eval_inputs = None # Remove the eval_inputs\n eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))\n x = batch_wrapper(eval_dataset, batch_size, with_distribution)\n training_inputs['validation_data'] = x\n training_inputs['validation_steps'] = 5\n else:\n eval_inputs = {\n 'batch_size': None,\n 'x': x,\n 'y': None,\n 'steps': _EVAL_STEPS,\n }\n\n predict_batch_size = get_batch_size(len(x_predict), with_distribution)\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)\n predict_dataset = batch_wrapper(predict_dataset, predict_batch_size,\n with_distribution)\n predict_inputs = {\n 'steps': 1,\n 'x': predict_dataset,\n }\n\n return training_inputs, eval_inputs, predict_inputs\n\n\ndef fit_eval_and_predict(\n initial_weights, input_fn, model_fn, distribution=None):\n \"\"\"Generates results for fit/predict/evaluate for given model.\"\"\"\n model = model_fn(initial_weights=initial_weights, distribution=distribution)\n training_inputs, eval_inputs, predict_inputs = input_fn(distribution)\n\n result = {}\n result['training_history_1'] = model.fit(**training_inputs).history\n\n if eval_inputs is not None:\n result['eval_result_1'] = model.evaluate(**eval_inputs)\n\n result['weights_1'] = model.get_weights()\n\n if predict_inputs is not None:\n result['predict_result_1'] = model.predict(**predict_inputs)\n\n # Train and eval again to mimic user's flow.\n\n result['training_history_2'] = model.fit(**training_inputs).history\n\n if eval_inputs is not None:\n result['eval_result_2'] = model.evaluate(**eval_inputs)\n\n result['weights_2'] = model.get_weights()\n\n return result\n\n\ndef compare_results(results_with_ds, results_without_ds, distribution,\n testcase):\n \"\"\"Compares results of model compiled with/without distribution strategy.\"\"\"\n\n default_tolerance = 1e-5\n tol_table = {}\n\n if isinstance(distribution, (\n mirrored_strategy.MirroredStrategy,\n mirrored_strategy.CoreMirroredStrategy,\n distribute_lib._DefaultDistributionStrategy)): # pylint: disable=protected-access\n # TODO(b/119257215): Weights are not exactly the same, so use larger\n # tolerance for now. 
Predict should be related to weights.\n tol_table = {\n 'weights_1': 1e-4,\n 'weights_2': 1e-4,\n 'predict_result_1': 1e-4,\n }\n\n for key in results_with_ds:\n if (key.startswith('training_history') and\n isinstance(distribution, tpu_strategy.TPUStrategy) and\n distribution.extended.steps_per_run > 1):\n # TODO(b/119894254): Enable this test for all cases once the\n # underlying bug is fixed.\n continue\n\n tolerance = tol_table.get(key, default_tolerance)\n\n testcase.assertAllClose(\n results_with_ds[key],\n results_without_ds[key],\n atol=tolerance,\n rtol=tolerance,\n msg='Fail to assert {}.'.format(key))\n\n\ndef should_skip_tpu_with_eager(distribution):\n return (context.executing_eagerly() and\n isinstance(distribution, tpu_strategy.TPUStrategy))\n\n\nclass LearningRateBatchScheduler(keras.callbacks.Callback):\n \"\"\"Scheduler that dynamically sets the learning rate of model.\"\"\"\n\n def __init__(self, update_freq=None):\n self._update_freq = update_freq\n\n def on_batch_begin(self, batch, logs=None):\n if self._update_freq and batch % self._update_freq != 0:\n return\n\n # To avoid divergence, limit the value range.\n lr = 0.001 * (batch % 10)\n keras.backend.set_value(self.model.optimizer.lr, lr)\n\n\nclass TestDistributionStrategyCorrectnessBase(test.TestCase,\n parameterized.TestCase):\n \"\"\"Model agnostic testing infra to test correctness of Keras models.\"\"\"\n\n def set_up_test_config(self, use_numpy=False,\n use_validation_data=False,\n with_batch_norm=False):\n self.use_numpy = use_numpy\n self.use_validation_data = use_validation_data\n self.with_batch_norm = with_batch_norm\n\n keras.backend.set_image_data_format('channels_last')\n np.random.seed(_RANDOM_SEED)\n random_seed.set_random_seed(_RANDOM_SEED)\n\n def get_data(self):\n num_samples = 10000\n x_train = np.random.randint(0, 2, num_samples)\n x_train = np.reshape(x_train, (num_samples, 1))\n y_train = x_train\n return (x_train.astype('float32'), y_train.astype('float32'), None)\n\n def get_model(self, distribution=None):\n raise NotImplementedError\n\n def skip_unsupported_test_configuration(self, distribution):\n if should_skip_tpu_with_eager(distribution):\n self.skipTest('TPUStrategy does not support eager mode now.')\n\n if context.executing_eagerly() and self.use_numpy:\n self.skipTest('Numpy as inputs is not supported with strategy in eager.')\n\n if context.executing_eagerly() and self.use_validation_data:\n self.skipTest('TODO(hongjunchoi): Add test logic for using validation '\n 'data for eager execution.')\n return\n\n def run_correctness_test(self,\n distribution,\n use_numpy,\n use_validation_data,\n with_batch_norm=False):\n with self.cached_session():\n self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)\n self.skip_unsupported_test_configuration(distribution)\n\n # Train, eval, and predict datasets are created with the same input numpy\n # arrays.\n x_train, y_train, x_predict = self.get_data()\n\n # The model is built once and the initial weights are saved.\n # This is used to initialize the model for both the distribution and\n # non-distribution run.\n model = self.get_model()\n initial_weights = model.get_weights()\n\n def input_fn(dist):\n return get_correctness_test_inputs(\n use_numpy, use_validation_data, dist, x_train, y_train, x_predict)\n\n results_with_ds = fit_eval_and_predict(\n initial_weights, input_fn=input_fn, model_fn=self.get_model,\n distribution=distribution)\n results_without_ds = fit_eval_and_predict(\n initial_weights, input_fn=input_fn, 
model_fn=self.get_model,\n distribution=None)\n\n # First, special case, for multi-replica distributed training, batch norm\n # is not aggregated globally. So it is expected to have different weights.\n if (self.with_batch_norm and\n distribution.num_replicas_in_sync > 1):\n with self.assertRaises(AssertionError):\n compare_results(results_with_ds, results_without_ds, distribution,\n testcase=self)\n else:\n compare_results(results_with_ds, results_without_ds, distribution,\n testcase=self)\n\n def run_dynamic_lr_test(self, distribution):\n with self.cached_session():\n self.set_up_test_config()\n self.skip_unsupported_test_configuration(distribution)\n\n x_train, y_train, _ = self.get_data()\n model = self.get_model()\n initial_weights = model.get_weights()\n update_freq = None\n\n if (isinstance(distribution, tpu_strategy.TPUStrategy) and\n distribution.extended.steps_per_run > 1):\n # For TPUStrategy with steps_per_run > 1, the callback is not invoked\n # every step. So, to compare the CPU/TPU, we let the CPU to behave the\n # same as TPU.\n update_freq = distribution.extended.steps_per_run\n\n def input_fn(dist):\n \"\"\"Generates training test given test configuration.\"\"\"\n training_epochs = 2\n global_batch_size = 64\n batch_size = get_batch_size(global_batch_size, dist)\n\n training_inputs = {\n 'batch_size': batch_size,\n 'x': x_train,\n 'y': y_train,\n 'epochs': training_epochs,\n 'shuffle': False,\n 'callbacks': [LearningRateBatchScheduler(update_freq)],\n 'validation_data': (x_train, y_train)\n }\n # In this test case, we do not care eval and predict.\n eval_inputs, predict_inputs = None, None\n return training_inputs, eval_inputs, predict_inputs\n\n results_with_ds = fit_eval_and_predict(\n initial_weights, input_fn=input_fn, model_fn=self.get_model,\n distribution=distribution)\n results_without_ds = fit_eval_and_predict(\n initial_weights, input_fn=input_fn, model_fn=self.get_model,\n distribution=None)\n compare_results(results_with_ds, results_without_ds, distribution,\n testcase=self)\n\n\nclass TestDistributionStrategyEmbeddingModelCorrectnessBase(\n TestDistributionStrategyCorrectnessBase):\n \"\"\"Base class to test correctness of Keras models with embedding layers.\"\"\"\n\n def get_data(self,\n count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),\n min_words=5,\n max_words=10,\n max_word_id=19,\n num_classes=2):\n distribution = []\n for _ in range(num_classes):\n dist = np.abs(np.random.randn(max_word_id))\n dist /= np.sum(dist)\n distribution.append(dist)\n\n features = []\n labels = []\n for _ in range(count):\n label = np.random.randint(0, num_classes, size=1)[0]\n num_words = np.random.randint(min_words, max_words, size=1)[0]\n word_ids = np.random.choice(\n max_word_id, size=num_words, replace=True, p=distribution[label])\n word_ids = word_ids\n labels.append(label)\n features.append(word_ids)\n\n features = keras.preprocessing.sequence.pad_sequences(\n features, maxlen=max_words)\n x_train = np.asarray(features, dtype=np.float32)\n y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))\n x_predict = x_train\n return x_train, y_train, x_predict\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "numpy.sum", "numpy.random.seed", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.eager.test.main", "numpy.reshape", "numpy.asarray", "numpy.random.choice", "tensorflow.python.keras.backend.set_image_data_format", "tensorflow.python.keras.preprocessing.sequence.pad_sequences", "numpy.random.randn", "tensorflow.python.keras.engine.distributed_training_utils.global_batch_size_supported", "tensorflow.python.keras.backend.set_value", "numpy.array", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.contrib.distribute.python.combinations.combine", "numpy.random.randint" ] ]
Thanh-Nguyen91/Facial_keypoints_detection
[ "fe0a55d9cbb0d55f1d35fe21189a8edb0fb7e637" ]
[ "models.py" ]
[ "## TODO: define the convolutional neural network architecture\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# can use the below import should you choose to initialize the weights of your Net\nimport torch.nn.init as I\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n \n ## TODO: Define all the layers of this CNN, the only requirements are:\n ## 1. This network takes in a square (same width and height), grayscale image as input\n ## 2. It ends with a linear layer that represents the keypoints\n ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs\n \n # As an example, you've been given a convolutional layer, which you may (but don't have to) change:\n # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel\n \n self.conv1 = nn.Conv2d(1, 32, 3)\n self.conv2 = nn.Conv2d(32,64,2) \n self.conv3 = nn.Conv2d(64,128,2) \n self.conv4 = nn.Conv2d(128,256,2)\n self.conv5 = nn.Conv2d(256,512,2)\n \n self.pool = nn.MaxPool2d(2,2)\n \n self.drop = nn.Dropout(0.2)\n \n self.fc1 = nn.Linear(512*6*6,1000)\n self.fc2 = nn.Linear(1000,136)\n \n ## Note that among the layers to add, consider including:\n # maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting\n \n\n \n def forward(self, x):\n ## TODO: Define the feedforward behavior of this model\n ## x is the input image and, as an example, here you may choose to include a pool/conv step:\n \n # Input 1x224x224 => 32x222x222 => 32x111x111\n x = self.pool(F.relu(self.conv1(x)))\n x = self.drop(x)\n \n # 32x111x111 => 64x110x110 => 64x55x55\n x = self.pool(F.relu(self.conv2(x)))\n x = self.drop(x)\n \n # 64x55x55 => 128x54x54 => 128x27x27\n x = self.pool(F.relu(self.conv3(x)))\n x = self.drop(x)\n \n # 128x27x27 => 256x26x26 => 256x13x13\n x = self.pool(F.relu(self.conv4(x)))\n x = self.drop(x)\n \n # 256x13x13 => 512x12x12 => 512x6x6\n x = self.pool(F.relu(self.conv5(x)))\n x = self.drop(x)\n \n # flatten\n x = x.view(x.size(0), -1)\n \n x = F.relu(self.fc1(x))\n x = self.drop(x)\n x = self.fc2(x)\n \n \n # a modified x, having gone through all the layers of your model, should be returned\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Dropout", "torch.nn.Conv2d" ] ]
UM-LoCoLab/NeuroLocoMiddleware
[ "0dfedeed8d6d8a41518b357b33ee92324b5029c3" ]
[ "test/test_linear_filter.py" ]
[ "from FindLibrariesWarning import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport LinearFilter as lf\nimport SysID as sid\nimport frequency_analysis as fa\n\n\n\ndef run_test(times, chirp, system, axs, **kwargs):\n\tN = len(times)\n\tus, ys = np.zeros((N,)), np.zeros((N,))\n\tfor i, t in enumerate(times):\n\t\tu = chirp.next(t)\n\t\ty = system.next(u)\n\t\tus[i]=u\n\t\tys[i]=y\n\tfa.fft_bode_plot(times, ys, us, axs=axs, **kwargs)\n\taxs[-1].set_xlim([1,100])\n\tfig = plt.figure()\n\tplt.plot(times, us)\n\tplt.plot(times, ys)\n\n\ndef plot_biquad():\n\tprint(dir(lf))\n\tprint(dir(sid))\n\tprint(dir(fa))\n\tfig,axs = fa.init_bode_plot()\n\tT = 100\n\ttest_chirp = sid.Chirp(.3, 30, T)\n\ttest_system = lf.BiQuad(7,0.2, 7,0.9)\n\tfa.tf_bode_plot(np.logspace(0.1,3,1000), \n\t\tlambda s: (2*np.pi*7)**2/(2*np.pi*7)**2 * (s**2 + 2*0.2*(2*np.pi)*7*s + (2*np.pi*7)**2)/(s**2 + 2*0.9*(2*np.pi*7)*s + (2*np.pi*7)**2),\n\t\t axs=axs, color='k' )\n\tN = 300*T\n\ttimes = np.linspace(0,T,N)\n\ttest_system.discretize_taylor_3(times[1]-times[0])\n\t# us, ys = np.zeros((N,)), np.zeros((N,))\n\t# for i, t in enumerate(times):\n\t# \tu = test_chirp.next(t)\n\t# \ty = test_system.next(u)\n\t# \tus[i]=u\n\t# \tys[i]=y\n\t# fa.fft_bode_plot(times, ys, us, axs=axs)\n\t# axs[-1].set_xlim([1,100])\n\trun_test(times, test_chirp, test_system, axs, label='taylor')\n\ttest_system.discretize_substep_euler(times[1]-times[0], N=10)\n\trun_test(times, test_chirp, test_system, axs, label='euler, N=10')\n\ttest_system.discretize_substep_euler(times[1]-times[0], N=1000)\n\trun_test(times, test_chirp, test_system, axs, label=\"euler, N=1000\")\n\ttest_system.discretize_tustin(times[1]-times[0])\n\trun_test(times, test_chirp, test_system, axs, label=\"tustin\")\n\ttest_system.discretize_N_tustin(times[1]-times[0], N=10)\n\trun_test(times, test_chirp, test_system, axs, label=\"tustin, N=10\")\n\ttest_system.discretize_N_tustin(times[1]-times[0], N=100)\n\trun_test(times, test_chirp, test_system, axs, label=\"tustin, N=100\")\n\ttest_system = lf.DiscreteBiQuad(7,0.2, 7,0.9, times[1]-times[0])\n\trun_test(times, test_chirp, test_system, axs, label=\"tustin, N=100\")\n\taxs[0].legend()\n\tplt.show()\n\n\ndef main():\n\tplot_biquad()\n\nif __name__ == '__main__':\n\tmain()" ]
[ [ "numpy.linspace", "numpy.logspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
SharpZKing/PaddleX
[ "e1306a90811df2cca24fa5fb3bfdf5a170037f17" ]
[ "paddlex/interpret/core/_session_preparation.py" ]
[ "#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport os\nimport os.path as osp\nimport paddle.fluid as fluid\nimport paddlex as pdx\nimport numpy as np\nfrom paddle.fluid.param_attr import ParamAttr\nfrom paddlex.interpret.as_data_reader.readers import preprocess_image\n\n\ndef gen_user_home():\n if \"HOME\" in os.environ:\n home_path = os.environ[\"HOME\"]\n if os.path.exists(home_path) and os.path.isdir(home_path):\n return home_path\n return os.path.expanduser('~')\n\n\ndef paddle_get_fc_weights(var_name=\"fc_0.w_0\"):\n fc_weights = fluid.global_scope().find_var(var_name).get_tensor()\n return np.array(fc_weights)\n\n\ndef paddle_resize(extracted_features, outsize):\n resized_features = fluid.layers.resize_bilinear(extracted_features,\n outsize)\n return resized_features\n\n\ndef get_precomputed_normlime_weights():\n root_path = gen_user_home()\n root_path = osp.join(root_path, '.paddlex')\n h_pre_models = osp.join(root_path, \"pre_models\")\n normlime_weights_file = osp.join(\n h_pre_models, \"normlime_weights_imagenet_resnet50vc.npy\")\n return np.load(normlime_weights_file, allow_pickle=True).item()\n\n\ndef compute_features_for_kmeans(data_content):\n root_path = gen_user_home()\n root_path = osp.join(root_path, '.paddlex')\n h_pre_models = osp.join(root_path, \"pre_models\")\n if not osp.exists(h_pre_models):\n if not osp.exists(root_path):\n os.makedirs(root_path)\n url = \"https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz\"\n pdx.utils.download_and_decompress(url, path=root_path)\n\n def conv_bn_layer(input,\n num_filters,\n filter_size,\n stride=1,\n groups=1,\n act=None,\n name=None,\n is_test=True,\n global_name='for_kmeans_'):\n conv = fluid.layers.conv2d(\n input=input,\n num_filters=num_filters,\n filter_size=filter_size,\n stride=stride,\n padding=(filter_size - 1) // 2,\n groups=groups,\n act=None,\n param_attr=ParamAttr(name=global_name + name + \"_weights\"),\n bias_attr=False,\n name=global_name + name + '.conv2d.output.1')\n if name == \"conv1\":\n bn_name = \"bn_\" + name\n else:\n bn_name = \"bn\" + name[3:]\n return fluid.layers.batch_norm(\n input=conv,\n act=act,\n name=global_name + bn_name + '.output.1',\n param_attr=ParamAttr(global_name + bn_name + '_scale'),\n bias_attr=ParamAttr(global_name + bn_name + '_offset'),\n moving_mean_name=global_name + bn_name + '_mean',\n moving_variance_name=global_name + bn_name + '_variance',\n use_global_stats=is_test)\n\n startup_prog = fluid.default_startup_program().clone(for_test=True)\n prog = fluid.Program()\n with fluid.program_guard(prog, startup_prog):\n with fluid.unique_name.guard():\n image_op = fluid.data(\n name='image', shape=[None, 3, 224, 224], dtype='float32')\n\n conv = conv_bn_layer(\n input=image_op,\n num_filters=32,\n filter_size=3,\n stride=2,\n act='relu',\n name='conv1_1')\n conv = conv_bn_layer(\n input=conv,\n num_filters=32,\n filter_size=3,\n stride=1,\n act='relu',\n name='conv1_2')\n conv = conv_bn_layer(\n input=conv,\n 
num_filters=64,\n filter_size=3,\n stride=1,\n act='relu',\n name='conv1_3')\n extracted_features = conv\n resized_features = fluid.layers.resize_bilinear(extracted_features,\n image_op.shape[2:])\n\n gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))\n place = fluid.CUDAPlace(gpu_id)\n # place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n fluid.io.load_persistables(exe, h_pre_models, prog)\n\n images = preprocess_image(\n data_content) # transpose to [N, 3, H, W], scaled to [0.0, 1.0]\n result = exe.run(prog,\n fetch_list=[resized_features],\n feed={'image': images})\n\n return result[0][0]\n" ]
[ [ "numpy.load", "numpy.array" ] ]
hannah-thompson/Sound-Visualization
[ "040e13febd5220378e1df2d3e227dc32624aa6b2" ]
[ "code/bokeh_practice.py" ]
[ "import pyaudio\nimport wave\nfrom bokeh.layouts import gridplot\nfrom bokeh.plotting import figure, output_file, show\nimport numpy as np\n\n\n# example code for PyAudio\n#p = pyaudio.PyAudio()\n\n#stream = p.open() # need to put arguments in here\n\n# Code to play around with Bokeh\n\n# fake data\nN = 100\nx = np.linspace(0, 4*np.pi, N)\ny0 = np.sin(x)\n\n\noutput_file(\"sound.html\")\n\n# example plot\ns1 = figure(width=250, plot_height=250, title=None)\ns1.circle(x, y0, size=10, color=\"navy\", alpha=0.5)\n\n#display plot\nshow(s1)" ]
[ [ "numpy.linspace", "numpy.sin" ] ]
dsei210s19-applied-ml-and-dm/OPTMOD
[ "85edea79284097cfaeeaa8eb0be3c9595b4a733f" ]
[ "tests/test_multiply.py" ]
[ "import optmod\nimport unittest\nimport numpy as np\n\nclass TestMultiply(unittest.TestCase):\n\n def test_contruction(self):\n\n x = optmod.variable.VariableScalar(name='x')\n f = optmod.function.multiply([x, optmod.expression.make_Expression(1.)])\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertEqual(f.name, 'multiply')\n self.assertEqual(len(f.arguments), 2)\n self.assertTrue(f.arguments[0] is x)\n self.assertTrue(f.arguments[1].is_constant())\n self.assertEqual(f.arguments[1].get_value(), 1.)\n\n self.assertRaises(AssertionError, optmod.function.multiply, [1., x, 2.])\n self.assertRaises(AssertionError, optmod.function.multiply, [x])\n self.assertRaises(TypeError, optmod.function.multiply, x)\n\n def test_constant(self):\n\n a = optmod.constant.Constant(4.)\n b = optmod.constant.Constant(5.)\n\n f = a*b\n self.assertTrue(f.is_constant(20.))\n \n def test_scalar_scalar(self):\n \n rn = optmod.utils.repr_number\n \n x = optmod.variable.VariableScalar(name='x', value=2.)\n y = optmod.variable.VariableScalar(name='y', value=3.)\n\n f = x*2\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0] is x)\n self.assertTrue(f.arguments[1].is_constant())\n self.assertEqual(f.get_value(), 4.)\n self.assertEqual(str(f), 'x*%s' %rn(2.))\n\n f = 2.*x\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0] is x)\n self.assertTrue(f.arguments[1].is_constant())\n self.assertEqual(f.get_value(), 4.)\n self.assertEqual(str(f), 'x*%s' %rn(2.))\n\n f = x*y\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0] is x)\n self.assertTrue(f.arguments[1] is y)\n self.assertEqual(f.get_value(), 6)\n self.assertEqual(str(f), 'x*y')\n\n f = x*(y+3.)\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0] is x)\n self.assertTrue(f.arguments[1].is_function())\n self.assertEqual(f.get_value(), 12)\n self.assertEqual(str(f), 'x*(y + %s)' %rn(3.))\n\n f = (1-y)*x\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0].is_function())\n self.assertTrue(f.arguments[1] is x)\n self.assertEqual(f.get_value(), -4)\n self.assertEqual(str(f), '(%s + y*%s)*x' %(rn(1.), rn(-1.)))\n\n f = (4.*x)*(3*y)\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0].is_function())\n self.assertTrue(f.arguments[1].is_function())\n self.assertEqual(f.get_value(), 72)\n self.assertEqual(str(f), 'x*%s*y*%s' %(rn(4), rn(3)))\n\n f = -x*5\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[1].is_constant())\n self.assertTrue(f.arguments[0].is_variable())\n self.assertEqual(str(f), 'x*%s' %rn(-5))\n self.assertEqual(f.get_value(), -10.)\n\n f = y*-x\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0] is y)\n self.assertTrue(f.arguments[1].is_function())\n self.assertEqual(str(f), 'y*x*%s' %rn(-1))\n self.assertEqual(f.get_value(), -6.)\n\n f = optmod.sin(x)*y\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0].is_function())\n self.assertTrue(f.arguments[1] is y)\n self.assertEqual(str(f), 'sin(x)*y')\n self.assertEqual(f.get_value(), np.sin(2.)*3.)\n\n f = x*optmod.sin(y)\n self.assertTrue(isinstance(f, optmod.function.multiply))\n self.assertTrue(f.arguments[0] is x)\n self.assertTrue(f.arguments[1].is_function())\n self.assertEqual(str(f), 'x*sin(y)')\n self.assertEqual(f.get_value(), 
np.sin(3.)*2.)\n \n def test_scalar_matrix(self):\n\n rn = optmod.utils.repr_number\n \n value = [[1., 2., 3.], [4., 5., 6.]]\n x = optmod.variable.VariableScalar(name='x', value=2.)\n y = optmod.variable.VariableMatrix(name='y', value=value)\n r = np.random.random((2,3))\n \n def test_matrix_matrix(self):\n\n pass\n\n def test_one(self):\n\n x = optmod.variable.VariableScalar(name='x', value=3.)\n \n def test_derivative(self):\n\n rn = optmod.utils.repr_number\n\n x = optmod.variable.VariableScalar(name='x', value=3.)\n y = optmod.variable.VariableScalar(name='y', value=4.)\n z = optmod.variable.VariableScalar(name='z', value=5.)\n\n f = x*x\n fx = f.get_derivative(x)\n self.assertEqual(fx.get_value(), 2.*3.)\n self.assertEqual(str(fx), 'x + x')\n\n f = x*y\n fx = f.get_derivative(x)\n fy = f.get_derivative(y)\n fz = f.get_derivative(z)\n self.assertTrue(fx is y)\n self.assertTrue(fy is x)\n self.assertTrue(fz.is_constant())\n self.assertEqual(fz.get_value(), 0)\n\n f = x*y*z\n fx = f.get_derivative(x)\n fy = f.get_derivative(y)\n fz = f.get_derivative(z)\n\n self.assertEqual(str(fx), 'z*y')\n self.assertEqual(fx.get_value(), 20.)\n self.assertEqual(str(fy), 'z*x')\n self.assertEqual(fy.get_value(), 15.)\n self.assertEqual(str(fz), 'x*y')\n self.assertEqual(fz.get_value(), 12.)\n\n def test_analyze(self):\n\n x = optmod.variable.VariableScalar(name='x', value=3.)\n y = optmod.variable.VariableScalar(name='y', value=4.)\n z = optmod.variable.VariableScalar(name='z', value=5.)\n\n f = 3*x\n prop = f.__analyze__()\n self.assertTrue(prop['affine'])\n self.assertEqual(prop['b'], 0.)\n self.assertEqual(len(prop['a']), 1)\n self.assertEqual(prop['a'][x], 3.)\n\n f = x*7\n prop = f.__analyze__()\n self.assertTrue(prop['affine'])\n self.assertEqual(prop['b'], 0.)\n self.assertEqual(len(prop['a']), 1)\n self.assertEqual(prop['a'][x], 7.)\n\n f = y*x\n prop = f.__analyze__()\n self.assertFalse(prop['affine'])\n self.assertEqual(prop['b'], 0.)\n self.assertEqual(len(prop['a']), 2)\n self.assertTrue(x in prop['a'])\n self.assertTrue(y in prop['a'])\n\n f = y*x*z\n prop = f.__analyze__()\n self.assertFalse(prop['affine'])\n self.assertEqual(prop['b'], 0.)\n self.assertEqual(len(prop['a']), 3)\n self.assertTrue(x in prop['a'])\n self.assertTrue(y in prop['a'])\n self.assertTrue(z in prop['a']) \n\n def test_std_components(self):\n\n x = optmod.variable.VariableScalar(name='x', value=3.)\n y = optmod.variable.VariableScalar(name='y', value=4.)\n z = optmod.variable.VariableScalar(name='z', value=5.)\n\n f = x*y\n comp = f.__get_std_components__()\n phi = comp['phi']\n gphi_list = comp['gphi_list']\n Hphi_list = comp['Hphi_list']\n\n self.assertTrue(phi is f)\n\n self.assertEqual(len(gphi_list), 2)\n\n v, exp = gphi_list[0]\n self.assertTrue(v is x)\n self.assertTrue(exp is y)\n \n v, exp = gphi_list[1]\n self.assertTrue(v is y)\n self.assertTrue(exp is x)\n\n self.assertEqual(len(Hphi_list), 1)\n\n v1, v2, exp = Hphi_list[0]\n self.assertTrue(v1 is x)\n self.assertTrue(v2 is y)\n self.assertTrue(exp.is_constant(1.))\n\n f = x*x\n comp = f.__get_std_components__()\n phi = comp['phi']\n gphi_list = comp['gphi_list']\n Hphi_list = comp['Hphi_list']\n\n self.assertTrue(phi is f)\n\n self.assertEqual(len(gphi_list), 1)\n\n v, exp = gphi_list[0]\n self.assertTrue(v is x)\n self.assertTrue(str(exp), 'x + x')\n \n self.assertEqual(len(Hphi_list), 1)\n\n v1, v2, exp = Hphi_list[0]\n self.assertTrue(v1 is x)\n self.assertTrue(v2 is x)\n self.assertTrue(exp.is_constant(2.))\n \n \n" ]
[ [ "numpy.random.random", "numpy.sin" ] ]
vrushank-agrawal/video_editor_BX23
[ "3a458114f499e0ba3d1c61afde2b9d30bc76459b" ]
[ "src/audio/aubio/python/tests/test_onset.py" ]
[ "#! /usr/bin/env python\n\nfrom numpy.testing import TestCase, assert_equal, assert_almost_equal\nfrom aubio import onset, fvec\n\nclass aubio_onset_default(TestCase):\n\n def test_members(self):\n o = onset()\n assert_equal ([o.buf_size, o.hop_size, o.method, o.samplerate],\n [1024,512,'default',44100])\n\nclass aubio_onset_params(TestCase):\n\n samplerate = 44100\n\n def setUp(self):\n self.o = onset(samplerate = self.samplerate)\n\n def test_get_delay(self):\n self.assertGreater(self.o.get_delay(), 0)\n\n def test_get_delay_s(self):\n self.assertGreater(self.o.get_delay_s(), 0.)\n\n def test_get_delay_ms(self):\n self.assertGreater(self.o.get_delay_ms(), 0.)\n\n def test_get_minioi(self):\n self.assertGreater(self.o.get_minioi(), 0)\n\n def test_get_minioi_s(self):\n self.assertGreater(self.o.get_minioi_s(), 0.)\n\n def test_get_minioi_ms(self):\n self.assertGreater(self.o.get_minioi_ms(), 0.)\n\n def test_get_threshold(self):\n self.assertGreater(self.o.get_threshold(), 0.)\n\n def test_set_delay(self):\n val = 256\n self.o.set_delay(val)\n assert_equal (self.o.get_delay(), val)\n\n def test_set_delay_s(self):\n val = .05\n self.o.set_delay_s(val)\n assert_almost_equal (self.o.get_delay_s(), val)\n\n def test_set_delay_ms(self):\n val = 50.\n self.o.set_delay_ms(val)\n assert_almost_equal (self.o.get_delay_ms(), val)\n\n def test_set_minioi(self):\n val = 200\n self.o.set_minioi(val)\n assert_equal (self.o.get_minioi(), val)\n\n def test_set_minioi_s(self):\n val = 0.04\n self.o.set_minioi_s(val)\n assert_almost_equal (self.o.get_minioi_s(), val)\n\n def test_set_minioi_ms(self):\n val = 40.\n self.o.set_minioi_ms(val)\n assert_almost_equal (self.o.get_minioi_ms(), val)\n\n def test_set_threshold(self):\n val = 0.2\n self.o.set_threshold(val)\n assert_almost_equal (self.o.get_threshold(), val)\n\nclass aubio_onset_96000(aubio_onset_params):\n samplerate = 96000\n\nclass aubio_onset_32000(aubio_onset_params):\n samplerate = 32000\n\nclass aubio_onset_8000(aubio_onset_params):\n samplerate = 8000\n\nclass aubio_onset_coverate(TestCase):\n # extra tests to execute the C routines and improve coverage\n\n def test_all_methods(self):\n for method in ['default', 'energy', 'hfc', 'complexdomain', 'complex',\n 'phase', 'wphase', 'mkl', 'kl', 'specflux', 'specdiff',\n 'old_default']:\n o = onset(method=method, buf_size=512, hop_size=256)\n o(fvec(256))\n\n def test_get_methods(self):\n o = onset(method='default', buf_size=512, hop_size=256)\n\n assert o.get_silence() == -70\n o.set_silence(-20)\n assert_almost_equal(o.get_silence(), -20)\n\n assert o.get_compression() == 1\n o.set_compression(.99)\n assert_almost_equal(o.get_compression(), .99)\n\n assert o.get_awhitening() == 0\n o.set_awhitening(1)\n assert o.get_awhitening() == 1\n\n o.get_last()\n o.get_last_ms()\n o.get_last_s()\n o.get_descriptor()\n o.get_thresholded_descriptor()\n\n\nif __name__ == '__main__':\n from unittest import main\n main()\n" ]
[ [ "numpy.testing.assert_equal" ] ]
xinkeyu/stanfordnlp
[ "f678a6c4003c2e48caf7957b9aba8bc8fa9a3327" ]
[ "stanfordnlp/models/pos/data.py" ]
[ "import random\nimport torch\n\nfrom stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all\nfrom stanfordnlp.models.common import conll\nfrom stanfordnlp.models.common.vocab import PAD_ID, VOCAB_PREFIX\nfrom stanfordnlp.models.pos.vocab import CharVocab, WordVocab, XPOSVocab, FeatureVocab, MultiVocab\nfrom stanfordnlp.models.pos.xpos_vocab_factory import xpos_vocab_factory\nfrom stanfordnlp.pipeline.doc import Document\n\nclass DataLoader:\n def __init__(self, input_src, batch_size, args, pretrain, vocab=None, evaluation=False, sort_during_eval=False):\n self.batch_size = batch_size\n self.args = args\n self.eval = evaluation\n self.shuffled = not self.eval\n self.sort_during_eval = sort_during_eval\n\n # check if input source is a file or a Document object\n if isinstance(input_src, str):\n filename = input_src\n assert filename.endswith('conllu'), \"Loaded file must be conllu file.\"\n self.conll, data = self.load_file(filename, evaluation=self.eval)\n elif isinstance(input_src, Document):\n filename = None\n doc = input_src\n self.conll, data = self.load_doc(doc)\n\n # handle vocab\n if vocab is None:\n self.vocab = self.init_vocab(data)\n else:\n self.vocab = vocab\n self.pretrain_vocab = pretrain.vocab\n\n # filter and sample data\n if args.get('sample_train', 1.0) < 1.0 and not self.eval:\n keep = int(args['sample_train'] * len(data))\n data = random.sample(data, keep)\n print(\"Subsample training set with rate {:g}\".format(args['sample_train']))\n\n data = self.preprocess(data, self.vocab, self.pretrain_vocab, args)\n # shuffle for training\n if self.shuffled:\n random.shuffle(data)\n self.num_examples = len(data)\n\n # chunk into batches\n self.data = self.chunk_batches(data)\n if filename is not None:\n print(\"{} batches created for {}.\".format(len(self.data), filename))\n\n def init_vocab(self, data):\n assert self.eval == False # for eval vocab must exist\n charvocab = CharVocab(data, self.args['shorthand'])\n wordvocab = WordVocab(data, self.args['shorthand'], cutoff=7, lower=True)\n uposvocab = WordVocab(data, self.args['shorthand'], idx=1)\n xposvocab = xpos_vocab_factory(data, self.args['shorthand'])\n featsvocab = FeatureVocab(data, self.args['shorthand'], idx=3)\n vocab = MultiVocab({'char': charvocab,\n 'word': wordvocab,\n 'upos': uposvocab,\n 'xpos': xposvocab,\n 'feats': featsvocab})\n return vocab\n\n def preprocess(self, data, vocab, pretrain_vocab, args):\n processed = []\n for sent in data:\n processed_sent = [vocab['word'].map([w[0] for w in sent])]\n processed_sent += [[vocab['char'].map([x for x in w[0]]) for w in sent]]\n processed_sent += [vocab['upos'].map([w[1] for w in sent])]\n processed_sent += [vocab['xpos'].map([w[2] for w in sent])]\n processed_sent += [vocab['feats'].map([w[3] for w in sent])]\n processed_sent += [pretrain_vocab.map([w[0] for w in sent])]\n processed.append(processed_sent)\n return processed\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n \"\"\" Get a batch with index. 
\"\"\"\n if not isinstance(key, int):\n raise TypeError\n if key < 0 or key >= len(self.data):\n raise IndexError\n batch = self.data[key]\n batch_size = len(batch)\n batch = list(zip(*batch))\n assert len(batch) == 6\n\n # sort sentences by lens for easy RNN operations\n lens = [len(x) for x in batch[0]]\n batch, orig_idx = sort_all(batch, lens)\n\n # sort words by lens for easy char-RNN operations\n batch_words = [w for sent in batch[1] for w in sent]\n word_lens = [len(x) for x in batch_words]\n batch_words, word_orig_idx = sort_all([batch_words], word_lens)\n batch_words = batch_words[0]\n word_lens = [len(x) for x in batch_words]\n\n # convert to tensors\n words = batch[0]\n words = get_long_tensor(words, batch_size)\n words_mask = torch.eq(words, PAD_ID)\n wordchars = get_long_tensor(batch_words, len(word_lens))\n wordchars_mask = torch.eq(wordchars, PAD_ID)\n\n upos = get_long_tensor(batch[2], batch_size)\n xpos = get_long_tensor(batch[3], batch_size)\n ufeats = get_long_tensor(batch[4], batch_size)\n pretrained = get_long_tensor(batch[5], batch_size)\n sentlens = [len(x) for x in batch[0]]\n return words, words_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, orig_idx, word_orig_idx, sentlens, word_lens\n\n def __iter__(self):\n for i in range(self.__len__()):\n yield self.__getitem__(i)\n\n def load_file(self, filename, evaluation=False):\n conll_file = conll.CoNLLFile(filename)\n data = conll_file.get(['word', 'upos', 'xpos', 'feats'], as_sentences=True)\n return conll_file, data\n\n def load_doc(self, doc):\n data = doc.conll_file.get(['word', 'upos', 'xpos', 'feats'], as_sentences=True)\n return doc.conll_file, data\n\n def reshuffle(self):\n data = [y for x in self.data for y in x]\n self.data = self.chunk_batches(data)\n random.shuffle(self.data)\n\n def chunk_batches(self, data):\n res = []\n\n if not self.eval:\n # sort sentences (roughly) by length for better memory utilization\n data = sorted(data, key = lambda x: len(x[0]), reverse=random.random() > .5)\n elif self.sort_during_eval:\n (data, ), self.data_orig_idx = sort_all([data], [len(x[0]) for x in data])\n\n current = []\n currentlen = 0\n for x in data:\n if len(x[0]) + currentlen > self.batch_size:\n res.append(current)\n current = []\n currentlen = 0\n current.append(x)\n currentlen += len(x[0])\n\n if currentlen > 0:\n res.append(current)\n\n return res\n" ]
[ [ "torch.eq" ] ]
KarolBedkowski/marketools
[ "4f4a25407916bf9077c7e264ab6e6053d893b218" ]
[ "tests/test_analysis/test_moving_average.py" ]
[ "import pytest\nfrom marketools.analysis.moving_average import simple_moving_average, weighted_moving_average, exponential_moving_average\nimport pandas as pd\nimport numpy as np\n\n\[email protected]\ndef NineDayPrices():\n prices = np.array([3.37, 4.21, 4.10, 3.90, 3.79, 3.76, 3.65, 3.75, 3.88])\n df = pd.DataFrame(prices, columns=['Close'])\n return df\n\n\[email protected](\"window,expected\", [\n (2, np.array([3.79, 4.155, 4.00, 3.845, 3.775, 3.705, 3.70, 3.815])),\n (3, np.array([3.89, 4.07, 3.93, 3.82, 3.73, 3.72, 3.76]))\n])\ndef test_simple_moving_average(NineDayPrices, window, expected):\n output = simple_moving_average(NineDayPrices, window=window)\n output.dropna(inplace=True)\n output_arr = output.to_numpy()\n\n for i in range(len(expected)):\n assert pytest.approx(expected[i], output_arr[i])\n\n assert len(expected) == len(output_arr)\n\n\[email protected](\"window,expected\", [\n (2, np.array([3.93, 4.1367, 3.967, 3.83, 3.77, 3.687, 3.72, 3.84])),\n (3, np.array([4.015, 4.018, 3.88, 3.79, 3.71, 3.718, 3.798])),\n])\ndef test_weighted_moving_average(NineDayPrices, window, expected):\n output = weighted_moving_average(NineDayPrices, window=window)\n output.dropna(inplace=True)\n output_arr = output.to_numpy()\n\n for i in range(len(expected)):\n assert pytest.approx(expected[i], output_arr[i])\n\n assert len(expected) == len(output_arr)\n\n\[email protected](\"span,expected\", [\n (2, np.array([3.37, 3.93, 4.043333333, 3.947777778, 3.842592593, 3.787530864, 3.695843621, 3.731947874, 3.830649291])),\n (3, np.array([3.37, 3.79, 3.945, 3.9225, 3.85625, 3.808125, 3.7290625, 3.73953125, 3.809765625])),\n])\ndef test_exponential_moving_average(NineDayPrices, span, expected):\n output = exponential_moving_average(NineDayPrices, window=span)\n output.dropna(inplace=True)\n output_arr = output.to_numpy()\n\n for i in range(len(expected)):\n assert pytest.approx(expected[i], output_arr[i])\n\n assert len(expected) == len(output_arr)\n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
sachio222/aha4
[ "ec378fe1bace85e325ad7cb8686b8ba321dc97d0" ]
[ "train.py" ]
[ "\"\"\"\nTitle: train.py\nAuthor: J. Krajewski\nCopyright: 2020\nLicense:\nDescription: Training for model implemented from 2019 paper\n \"AHA an Artificial Hippocampal Algorithm for Episodic Machine \n Learning\" by Kowadlo, Ahmed and Rawlinson.\n\n Runs image through EC, DG / EC-->CA3, CA3, and CA1. This network\n accepts multiple, varied examples of an original sample, and\n reconstructs the original sample, given multiple samples that vary\n from the origin sample to different degrees.\n\n Pretraining was run on EC prior to running this model (see\n pretrain.py)\n \nThanks: Special thanks to Gideon Kowaldo and David Rawlinson for\n putting me up to the task of recreating their model in Pytorch!\n As well as @ptrblk in the Pytorch forums for always coming through\n with answers when I needed them. \n\"\"\"\n\nfrom pathlib2 import Path\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nimport torchvision\nfrom torchvision import transforms\nfrom torchvision.datasets import Omniglot\n\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n# User modules\nfrom model import modules # pylint: disable=no-name-in-module\nfrom utils import utils # pylint: disable=RP0003, F0401\n\n# Clear terminal with ANSI <ESC> c \"\\033c\"\n# print(\"\\033c\", end=\"\") # (Doesn't fully clear screen on PC)\nprint(\"\\033[H\\033[2J\", end=\"\")\n\n# Initialize paths to json parameters\ndata_path = Path().absolute() / \"data\"\nmodel_path = Path().absolute() / \"experiments/train/\"\npretrain_path = Path().absolute() / \"experiments/pretrain/\"\njson_path = model_path / \"params.json\"\n\n# Load params json\nassert json_path.is_file(\n), f\"\\n\\nERROR: No params.json file found at {json_path}\\n\"\nparams = utils.Params(json_path)\n\n# If GPU, write to params file\nparams.cuda = torch.cuda.is_available()\n\n# Set random seed\ntorch.manual_seed(42)\nif params.cuda:\n torch.cuda.manual_seed(42)\n # Update num_workers to 2 if running on GPU\n params.num_workers = 2\n\n\ndef train(model,\n dataloader,\n ectoca3_optimizer,\n ca1_optimizer,\n ectoca3_loss_fn,\n ca1_loss_fn,\n params,\n autosave=False,\n train_mode=True):\n\n # Set model to train or eval.\n if not train_mode:\n model.eval()\n\n # Load weights\n utils.load_checkpoint(model_path, step4_ectoca3, name=\"ectoca3_weights\")\n utils.load_checkpoint(model_path, step5_ca1, name=\"ca1_weights\")\n\n # Custom loader for ca3.\n ca3_weights_path = model_path / \"ca3_weights.pth.tar\"\n ca3_weights = torch.load(ca3_weights_path.as_posix())\n step3_ca3.W = ca3_weights\n else:\n model.train()\n\n for epoch in range(params.num_epochs):\n for i, (x, _) in enumerate(dataloader):\n if params.cuda:\n x = x.cuda(non_blocking=True)\n\n #=============RUN EC=============#\n\n with torch.no_grad():\n ec_maxpool_flat = step1_ec(x, k=4)\n\n #=====MONITORING=====#\n\n # ec_out_weight = step1_ec.encoder.weight.data\n ## DISPLAY\n # utils.animate_weights(ec_out_weight, auto=False)\n\n # for i, out in enumerate(ec_maxpool_flat):\n # print(out.shape)\n # ec_grid = torchvision.utils.make_grid(out, nrow=11)\n # utils.animate_weights(ec_grid, i)\n\n #=====END MONIT.=====#\n\n #=============END EC=============#\n\n #=============RUN DENTATE GYRUS=============#\n with torch.no_grad():\n dg_sparse = step2_dg(ec_maxpool_flat, k=10)\n\n ## DISPLAY \n # utils.showme(dg_sparse)\n # exit()\n\n # Polarize output from (0, 1) to (-1, 1) for step3_ca3\n dg_sparse_dressed = 
modules.all_dressed(dg_sparse)\n\n ## DISPLAY \n # utils.showme(dg_sparse_dressed)\n # exit()\n\n #=============END DENTATE GYRUS=============#\n\n #=============RUN CA3 TRAINING==============#\n\n if not train_mode:\n pass\n else:\n with torch.no_grad():\n ca3_weights = step3_ca3.train(dg_sparse_dressed)\n\n if autosave:\n ca3_state = step3_ca3.W\n utils.save_checkpoint(ca3_state,\n model_path,\n name=\"ca3_weights\",\n silent=False)\n \n ## DISPLAY\n # utils.showme(ca3_weights)\n # exit()\n print(\"CA3 weights updated.\")\n #=============END CA3 TRAINING==============#\n\n #=============RUN EC->CA3===================#\n\n if not train_mode:\n trained_sparse = step4_ectoca3(ec_maxpool_flat)\n else:\n # Run training\n loss_avg = utils.RunningAverage()\n\n with tqdm (desc=\"Updating EC->CA3\", total=params.ectoca3_iters) as t1:\n for i in range(params.ectoca3_iters):\n trained_sparse = step4_ectoca3(ec_maxpool_flat)\n ectoca3_loss = ectoca3_loss_fn(trained_sparse, dg_sparse)\n ectoca3_optimizer.zero_grad()\n ectoca3_loss.backward(retain_graph=True)\n # print(i, ectoca3_loss)\n # NOTE: Learning rate has large impact on quality of output\n ectoca3_optimizer.step()\n\n loss_avg.update(ectoca3_loss.item())\n\n t1.set_postfix(loss=\"{:05.3f}\".format(loss_avg()))\n t1.update()\n\n ## DISPLAY\n utils.animate_weights(trained_sparse.detach(), auto=True)\n\n if autosave:\n ec_state = utils.get_save_state(epoch, step4_ectoca3,\n ectoca3_optimizer)\n utils.save_checkpoint(ec_state,\n model_path,\n name=\"ectoca3_weights\",\n silent=True)\n\n # Polarize output from (0, 1) to (-1, 1) for step3_ca3\n ectoca3_out_dressed = modules.all_dressed(trained_sparse)\n\n ## DISPLAY\n # utils.showme(ectoca3_out_dressed.detach())\n # exit()\n\n #=============END EC->CA3=================#\n\n #=============RUN CA3 RECALL==============#\n\n ca3_out_recall = step3_ca3.update(ectoca3_out_dressed)\n\n ## DISPLAY\n # utils.showme(ca3_out_recall.detach())\n\n #=============END CA3 TRAINING==============#\n\n #=============RUN CA1 ======================#\n\n if not train_mode:\n ca1_reconstruction = step5_ca1(ca3_out_recall)\n else:\n loss_avg.reset()\n\n with tqdm (desc=\"Updating CA1\", total=params.ca1_iters) as t2:\n for i in range(params.ca1_iters):\n ca1_reconstruction = step5_ca1(ca3_out_recall)\n ca1_loss = ca1_loss_fn(ca1_reconstruction, x)\n ca1_optimizer.zero_grad()\n\n if i == (params.ca1_iters - 1):\n ca1_loss.backward(retain_graph=False)\n else:\n ca1_loss.backward(retain_graph=True)\n\n # print(i, ca1_loss)\n ca1_optimizer.step()\n\n loss_avg.update(ca1_loss.item())\n\n t2.set_postfix(loss=\"{:05.3f}\".format(loss_avg()))\n t2.update()\n\n ## DISPLAY\n utils.animate_weights(ca1_reconstruction.detach(), nrow=5, auto=True)\n\n if autosave:\n ec_state = utils.get_save_state(epoch, step5_ca1,\n ectoca3_optimizer)\n utils.save_checkpoint(ec_state,\n model_path,\n name=\"ca1_weights\",\n silent=False)\n \n print(\"Graph cleared.\", end=\" \")\n print(\"Weights successfully updated.\\n\")\n #=============END CA1 =============#\n\n # Optional exit to end after one batch\n exit()\n\n\n# Define transforms\ntsfm = transforms.Compose([\n transforms.Grayscale(1),\n transforms.Resize(params.resize_dim),\n transforms.ToTensor()\n])\n\n# Import from torchvision.datasets Omniglot\ndataset = Omniglot(data_path, background=False, transform=tsfm, download=True)\n\ndataloader = DataLoader(dataset,\n params.batch_size,\n shuffle=True,\n num_workers=params.num_workers,\n drop_last=True)\n\n#================BEGIN 
MODELS================#\n\n# Initialize layers with parameters.\nstep1_ec = modules.EC(params.batch_size,\n                      D_in=1,\n                      D_out=121,\n                      KERNEL_SIZE=9,\n                      STRIDE=1,\n                      PADDING=4)\n\nstep2_dg = modules.DG(params.batch_size, 27225, 225)\n\nstep3_ca3 = modules.CA3(225)\n\nstep4_ectoca3 = modules.ECToCA3(27225, 225)\n\nstep5_ca1 = modules.CA1(params.batch_size, 225, 2704, params.resize_dim)\n\n#================END MODELS================#\n\n# Set loss_fn to Binary cross entropy for Autoencoder.\nectoca3_loss_fn = nn.BCELoss()\nca1_loss_fn = nn.MSELoss()\n\nectoca3_optimizer = optim.Adam(step4_ectoca3.parameters(),\n                               lr=params.ectoca3_learning_rate,\n                               weight_decay=params.ectoca3_weight_decay)\n\nca1_optimizer = optim.Adam(step5_ca1.parameters(),\n                           lr=params.ca1_learning_rate,\n                           weight_decay=params.ca1_weight_decay)\n\n# Get pretrained weights. Comment out if not wanted.\nutils.load_checkpoint(pretrain_path, step1_ec, name=\"pre_train\")\n\n# Start training\n# Train mode runs backprop and stores weights in the Hopfield net. \n# Autosave overwrites existing weights if set to true.\n\ntrain(step1_ec,\n      dataloader,\n      ectoca3_optimizer,\n      ca1_optimizer,\n      ectoca3_loss_fn,\n      ca1_loss_fn,\n      params,\n      autosave=False,\n      train_mode=False)\n" ]
[ [ "torch.cuda.manual_seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.no_grad", "torch.cuda.is_available", "torch.nn.MSELoss" ] ]
tupui/ray
[ "c5edd82c63438ae15be18a605eea4a962870a4be" ]
[ "python/ray/data/tests/test_dataset.py" ]
[ "import math\nimport os\nimport random\nimport requests\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport pytest\n\nimport ray\n\nfrom ray.tests.conftest import * # noqa\nfrom ray.data.dataset import Dataset, _sliding_window\nfrom ray.data.datasource.csv_datasource import CSVDatasource\nfrom ray.data.block import BlockAccessor\nfrom ray.data.context import DatasetContext\nfrom ray.data.row import TableRow\nfrom ray.data.impl.arrow_block import ArrowRow\nfrom ray.data.impl.block_builder import BlockBuilder\nfrom ray.data.impl.lazy_block_list import LazyBlockList\nfrom ray.data.impl.pandas_block import PandasRow\nfrom ray.data.aggregate import AggregateFn, Count, Sum, Min, Max, Mean, Std\nfrom ray.data.extensions.tensor_extension import (\n TensorArray,\n TensorDtype,\n ArrowTensorType,\n ArrowTensorArray,\n)\nimport ray.data.tests.util as util\nfrom ray.data.tests.conftest import * # noqa\n\n\ndef maybe_pipeline(ds, enabled):\n if enabled:\n return ds.window(blocks_per_window=1)\n else:\n return ds\n\n\ndef maybe_lazy(ds, enabled):\n if enabled:\n return ds.experimental_lazy()\n else:\n return ds\n\n\nclass SlowCSVDatasource(CSVDatasource):\n def _read_stream(self, f: \"pa.NativeFile\", path: str, **reader_args):\n for block in CSVDatasource._read_stream(self, f, path, **reader_args):\n time.sleep(3)\n yield block\n\n\n# Tests that we don't block on exponential rampup when doing bulk reads.\n# https://github.com/ray-project/ray/issues/20625\[email protected](\"block_split\", [False, True])\ndef test_bulk_lazy_eval_split_mode(shutdown_only, block_split, tmp_path):\n ray.init(num_cpus=8)\n ctx = ray.data.context.DatasetContext.get_current()\n\n try:\n original = ctx.block_splitting_enabled\n\n ray.data.range(8, parallelism=8).write_csv(str(tmp_path))\n ctx.block_splitting_enabled = block_split\n ds = ray.data.read_datasource(\n SlowCSVDatasource(), parallelism=8, paths=str(tmp_path)\n )\n\n start = time.time()\n ds.map(lambda x: x)\n delta = time.time() - start\n\n print(\"full read time\", delta)\n # Should run in ~3 seconds. 
It takes >9 seconds if bulk read is broken.\n assert delta < 8, delta\n finally:\n ctx.block_splitting_enabled = original\n\n\[email protected](\"pipelined\", [False, True])\ndef test_basic_actors(shutdown_only, pipelined):\n ray.init(num_cpus=2)\n n = 5\n ds = ray.data.range(n)\n ds = maybe_pipeline(ds, pipelined)\n assert sorted(ds.map(lambda x: x + 1, compute=\"actors\").take()) == list(\n range(1, n + 1)\n )\n\n # Should still work even if num actors > num cpus.\n ds = ray.data.range(n)\n ds = maybe_pipeline(ds, pipelined)\n assert sorted(\n ds.map(lambda x: x + 1, compute=ray.data.ActorPoolStrategy(4, 4)).take()\n ) == list(range(1, n + 1))\n\n # Test setting custom max inflight tasks.\n ds = ray.data.range(10, parallelism=5)\n ds = maybe_pipeline(ds, pipelined)\n assert (\n sorted(\n ds.map(\n lambda x: x + 1,\n compute=ray.data.ActorPoolStrategy(max_tasks_in_flight_per_actor=3),\n ).take()\n )\n == list(range(1, 11))\n )\n\n # Test invalid max tasks inflight arg.\n with pytest.raises(ValueError):\n ray.data.range(10).map(\n lambda x: x,\n compute=ray.data.ActorPoolStrategy(max_tasks_in_flight_per_actor=0),\n )\n\n # Test min no more than max check.\n with pytest.raises(ValueError):\n ray.data.range(10).map(lambda x: x, compute=ray.data.ActorPoolStrategy(8, 4))\n\n\[email protected](\"pipelined\", [False, True])\ndef test_avoid_placement_group_capture(shutdown_only, pipelined):\n ray.init(num_cpus=2)\n\n @ray.remote\n def run():\n ds0 = ray.data.range(5)\n ds = maybe_pipeline(ds0, pipelined)\n assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5]\n ds = maybe_pipeline(ds0, pipelined)\n assert ds.count() == 5\n ds = maybe_pipeline(ds0, pipelined)\n assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4]\n\n pg = ray.util.placement_group([{\"CPU\": 1}])\n ray.get(\n run.options(\n placement_group=pg, placement_group_capture_child_tasks=True\n ).remote()\n )\n\n\ndef test_callable_classes(shutdown_only):\n ray.init(num_cpus=1)\n ds = ray.data.range(10)\n\n class StatefulFn:\n def __init__(self):\n self.num_reuses = 0\n\n def __call__(self, x):\n r = self.num_reuses\n self.num_reuses += 1\n return r\n\n # Need to specify compute explicitly.\n with pytest.raises(ValueError):\n ds.map(StatefulFn).take()\n\n # Need to specify actor compute strategy.\n with pytest.raises(ValueError):\n ds.map(StatefulFn, compute=\"tasks\").take()\n\n # map\n actor_reuse = ds.map(StatefulFn, compute=\"actors\").take()\n assert sorted(actor_reuse) == list(range(10)), actor_reuse\n\n class StatefulFn:\n def __init__(self):\n self.num_reuses = 0\n\n def __call__(self, x):\n r = self.num_reuses\n self.num_reuses += 1\n return [r]\n\n # flat map\n actor_reuse = ds.flat_map(StatefulFn, compute=\"actors\").take()\n assert sorted(actor_reuse) == list(range(10)), actor_reuse\n\n # map batches\n actor_reuse = ds.map_batches(StatefulFn, compute=\"actors\").take()\n assert sorted(actor_reuse) == list(range(10)), actor_reuse\n\n class StatefulFn:\n def __init__(self):\n self.num_reuses = 0\n\n def __call__(self, x):\n r = self.num_reuses\n self.num_reuses += 1\n return r > 0\n\n # filter\n actor_reuse = ds.filter(StatefulFn, compute=\"actors\").take()\n assert len(actor_reuse) == 9, actor_reuse\n\n\ndef test_transform_failure(shutdown_only):\n ray.init(num_cpus=2)\n ds = ray.data.from_items([0, 10], parallelism=2)\n\n def mapper(x):\n time.sleep(x)\n raise ValueError(\"oops\")\n return x\n\n with pytest.raises(ray.exceptions.RayTaskError):\n ds.map(mapper)\n\n\[email protected](\"lazy\", [False, True])\ndef 
test_dataset_lineage_serialization(shutdown_only, lazy):\n ray.init()\n ds = ray.data.range(10)\n ds = maybe_lazy(ds, lazy)\n ds = ds.map(lambda x: x + 1)\n ds = ds.map(lambda x: x + 1)\n ds = ds.random_shuffle()\n epoch = ds._get_epoch()\n uuid = ds._get_uuid()\n plan_uuid = ds._plan._dataset_uuid\n lazy = ds._lazy\n\n serialized_ds = ds.serialize_lineage()\n # Confirm that the original Dataset was properly copied before clearing/mutating.\n in_blocks = ds._plan._in_blocks\n # Should not raise.\n in_blocks._check_if_cleared()\n assert isinstance(in_blocks, LazyBlockList)\n if lazy:\n assert in_blocks._block_partition_refs[0] is not None\n else:\n assert ds._plan._snapshot_blocks is not None\n\n ray.shutdown()\n ray.init()\n\n ds = Dataset.deserialize_lineage(serialized_ds)\n # Check Dataset state.\n assert ds._get_epoch() == epoch\n assert ds._get_uuid() == uuid\n assert ds._plan._dataset_uuid == plan_uuid\n assert ds._lazy == lazy\n # Check Dataset content.\n assert ds.count() == 10\n assert sorted(ds.take()) == list(range(2, 12))\n\n\[email protected](\"lazy\", [False, True])\ndef test_dataset_lineage_serialization_unsupported(shutdown_only, lazy):\n ray.init()\n # In-memory data sources not supported.\n ds = ray.data.from_items(list(range(10)))\n ds = maybe_lazy(ds, lazy)\n ds = ds.map(lambda x: x + 1)\n ds = ds.map(lambda x: x + 1)\n\n with pytest.raises(ValueError):\n ds.serialize_lineage()\n\n # In-memory data source unions not supported.\n ds = ray.data.from_items(list(range(10)))\n ds = maybe_lazy(ds, lazy)\n ds1 = ray.data.from_items(list(range(10, 20)))\n ds2 = ds.union(ds1)\n\n with pytest.raises(ValueError):\n ds2.serialize_lineage()\n\n # Post-lazy-read unions not supported.\n ds = ray.data.range(10).map(lambda x: x + 1)\n ds = maybe_lazy(ds, lazy)\n ds1 = ray.data.range(20).map(lambda x: 2 * x)\n ds2 = ds.union(ds1)\n\n with pytest.raises(ValueError):\n ds2.serialize_lineage()\n\n # Lazy read unions supported.\n ds = ray.data.range(10)\n ds = maybe_lazy(ds, lazy)\n ds1 = ray.data.range(20)\n ds2 = ds.union(ds1)\n\n serialized_ds = ds2.serialize_lineage()\n ds3 = Dataset.deserialize_lineage(serialized_ds)\n assert ds3.take(30) == list(range(10)) + list(range(20))\n\n # Zips not supported.\n ds = ray.data.from_items(list(range(10)))\n ds = maybe_lazy(ds, lazy)\n ds1 = ray.data.from_items(list(range(10, 20)))\n ds2 = ds.zip(ds1)\n\n with pytest.raises(ValueError):\n ds2.serialize_lineage()\n\n\[email protected](\"pipelined\", [False, True])\ndef test_basic(ray_start_regular_shared, pipelined):\n ds0 = ray.data.range(5)\n ds = maybe_pipeline(ds0, pipelined)\n assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5]\n ds = maybe_pipeline(ds0, pipelined)\n assert ds.count() == 5\n ds = maybe_pipeline(ds0, pipelined)\n assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4]\n\n\ndef test_zip(ray_start_regular_shared):\n ds1 = ray.data.range(5)\n ds2 = ray.data.range(5).map(lambda x: x + 1)\n ds = ds1.zip(ds2)\n assert ds.schema() == tuple\n assert ds.take() == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]\n with pytest.raises(ValueError):\n ds.zip(ray.data.range(3))\n\n\ndef test_zip_pandas(ray_start_regular_shared):\n ds1 = ray.data.from_pandas(pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5]}))\n ds2 = ray.data.from_pandas(pd.DataFrame({\"col3\": [\"a\", \"b\"], \"col4\": [\"d\", \"e\"]}))\n ds = ds1.zip(ds2)\n assert ds.count() == 2\n assert \"{col1: int64, col2: int64, col3: object, col4: object}\" in str(ds)\n result = [r.as_pydict() for r in ds.take()]\n assert result[0] == 
{\"col1\": 1, \"col2\": 4, \"col3\": \"a\", \"col4\": \"d\"}\n\n ds3 = ray.data.from_pandas(pd.DataFrame({\"col2\": [\"a\", \"b\"], \"col4\": [\"d\", \"e\"]}))\n ds = ds1.zip(ds3)\n assert ds.count() == 2\n assert \"{col1: int64, col2: int64, col2_1: object, col4: object}\" in str(ds)\n result = [r.as_pydict() for r in ds.take()]\n assert result[0] == {\"col1\": 1, \"col2\": 4, \"col2_1\": \"a\", \"col4\": \"d\"}\n\n\ndef test_zip_arrow(ray_start_regular_shared):\n ds1 = ray.data.range_table(5).map(lambda r: {\"id\": r[\"value\"]})\n ds2 = ray.data.range_table(5).map(\n lambda r: {\"a\": r[\"value\"] + 1, \"b\": r[\"value\"] + 2}\n )\n ds = ds1.zip(ds2)\n assert \"{id: int64, a: int64, b: int64}\" in str(ds)\n assert ds.count() == 5\n result = [r.as_pydict() for r in ds.take()]\n assert result[0] == {\"id\": 0, \"a\": 1, \"b\": 2}\n\n # Test duplicate column names.\n ds = ds1.zip(ds1).zip(ds1)\n assert ds.count() == 5\n assert \"{id: int64, id_1: int64, id_2: int64}\" in str(ds)\n result = [r.as_pydict() for r in ds.take()]\n assert result[0] == {\"id\": 0, \"id_1\": 0, \"id_2\": 0}\n\n\ndef test_batch_tensors(ray_start_regular_shared):\n import torch\n\n ds = ray.data.from_items([torch.tensor([0, 0]) for _ in range(40)])\n res = \"Dataset(num_blocks=40, num_rows=40, schema=<class 'torch.Tensor'>)\"\n assert str(ds) == res, str(ds)\n with pytest.raises(pa.lib.ArrowInvalid):\n next(ds.iter_batches(batch_format=\"pyarrow\"))\n df = next(ds.iter_batches(batch_format=\"pandas\"))\n assert df.to_dict().keys() == {\"value\"}\n\n\ndef test_arrow_block_slice_copy():\n # Test that ArrowBlock slicing properly copies the underlying Arrow\n # table.\n def check_for_copy(table1, table2, a, b, is_copy):\n expected_slice = table1.slice(a, b - a)\n assert table2.equals(expected_slice)\n assert table2.schema == table1.schema\n assert table1.num_columns == table2.num_columns\n for col1, col2 in zip(table1.columns, table2.columns):\n assert col1.num_chunks == col2.num_chunks\n for chunk1, chunk2 in zip(col1.chunks, col2.chunks):\n bufs1 = chunk1.buffers()\n bufs2 = chunk2.buffers()\n expected_offset = 0 if is_copy else a\n assert chunk2.offset == expected_offset\n assert len(chunk2) == b - a\n if is_copy:\n assert bufs2[1].address != bufs1[1].address\n else:\n assert bufs2[1].address == bufs1[1].address\n\n n = 20\n df = pd.DataFrame(\n {\"one\": list(range(n)), \"two\": [\"a\"] * n, \"three\": [np.nan] + [1.5] * (n - 1)}\n )\n table = pa.Table.from_pandas(df)\n a, b = 5, 10\n block_accessor = BlockAccessor.for_block(table)\n\n # Test with copy.\n table2 = block_accessor.slice(a, b, True)\n check_for_copy(table, table2, a, b, is_copy=True)\n\n # Test without copy.\n table2 = block_accessor.slice(a, b, False)\n check_for_copy(table, table2, a, b, is_copy=False)\n\n\ndef test_arrow_block_slice_copy_empty():\n # Test that ArrowBlock slicing properly copies the underlying Arrow\n # table when the table is empty.\n df = pd.DataFrame({\"one\": []})\n table = pa.Table.from_pandas(df)\n a, b = 0, 0\n expected_slice = table.slice(a, b - a)\n block_accessor = BlockAccessor.for_block(table)\n\n # Test with copy.\n table2 = block_accessor.slice(a, b, True)\n assert table2.equals(expected_slice)\n assert table2.schema == table.schema\n assert table2.num_rows == 0\n\n # Test without copy.\n table2 = block_accessor.slice(a, b, False)\n assert table2.equals(expected_slice)\n assert table2.schema == table.schema\n assert table2.num_rows == 0\n\n\ndef test_range_table(ray_start_regular_shared):\n ds = 
ray.data.range_table(10)\n assert ds.num_blocks() == 10\n assert ds.count() == 10\n assert ds.take() == [{\"value\": i} for i in range(10)]\n\n ds = ray.data.range_table(10, parallelism=2)\n assert ds.num_blocks() == 2\n assert ds.count() == 10\n assert ds.take() == [{\"value\": i} for i in range(10)]\n\n\ndef test_tensors(ray_start_regular_shared):\n # Create directly.\n ds = ray.data.range_tensor(5, shape=(3, 5))\n assert str(ds) == (\n \"Dataset(num_blocks=5, num_rows=5, \"\n \"schema={value: <ArrowTensorType: shape=(3, 5), dtype=int64>})\"\n )\n\n # Pandas conversion.\n res = (\n ray.data.range_tensor(10)\n .map_batches(lambda t: t + 2, batch_format=\"pandas\")\n .take(2)\n )\n assert str(res) == \"[{'value': array([2])}, {'value': array([3])}]\"\n\n\ndef test_tensor_array_ops(ray_start_regular_shared):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n\n df = pd.DataFrame({\"one\": [1, 2, 3], \"two\": TensorArray(arr)})\n\n def apply_arithmetic_ops(arr):\n return 2 * (arr + 1) / 3\n\n def apply_comparison_ops(arr):\n return arr % 2 == 0\n\n def apply_logical_ops(arr):\n return arr & (3 * arr) | (5 * arr)\n\n # Op tests, using NumPy as the groundtruth.\n np.testing.assert_equal(apply_arithmetic_ops(arr), apply_arithmetic_ops(df[\"two\"]))\n\n np.testing.assert_equal(apply_comparison_ops(arr), apply_comparison_ops(df[\"two\"]))\n\n np.testing.assert_equal(apply_logical_ops(arr), apply_logical_ops(df[\"two\"]))\n\n\ndef test_tensor_array_reductions(ray_start_regular_shared):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n\n df = pd.DataFrame({\"one\": list(range(outer_dim)), \"two\": TensorArray(arr)})\n\n # Reduction tests, using NumPy as the groundtruth.\n for name, reducer in TensorArray.SUPPORTED_REDUCERS.items():\n np_kwargs = {}\n if name in (\"std\", \"var\"):\n # Pandas uses a ddof default of 1 while NumPy uses 0.\n # Give NumPy a ddof kwarg of 1 in order to ensure equivalent\n # standard deviation calculations.\n np_kwargs[\"ddof\"] = 1\n np.testing.assert_equal(df[\"two\"].agg(name), reducer(arr, axis=0, **np_kwargs))\n\n\ndef test_tensor_array_block_slice():\n # Test that ArrowBlock slicing works with tensor column extension type.\n def check_for_copy(table1, table2, a, b, is_copy):\n expected_slice = table1.slice(a, b - a)\n assert table2.equals(expected_slice)\n assert table2.schema == table1.schema\n assert table1.num_columns == table2.num_columns\n for col1, col2 in zip(table1.columns, table2.columns):\n assert col1.num_chunks == col2.num_chunks\n for chunk1, chunk2 in zip(col1.chunks, col2.chunks):\n bufs1 = chunk1.buffers()\n bufs2 = chunk2.buffers()\n expected_offset = 0 if is_copy else a\n assert chunk2.offset == expected_offset\n assert len(chunk2) == b - a\n if is_copy:\n assert bufs2[1].address != bufs1[1].address\n else:\n assert bufs2[1].address == bufs1[1].address\n\n n = 20\n one_arr = np.arange(4 * n).reshape(n, 2, 2)\n df = pd.DataFrame({\"one\": TensorArray(one_arr), \"two\": [\"a\"] * n})\n table = pa.Table.from_pandas(df)\n a, b = 5, 10\n block_accessor = BlockAccessor.for_block(table)\n\n # Test with copy.\n table2 = block_accessor.slice(a, b, True)\n np.testing.assert_array_equal(table2[\"one\"].chunk(0).to_numpy(), one_arr[a:b, :, :])\n check_for_copy(table, table2, a, b, is_copy=True)\n\n # Test without copy.\n table2 = 
block_accessor.slice(a, b, False)\n np.testing.assert_array_equal(table2[\"one\"].chunk(0).to_numpy(), one_arr[a:b, :, :])\n check_for_copy(table, table2, a, b, is_copy=False)\n\n\[email protected](\n \"test_data,a,b\",\n [\n ([[False, True], [True, False], [True, True], [False, False]], 1, 3),\n ([[False, True], [True, False], [True, True], [False, False]], 0, 1),\n (\n [\n [False, True],\n [True, False],\n [True, True],\n [False, False],\n [True, False],\n [False, False],\n [False, True],\n [True, True],\n [False, False],\n [True, True],\n [False, True],\n [True, False],\n ],\n 3,\n 6,\n ),\n (\n [\n [False, True],\n [True, False],\n [True, True],\n [False, False],\n [True, False],\n [False, False],\n [False, True],\n [True, True],\n [False, False],\n [True, True],\n [False, True],\n [True, False],\n ],\n 7,\n 11,\n ),\n (\n [\n [False, True],\n [True, False],\n [True, True],\n [False, False],\n [True, False],\n [False, False],\n [False, True],\n [True, True],\n [False, False],\n [True, True],\n [False, True],\n [True, False],\n ],\n 9,\n 12,\n ),\n ],\n)\[email protected](\"init_with_pandas\", [True, False])\ndef test_tensor_array_boolean_slice_pandas_roundtrip(init_with_pandas, test_data, a, b):\n n = len(test_data)\n test_arr = np.array(test_data)\n df = pd.DataFrame({\"one\": TensorArray(test_arr), \"two\": [\"a\"] * n})\n if init_with_pandas:\n table = pa.Table.from_pandas(df)\n else:\n pa_dtype = pa.bool_()\n flat = [w for v in test_data for w in v]\n data_array = pa.array(flat, pa_dtype)\n inner_len = len(test_data[0])\n offsets = list(range(0, len(flat) + 1, inner_len))\n offset_buffer = pa.py_buffer(np.int32(offsets))\n storage = pa.Array.from_buffers(\n pa.list_(pa_dtype),\n len(test_data),\n [None, offset_buffer],\n children=[data_array],\n )\n t_arr = pa.ExtensionArray.from_storage(\n ArrowTensorType((inner_len,), pa.bool_()), storage\n )\n table = pa.table({\"one\": t_arr, \"two\": [\"a\"] * n})\n block_accessor = BlockAccessor.for_block(table)\n\n # Test without copy.\n table2 = block_accessor.slice(a, b, False)\n np.testing.assert_array_equal(table2[\"one\"].chunk(0).to_numpy(), test_arr[a:b, :])\n pd.testing.assert_frame_equal(\n table2.to_pandas().reset_index(drop=True), df[a:b].reset_index(drop=True)\n )\n\n # Test with copy.\n table2 = block_accessor.slice(a, b, True)\n np.testing.assert_array_equal(table2[\"one\"].chunk(0).to_numpy(), test_arr[a:b, :])\n pd.testing.assert_frame_equal(\n table2.to_pandas().reset_index(drop=True), df[a:b].reset_index(drop=True)\n )\n\n\ndef test_arrow_tensor_array_getitem(ray_start_regular_shared):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n\n t_arr = ArrowTensorArray.from_numpy(arr)\n\n for idx in range(outer_dim):\n np.testing.assert_array_equal(t_arr[idx], arr[idx])\n\n # Test __iter__.\n for t_subarr, subarr in zip(t_arr, arr):\n np.testing.assert_array_equal(t_subarr, subarr)\n\n # Test to_pylist.\n np.testing.assert_array_equal(t_arr.to_pylist(), list(arr))\n\n # Test slicing and indexing.\n t_arr2 = t_arr[1:]\n\n np.testing.assert_array_equal(t_arr2.to_numpy(), arr[1:])\n\n for idx in range(1, outer_dim):\n np.testing.assert_array_equal(t_arr2[idx - 1], arr[idx])\n\n\[email protected](\n \"test_arr,dtype\",\n [\n ([[1, 2], [3, 4], [5, 6], [7, 8]], None),\n ([[1, 2], [3, 4], [5, 6], [7, 8]], np.int32),\n ([[1, 2], [3, 4], [5, 6], [7, 8]], np.int16),\n ([[1, 2], [3, 4], [5, 6], [7, 8]], np.longlong),\n ([[1.5, 
2.5], [3.3, 4.2], [5.2, 6.9], [7.6, 8.1]], None),\n ([[1.5, 2.5], [3.3, 4.2], [5.2, 6.9], [7.6, 8.1]], np.float32),\n ([[1.5, 2.5], [3.3, 4.2], [5.2, 6.9], [7.6, 8.1]], np.float16),\n ([[False, True], [True, False], [True, True], [False, False]], None),\n ],\n)\ndef test_arrow_tensor_array_slice(test_arr, dtype):\n # Test that ArrowTensorArray slicing works as expected.\n arr = np.array(test_arr, dtype=dtype)\n ata = ArrowTensorArray.from_numpy(arr)\n np.testing.assert_array_equal(ata.to_numpy(), arr)\n slice1 = ata.slice(0, 2)\n np.testing.assert_array_equal(slice1.to_numpy(), arr[0:2])\n np.testing.assert_array_equal(slice1[1], arr[1])\n slice2 = ata.slice(2, 2)\n np.testing.assert_array_equal(slice2.to_numpy(), arr[2:4])\n np.testing.assert_array_equal(slice2[1], arr[3])\n\n\ndef test_tensors_in_tables_from_pandas(ray_start_regular_shared):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame({\"one\": list(range(outer_dim)), \"two\": list(arr)})\n # Cast column to tensor extension dtype.\n df[\"two\"] = df[\"two\"].astype(TensorDtype())\n ds = ray.data.from_pandas([df])\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\ndef test_tensors_in_tables_pandas_roundtrip(ray_start_regular_shared):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame({\"one\": list(range(outer_dim)), \"two\": TensorArray(arr)})\n ds = ray.data.from_pandas([df])\n ds_df = ds.to_pandas()\n assert ds_df.equals(df)\n\n\ndef test_tensors_in_tables_parquet_roundtrip(ray_start_regular_shared, tmp_path):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame({\"one\": list(range(outer_dim)), \"two\": TensorArray(arr)})\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n ds = ray.data.read_parquet(str(tmp_path))\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\ndef test_tensors_in_tables_parquet_with_schema(ray_start_regular_shared, tmp_path):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame({\"one\": list(range(outer_dim)), \"two\": TensorArray(arr)})\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n schema = pa.schema(\n [\n (\"one\", pa.int32()),\n (\"two\", ArrowTensorType(inner_shape, pa.from_numpy_dtype(arr.dtype))),\n ]\n )\n ds = ray.data.read_parquet(str(tmp_path), schema=schema)\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\ndef test_tensors_in_tables_parquet_pickle_manual_serde(\n ray_start_regular_shared, tmp_path\n):\n import pickle\n\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame(\n {\"one\": 
list(range(outer_dim)), \"two\": [pickle.dumps(a) for a in arr]}\n )\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n ds = ray.data.read_parquet(str(tmp_path))\n\n # Manually deserialize the tensor pickle bytes and cast to our tensor\n # extension type.\n def deser_mapper(batch: pd.DataFrame):\n batch[\"two\"] = [pickle.loads(a) for a in batch[\"two\"]]\n batch[\"two\"] = batch[\"two\"].astype(TensorDtype())\n return batch\n\n casted_ds = ds.map_batches(deser_mapper, batch_format=\"pandas\")\n\n values = [[s[\"one\"], s[\"two\"]] for s in casted_ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n # Manually deserialize the pickle tensor bytes and directly cast it to a\n # TensorArray.\n def deser_mapper_direct(batch: pd.DataFrame):\n batch[\"two\"] = TensorArray([pickle.loads(a) for a in batch[\"two\"]])\n return batch\n\n casted_ds = ds.map_batches(deser_mapper_direct, batch_format=\"pandas\")\n\n values = [[s[\"one\"], s[\"two\"]] for s in casted_ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\ndef test_tensors_in_tables_parquet_bytes_manual_serde(\n ray_start_regular_shared, tmp_path\n):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame(\n {\"one\": list(range(outer_dim)), \"two\": [a.tobytes() for a in arr]}\n )\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n ds = ray.data.read_parquet(str(tmp_path))\n\n tensor_col_name = \"two\"\n\n # Manually deserialize the tensor bytes and cast to a TensorArray.\n def np_deser_mapper(batch: pa.Table):\n # NOTE(Clark): We use NumPy to consolidate these potentially\n # non-contiguous buffers, and to do buffer bookkeeping in general.\n np_col = np.array(\n [\n np.ndarray(inner_shape, buffer=buf.as_buffer(), dtype=arr.dtype)\n for buf in batch.column(tensor_col_name)\n ]\n )\n\n return batch.set_column(\n batch._ensure_integer_index(tensor_col_name),\n tensor_col_name,\n ArrowTensorArray.from_numpy(np_col),\n )\n\n ds = ds.map_batches(np_deser_mapper, batch_format=\"pyarrow\")\n\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\ndef test_tensors_in_tables_parquet_bytes_manual_serde_udf(\n ray_start_regular_shared, tmp_path\n):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n tensor_col_name = \"two\"\n df = pd.DataFrame(\n {\"one\": list(range(outer_dim)), tensor_col_name: [a.tobytes() for a in arr]}\n )\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n\n # Manually deserialize the tensor bytes and cast to a TensorArray.\n def np_deser_udf(block: pa.Table):\n # NOTE(Clark): We use NumPy to consolidate these potentially\n # non-contiguous buffers, and to do buffer bookkeeping in general.\n np_col = np.array(\n [\n np.ndarray(inner_shape, buffer=buf.as_buffer(), dtype=arr.dtype)\n for buf in block.column(tensor_col_name)\n ]\n )\n\n return block.set_column(\n block._ensure_integer_index(tensor_col_name),\n tensor_col_name,\n ArrowTensorArray.from_numpy(np_col),\n )\n\n ds = ray.data.read_parquet(str(tmp_path), 
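# np_deser_udf is applied to each block at read time, via the private\n    # _block_udf hook, so the schema assertion below already sees tensors\n    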
_block_udf=np_deser_udf)\n\n assert isinstance(ds.schema().field_by_name(tensor_col_name).type, ArrowTensorType)\n\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\ndef test_tensors_in_tables_parquet_bytes_manual_serde_col_schema(\n ray_start_regular_shared, tmp_path\n):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n tensor_col_name = \"two\"\n df = pd.DataFrame(\n {\"one\": list(range(outer_dim)), tensor_col_name: [a.tobytes() for a in arr]}\n )\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n\n def _block_udf(block: pa.Table):\n df = block.to_pandas()\n df[tensor_col_name] += 1\n return pa.Table.from_pandas(df)\n\n ds = ray.data.read_parquet(\n str(tmp_path),\n tensor_column_schema={tensor_col_name: (arr.dtype, inner_shape)},\n _block_udf=_block_udf,\n )\n\n assert isinstance(ds.schema().field_by_name(tensor_col_name).type, ArrowTensorType)\n\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr + 1))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\[email protected](\n reason=(\n \"Waiting for Arrow to support registering custom ExtensionType \"\n \"casting kernels. See \"\n \"https://issues.apache.org/jira/browse/ARROW-5890#\"\n )\n)\ndef test_tensors_in_tables_parquet_bytes_with_schema(\n ray_start_regular_shared, tmp_path\n):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df = pd.DataFrame(\n {\"one\": list(range(outer_dim)), \"two\": [a.tobytes() for a in arr]}\n )\n ds = ray.data.from_pandas([df])\n ds.write_parquet(str(tmp_path))\n schema = pa.schema(\n [\n (\"one\", pa.int32()),\n (\"two\", ArrowTensorType(inner_shape, pa.from_numpy_dtype(arr.dtype))),\n ]\n )\n ds = ray.data.read_parquet(str(tmp_path), schema=schema)\n values = [[s[\"one\"], s[\"two\"]] for s in ds.take()]\n expected = list(zip(list(range(outer_dim)), arr))\n for v, e in zip(sorted(values), expected):\n np.testing.assert_equal(v, e)\n\n\[email protected](\"pipelined\", [False, True])\ndef test_tensors_in_tables_to_torch(ray_start_regular_shared, pipelined):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df1 = pd.DataFrame(\n {\"one\": TensorArray(arr), \"two\": TensorArray(arr + 1), \"label\": [1.0, 2.0, 3.0]}\n )\n arr2 = np.arange(num_items, 2 * num_items).reshape(shape)\n df2 = pd.DataFrame(\n {\n \"one\": TensorArray(arr2),\n \"two\": TensorArray(arr2 + 1),\n \"label\": [4.0, 5.0, 6.0],\n }\n )\n df = pd.concat([df1, df2])\n ds = ray.data.from_pandas([df1, df2])\n ds = maybe_pipeline(ds, pipelined)\n torchd = ds.to_torch(\n label_column=\"label\", batch_size=2, unsqueeze_label_tensor=False\n )\n\n num_epochs = 1 if pipelined else 2\n for _ in range(num_epochs):\n features, labels = [], []\n for batch in iter(torchd):\n features.append(batch[0].numpy())\n labels.append(batch[1].numpy())\n features, labels = np.concatenate(features), np.concatenate(labels)\n values = np.stack([df[\"one\"].to_numpy(), df[\"two\"].to_numpy()], axis=1)\n np.testing.assert_array_equal(values, features)\n 
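# with unsqueeze_label_tensor=False the label batches stay 1-D, so they\n        # compare directly against the raw column values\n        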
np.testing.assert_array_equal(df[\"label\"].to_numpy(), labels)\n\n\[email protected](\"pipelined\", [False, True])\ndef test_tensors_in_tables_to_torch_mix(ray_start_regular_shared, pipelined):\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape)\n df1 = pd.DataFrame(\n {\n \"one\": TensorArray(arr),\n \"two\": [1, 2, 3],\n \"label\": [1.0, 2.0, 3.0],\n }\n )\n arr2 = np.arange(num_items, 2 * num_items).reshape(shape)\n df2 = pd.DataFrame(\n {\n \"one\": TensorArray(arr2),\n \"two\": [4, 5, 6],\n \"label\": [4.0, 5.0, 6.0],\n }\n )\n df = pd.concat([df1, df2])\n ds = ray.data.from_pandas([df1, df2])\n ds = maybe_pipeline(ds, pipelined)\n torchd = ds.to_torch(\n label_column=\"label\",\n feature_columns=[[\"one\"], [\"two\"]],\n batch_size=2,\n unsqueeze_label_tensor=False,\n unsqueeze_feature_tensors=False,\n )\n\n num_epochs = 1 if pipelined else 2\n for _ in range(num_epochs):\n col1, col2, labels = [], [], []\n for batch in iter(torchd):\n col1.append(batch[0][0].numpy())\n col2.append(batch[0][1].numpy())\n labels.append(batch[1].numpy())\n col1, col2 = np.concatenate(col1), np.concatenate(col2)\n labels = np.concatenate(labels)\n np.testing.assert_array_equal(col1, np.sort(df[\"one\"].to_numpy()))\n np.testing.assert_array_equal(col2, np.sort(df[\"two\"].to_numpy()))\n np.testing.assert_array_equal(labels, np.sort(df[\"label\"].to_numpy()))\n\n\[email protected](\"pipelined\", [False, True])\ndef test_tensors_in_tables_to_tf(ray_start_regular_shared, pipelined):\n import tensorflow as tf\n\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape).astype(np.float)\n df1 = pd.DataFrame(\n {\n \"one\": TensorArray(arr),\n \"two\": TensorArray(arr + 1),\n \"label\": [1, 2, 3],\n }\n )\n arr2 = np.arange(num_items, 2 * num_items).reshape(shape).astype(np.float)\n df2 = pd.DataFrame(\n {\n \"one\": TensorArray(arr2),\n \"two\": TensorArray(arr2 + 1),\n \"label\": [4, 5, 6],\n }\n )\n df = pd.concat([df1, df2])\n ds = ray.data.from_pandas([df1, df2])\n ds = maybe_pipeline(ds, pipelined)\n tfd = ds.to_tf(\n label_column=\"label\",\n output_signature=(\n tf.TensorSpec(shape=(None, 2, 2, 2, 2), dtype=tf.float32),\n tf.TensorSpec(shape=(None,), dtype=tf.float32),\n ),\n batch_size=2,\n )\n features, labels = [], []\n for batch in tfd.as_numpy_iterator():\n features.append(batch[0])\n labels.append(batch[1])\n features, labels = np.concatenate(features), np.concatenate(labels)\n values = np.stack([df[\"one\"].to_numpy(), df[\"two\"].to_numpy()], axis=1)\n np.testing.assert_array_equal(values, features)\n np.testing.assert_array_equal(df[\"label\"].to_numpy(), labels)\n\n\[email protected](\"pipelined\", [False, True])\ndef test_tensors_in_tables_to_tf_mix(ray_start_regular_shared, pipelined):\n import tensorflow as tf\n\n outer_dim = 3\n inner_shape = (2, 2, 2)\n shape = (outer_dim,) + inner_shape\n num_items = np.prod(np.array(shape))\n arr = np.arange(num_items).reshape(shape).astype(np.float)\n df1 = pd.DataFrame(\n {\n \"one\": TensorArray(arr),\n \"two\": [1, 2, 3],\n \"label\": [1.0, 2.0, 3.0],\n }\n )\n arr2 = np.arange(num_items, 2 * num_items).reshape(shape).astype(np.float)\n df2 = pd.DataFrame(\n {\n \"one\": TensorArray(arr2),\n \"two\": [4, 5, 6],\n \"label\": [4.0, 5.0, 6.0],\n }\n )\n df = pd.concat([df1, df2])\n ds = ray.data.from_pandas([df1, df2])\n ds = 
maybe_pipeline(ds, pipelined)\n tfd = ds.to_tf(\n label_column=\"label\",\n feature_columns=[[\"one\"], [\"two\"]],\n output_signature=(\n (\n tf.TensorSpec(shape=(None, 1, 2, 2, 2), dtype=tf.float32),\n tf.TensorSpec(shape=(None, 1), dtype=tf.float32),\n ),\n tf.TensorSpec(shape=(None,), dtype=tf.float32),\n ),\n batch_size=2,\n )\n col1, col2, labels = [], [], []\n for batch in tfd.as_numpy_iterator():\n col1.append(batch[0][0])\n col2.append(batch[0][1])\n labels.append(batch[1])\n col1 = np.squeeze(np.concatenate(col1), axis=1)\n col2 = np.squeeze(np.concatenate(col2), axis=1)\n labels = np.concatenate(labels)\n np.testing.assert_array_equal(col1, np.sort(df[\"one\"].to_numpy()))\n np.testing.assert_array_equal(col2, np.sort(df[\"two\"].to_numpy()))\n np.testing.assert_array_equal(labels, np.sort(df[\"label\"].to_numpy()))\n\n\ndef test_empty_shuffle(ray_start_regular_shared):\n ds = ray.data.range(100, parallelism=100)\n ds = ds.filter(lambda x: x)\n ds = ds.map_batches(lambda x: x)\n ds = ds.random_shuffle() # Would prev. crash with AssertionError: pyarrow.Table.\n ds.show()\n\n\ndef test_empty_dataset(ray_start_regular_shared):\n ds = ray.data.range(0)\n assert ds.count() == 0\n assert ds.size_bytes() is None\n assert ds.schema() is None\n\n ds = ray.data.range(1)\n ds = ds.filter(lambda x: x > 1)\n assert str(ds) == \"Dataset(num_blocks=1, num_rows=0, schema=Unknown schema)\"\n\n # Test map on empty dataset.\n ds = ray.data.from_items([])\n ds = ds.map(lambda x: x)\n assert ds.count() == 0\n\n # Test filter on empty dataset.\n ds = ray.data.from_items([])\n ds = ds.filter(lambda: True)\n assert ds.count() == 0\n\n\ndef test_schema(ray_start_regular_shared):\n ds = ray.data.range(10)\n ds2 = ray.data.range_table(10)\n ds3 = ds2.repartition(5)\n ds4 = ds3.map(lambda x: {\"a\": \"hi\", \"b\": 1.0}).limit(5).repartition(1)\n assert str(ds) == \"Dataset(num_blocks=10, num_rows=10, schema=<class 'int'>)\"\n assert str(ds2) == \"Dataset(num_blocks=10, num_rows=10, schema={value: int64})\"\n assert str(ds3) == \"Dataset(num_blocks=5, num_rows=10, schema={value: int64})\"\n assert (\n str(ds4) == \"Dataset(num_blocks=1, num_rows=5, schema={a: string, b: double})\"\n )\n\n\ndef test_schema_lazy(ray_start_regular_shared):\n ds = ray.data.range(100, parallelism=10)\n # We kick off the read task for the first block by default.\n assert ds._plan._in_blocks._num_computed() == 1\n schema = ds.schema()\n assert schema == int\n # Fetching the schema should not trigger execution of extra read tasks.\n assert ds._plan.execute()._num_computed() == 1\n\n\ndef test_lazy_loading_exponential_rampup(ray_start_regular_shared):\n ds = ray.data.range(100, parallelism=20)\n assert ds._plan.execute()._num_computed() == 1\n assert ds.take(10) == list(range(10))\n assert ds._plan.execute()._num_computed() == 2\n assert ds.take(20) == list(range(20))\n assert ds._plan.execute()._num_computed() == 4\n assert ds.take(30) == list(range(30))\n assert ds._plan.execute()._num_computed() == 8\n assert ds.take(50) == list(range(50))\n assert ds._plan.execute()._num_computed() == 16\n assert ds.take(100) == list(range(100))\n assert ds._plan.execute()._num_computed() == 20\n\n\ndef test_limit(ray_start_regular_shared):\n ds = ray.data.range(100, parallelism=20)\n for i in range(100):\n assert ds.limit(i).take(200) == list(range(i))\n\n\ndef test_convert_types(ray_start_regular_shared):\n plain_ds = ray.data.range(1)\n arrow_ds = plain_ds.map(lambda x: {\"a\": x})\n assert arrow_ds.take() == [{\"a\": 0}]\n assert 
\"ArrowRow\" in arrow_ds.map(lambda x: str(type(x))).take()[0]\n\n arrow_ds = ray.data.range_table(1)\n assert arrow_ds.map(lambda x: \"plain_{}\".format(x[\"value\"])).take() == [\"plain_0\"]\n assert arrow_ds.map(lambda x: {\"a\": (x[\"value\"],)}).take() == [{\"a\": [0]}]\n\n\ndef test_from_items(ray_start_regular_shared):\n ds = ray.data.from_items([\"hello\", \"world\"])\n assert ds.take() == [\"hello\", \"world\"]\n\n\ndef test_repartition_shuffle(ray_start_regular_shared):\n ds = ray.data.range(20, parallelism=10)\n assert ds.num_blocks() == 10\n assert ds.sum() == 190\n assert ds._block_num_rows() == [2] * 10\n\n ds2 = ds.repartition(5, shuffle=True)\n assert ds2.num_blocks() == 5\n assert ds2.sum() == 190\n assert ds2._block_num_rows() == [10, 10, 0, 0, 0]\n\n ds3 = ds2.repartition(20, shuffle=True)\n assert ds3.num_blocks() == 20\n assert ds3.sum() == 190\n assert ds3._block_num_rows() == [2] * 10 + [0] * 10\n\n large = ray.data.range(10000, parallelism=10)\n large = large.repartition(20, shuffle=True)\n assert large._block_num_rows() == [500] * 20\n\n\ndef test_repartition_noshuffle(ray_start_regular_shared):\n ds = ray.data.range(20, parallelism=10)\n assert ds.num_blocks() == 10\n assert ds.sum() == 190\n assert ds._block_num_rows() == [2] * 10\n\n ds2 = ds.repartition(5, shuffle=False)\n assert ds2.num_blocks() == 5\n assert ds2.sum() == 190\n assert ds2._block_num_rows() == [4, 4, 4, 4, 4]\n\n ds3 = ds2.repartition(20, shuffle=False)\n assert ds3.num_blocks() == 20\n assert ds3.sum() == 190\n assert ds3._block_num_rows() == [1] * 20\n\n # Test num_partitions > num_rows\n ds4 = ds.repartition(40, shuffle=False)\n assert ds4.num_blocks() == 40\n blocks = ray.get(ds4.get_internal_block_refs())\n assert all(isinstance(block, list) for block in blocks), blocks\n assert ds4.sum() == 190\n assert ds4._block_num_rows() == ([1] * 20) + ([0] * 20)\n\n ds5 = ray.data.range(22).repartition(4)\n assert ds5.num_blocks() == 4\n assert ds5._block_num_rows() == [5, 6, 5, 6]\n\n large = ray.data.range(10000, parallelism=10)\n large = large.repartition(20)\n assert large._block_num_rows() == [500] * 20\n\n\ndef test_repartition_shuffle_arrow(ray_start_regular_shared):\n ds = ray.data.range_table(20, parallelism=10)\n assert ds.num_blocks() == 10\n assert ds.count() == 20\n assert ds._block_num_rows() == [2] * 10\n\n ds2 = ds.repartition(5, shuffle=True)\n assert ds2.num_blocks() == 5\n assert ds2.count() == 20\n assert ds2._block_num_rows() == [10, 10, 0, 0, 0]\n\n ds3 = ds2.repartition(20, shuffle=True)\n assert ds3.num_blocks() == 20\n assert ds3.count() == 20\n assert ds3._block_num_rows() == [2] * 10 + [0] * 10\n\n large = ray.data.range_table(10000, parallelism=10)\n large = large.repartition(20, shuffle=True)\n assert large._block_num_rows() == [500] * 20\n\n\ndef test_take_all(ray_start_regular_shared):\n assert ray.data.range(5).take_all() == [0, 1, 2, 3, 4]\n\n with pytest.raises(ValueError):\n assert ray.data.range(5).take_all(4)\n\n\ndef test_convert_to_pyarrow(ray_start_regular_shared, tmp_path):\n ds = ray.data.range(100)\n assert ds.to_dask().sum().compute()[0] == 4950\n path = os.path.join(tmp_path, \"test_parquet_dir\")\n os.mkdir(path)\n ds.write_parquet(path)\n assert ray.data.read_parquet(path).count() == 100\n\n\ndef test_pyarrow(ray_start_regular_shared):\n ds = ray.data.range_table(5)\n assert ds.map(lambda x: {\"b\": x[\"value\"] + 2}).take() == [\n {\"b\": 2},\n {\"b\": 3},\n {\"b\": 4},\n {\"b\": 5},\n {\"b\": 6},\n ]\n assert ds.map(lambda x: {\"b\": x[\"value\"] + 
2}).filter(\n lambda x: x[\"b\"] % 2 == 0\n ).take() == [{\"b\": 2}, {\"b\": 4}, {\"b\": 6}]\n assert ds.filter(lambda x: x[\"value\"] == 0).flat_map(\n lambda x: [{\"b\": x[\"value\"] + 2}, {\"b\": x[\"value\"] + 20}]\n ).take() == [{\"b\": 2}, {\"b\": 20}]\n\n\ndef test_read_binary_files(ray_start_regular_shared):\n with util.gen_bin_files(10) as (_, paths):\n ds = ray.data.read_binary_files(paths, parallelism=10)\n for i, item in enumerate(ds.iter_rows()):\n expected = open(paths[i], \"rb\").read()\n assert expected == item\n # Test metadata ops.\n assert ds.count() == 10\n assert \"bytes\" in str(ds.schema()), ds\n assert \"bytes\" in str(ds), ds\n\n\ndef test_read_binary_files_with_fs(ray_start_regular_shared):\n with util.gen_bin_files(10) as (tempdir, paths):\n # All the paths are absolute, so we want the root file system.\n fs, _ = pa.fs.FileSystem.from_uri(\"/\")\n ds = ray.data.read_binary_files(paths, filesystem=fs, parallelism=10)\n for i, item in enumerate(ds.iter_rows()):\n expected = open(paths[i], \"rb\").read()\n assert expected == item\n\n\ndef test_read_binary_files_with_paths(ray_start_regular_shared):\n with util.gen_bin_files(10) as (_, paths):\n ds = ray.data.read_binary_files(paths, include_paths=True, parallelism=10)\n for i, (path, item) in enumerate(ds.iter_rows()):\n assert path == paths[i]\n expected = open(paths[i], \"rb\").read()\n assert expected == item\n\n\n# TODO(Clark): Hitting S3 in CI is currently broken due to some AWS\n# credentials issue, unskip this test once that's fixed or once ported to moto.\[email protected](reason=\"Shouldn't hit S3 in CI\")\ndef test_read_binary_files_s3(ray_start_regular_shared):\n ds = ray.data.read_binary_files([\"s3://anyscale-data/small-files/0.dat\"])\n item = ds.take(1).pop()\n expected = requests.get(\n \"https://anyscale-data.s3.us-west-2.amazonaws.com/small-files/0.dat\"\n ).content\n assert item == expected\n\n\ndef test_sliding_window():\n arr = list(range(10))\n\n # Test all windows over this iterable.\n window_sizes = list(range(1, len(arr) + 1))\n for window_size in window_sizes:\n windows = list(_sliding_window(arr, window_size))\n assert len(windows) == len(arr) - window_size + 1\n assert all(len(window) == window_size for window in windows)\n assert all(\n list(window) == arr[i : i + window_size] for i, window in enumerate(windows)\n )\n\n # Test window size larger than iterable length.\n windows = list(_sliding_window(arr, 15))\n assert len(windows) == 1\n assert list(windows[0]) == arr\n\n\ndef test_iter_rows(ray_start_regular_shared):\n # Test simple rows.\n n = 10\n ds = ray.data.range(n)\n for row, k in zip(ds.iter_rows(), range(n)):\n assert row == k\n\n # Test tabular rows.\n t1 = pa.Table.from_pydict({\"one\": [1, 2, 3], \"two\": [2, 3, 4]})\n t2 = pa.Table.from_pydict({\"one\": [4, 5, 6], \"two\": [5, 6, 7]})\n t3 = pa.Table.from_pydict({\"one\": [7, 8, 9], \"two\": [8, 9, 10]})\n t4 = pa.Table.from_pydict({\"one\": [10, 11, 12], \"two\": [11, 12, 13]})\n ts = [t1, t2, t3, t4]\n t = pa.concat_tables(ts)\n ds = ray.data.from_arrow(ts)\n\n def to_pylist(table):\n pydict = table.to_pydict()\n names = table.schema.names\n pylist = [\n {column: pydict[column][row] for column in names}\n for row in range(table.num_rows)\n ]\n return pylist\n\n # Default ArrowRows.\n for row, t_row in zip(ds.iter_rows(), to_pylist(t)):\n assert isinstance(row, TableRow)\n assert isinstance(row, ArrowRow)\n assert row == t_row\n\n # PandasRows after conversion.\n pandas_ds = ds.map_batches(lambda x: x, 
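# identity UDF; batch_format=\"pandas\" re-blocks the data as DataFrames,\n    # so the rows below come back as PandasRow instead of ArrowRow\n    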
batch_format=\"pandas\")\n df = t.to_pandas()\n for row, (index, df_row) in zip(pandas_ds.iter_rows(), df.iterrows()):\n assert isinstance(row, TableRow)\n assert isinstance(row, PandasRow)\n assert row == df_row.to_dict()\n\n # Prefetch.\n for row, t_row in zip(ds.iter_rows(prefetch_blocks=1), to_pylist(t)):\n assert isinstance(row, TableRow)\n assert isinstance(row, ArrowRow)\n assert row == t_row\n\n\ndef test_iter_batches_basic(ray_start_regular_shared):\n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [2, 3, 4]})\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [5, 6, 7]})\n df3 = pd.DataFrame({\"one\": [7, 8, 9], \"two\": [8, 9, 10]})\n df4 = pd.DataFrame({\"one\": [10, 11, 12], \"two\": [11, 12, 13]})\n dfs = [df1, df2, df3, df4]\n ds = ray.data.from_pandas(dfs)\n\n # Default.\n for batch, df in zip(ds.iter_batches(batch_format=\"pandas\"), dfs):\n assert isinstance(batch, pd.DataFrame)\n assert batch.equals(df)\n\n # pyarrow.Table format.\n for batch, df in zip(ds.iter_batches(batch_format=\"pyarrow\"), dfs):\n assert isinstance(batch, pa.Table)\n assert batch.equals(pa.Table.from_pandas(df))\n\n # blocks format.\n for batch, df in zip(ds.iter_batches(batch_format=\"native\"), dfs):\n assert BlockAccessor.for_block(batch).to_pandas().equals(df)\n\n # Batch size.\n batch_size = 2\n batches = list(ds.iter_batches(batch_size=batch_size, batch_format=\"pandas\"))\n assert all(len(batch) == batch_size for batch in batches)\n assert len(batches) == math.ceil(\n (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size\n )\n assert pd.concat(batches, ignore_index=True).equals(\n pd.concat(dfs, ignore_index=True)\n )\n\n # Batch size larger than block.\n batch_size = 4\n batches = list(ds.iter_batches(batch_size=batch_size, batch_format=\"pandas\"))\n assert all(len(batch) == batch_size for batch in batches)\n assert len(batches) == math.ceil(\n (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size\n )\n assert pd.concat(batches, ignore_index=True).equals(\n pd.concat(dfs, ignore_index=True)\n )\n\n # Batch size larger than dataset.\n batch_size = 15\n batches = list(ds.iter_batches(batch_size=batch_size, batch_format=\"pandas\"))\n assert all(len(batch) == ds.count() for batch in batches)\n assert len(batches) == 1\n assert pd.concat(batches, ignore_index=True).equals(\n pd.concat(dfs, ignore_index=True)\n )\n\n # Batch size drop partial.\n batch_size = 5\n batches = list(\n ds.iter_batches(batch_size=batch_size, drop_last=True, batch_format=\"pandas\")\n )\n assert all(len(batch) == batch_size for batch in batches)\n assert len(batches) == (len(df1) + len(df2) + len(df3) + len(df4)) // batch_size\n assert pd.concat(batches, ignore_index=True).equals(\n pd.concat(dfs, ignore_index=True)[:10]\n )\n\n # Batch size don't drop partial.\n batch_size = 5\n batches = list(\n ds.iter_batches(batch_size=batch_size, drop_last=False, batch_format=\"pandas\")\n )\n assert all(len(batch) == batch_size for batch in batches[:-1])\n assert len(batches[-1]) == (len(df1) + len(df2) + len(df3) + len(df4)) % batch_size\n assert len(batches) == math.ceil(\n (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size\n )\n assert pd.concat(batches, ignore_index=True).equals(\n pd.concat(dfs, ignore_index=True)\n )\n\n # Prefetch.\n batches = list(ds.iter_batches(prefetch_blocks=1, batch_format=\"pandas\"))\n assert len(batches) == len(dfs)\n for batch, df in zip(batches, dfs):\n assert isinstance(batch, pd.DataFrame)\n assert batch.equals(df)\n\n batch_size = 2\n batches = list(\n 
ds.iter_batches(prefetch_blocks=2, batch_size=batch_size, batch_format=\"pandas\")\n )\n assert all(len(batch) == batch_size for batch in batches)\n assert len(batches) == math.ceil(\n (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size\n )\n assert pd.concat(batches, ignore_index=True).equals(\n pd.concat(dfs, ignore_index=True)\n )\n\n # Prefetch more than number of blocks.\n batches = list(ds.iter_batches(prefetch_blocks=len(dfs), batch_format=\"pandas\"))\n assert len(batches) == len(dfs)\n for batch, df in zip(batches, dfs):\n assert isinstance(batch, pd.DataFrame)\n assert batch.equals(df)\n\n # Prefetch with ray.wait.\n context = DatasetContext.get_current()\n context.actor_prefetcher_enabled = False\n batches = list(ds.iter_batches(prefetch_blocks=1, batch_format=\"pandas\"))\n assert len(batches) == len(dfs)\n for batch, df in zip(batches, dfs):\n assert isinstance(batch, pd.DataFrame)\n assert batch.equals(df)\n\n\ndef test_iter_batches_grid(ray_start_regular_shared):\n # Tests slicing, batch combining, and partial batch dropping logic over\n # a grid of dataset, batching, and dropping configurations.\n # Grid: num_blocks x num_rows_block_1 x ... x num_rows_block_N x\n # batch_size x drop_last\n seed = int(time.time())\n print(f\"Seeding RNG for test_iter_batches_grid with: {seed}\")\n random.seed(seed)\n max_num_blocks = 20\n max_num_rows_per_block = 20\n num_blocks_samples = 3\n block_sizes_samples = 3\n batch_size_samples = 3\n\n for num_blocks in np.random.randint(1, max_num_blocks + 1, size=num_blocks_samples):\n block_sizes_list = [\n np.random.randint(1, max_num_rows_per_block + 1, size=num_blocks)\n for _ in range(block_sizes_samples)\n ]\n for block_sizes in block_sizes_list:\n # Create the dataset with the given block sizes.\n dfs = []\n running_size = 0\n for block_size in block_sizes:\n dfs.append(\n pd.DataFrame(\n {\"value\": list(range(running_size, running_size + block_size))}\n )\n )\n running_size += block_size\n num_rows = running_size\n ds = ray.data.from_pandas(dfs)\n for batch_size in np.random.randint(\n 1, num_rows + 1, size=batch_size_samples\n ):\n for drop_last in (False, True):\n batches = list(\n ds.iter_batches(\n batch_size=batch_size,\n drop_last=drop_last,\n batch_format=\"pandas\",\n )\n )\n if num_rows % batch_size == 0 or not drop_last:\n # Number of batches should be equal to\n # num_rows / batch_size, rounded up.\n assert len(batches) == math.ceil(num_rows / batch_size)\n # Concatenated batches should equal the DataFrame\n # representation of the entire dataset.\n assert pd.concat(batches, ignore_index=True).equals(\n ds.to_pandas()\n )\n else:\n # Number of batches should be equal to\n # num_rows / batch_size, rounded down.\n assert len(batches) == num_rows // batch_size\n # Concatenated batches should equal the DataFrame\n # representation of the dataset with the partial batch\n # remainder sliced off.\n assert pd.concat(batches, ignore_index=True).equals(\n ds.to_pandas()[: batch_size * (num_rows // batch_size)]\n )\n if num_rows % batch_size == 0 or drop_last:\n assert all(len(batch) == batch_size for batch in batches)\n else:\n assert all(len(batch) == batch_size for batch in batches[:-1])\n assert len(batches[-1]) == num_rows % batch_size\n\n\ndef test_lazy_loading_iter_batches_exponential_rampup(ray_start_regular_shared):\n ds = ray.data.range(32, parallelism=8)\n expected_num_blocks = [1, 2, 4, 4, 8, 8, 8, 8]\n for _, expected in zip(ds.iter_batches(), expected_num_blocks):\n assert ds._plan.execute()._num_computed() == 
expected\n\n\ndef test_add_column(ray_start_regular_shared):\n ds = ray.data.range(5).add_column(\"foo\", lambda x: 1)\n assert ds.take(1) == [{\"value\": 0, \"foo\": 1}]\n\n ds = ray.data.range_table(5).add_column(\"foo\", lambda x: x[\"value\"] + 1)\n assert ds.take(1) == [{\"value\": 0, \"foo\": 1}]\n\n ds = ray.data.range_table(5).add_column(\"value\", lambda x: x[\"value\"] + 1)\n assert ds.take(2) == [{\"value\": 1}, {\"value\": 2}]\n\n with pytest.raises(ValueError):\n ds = ray.data.range(5).add_column(\"value\", 0)\n\n\ndef test_map_batch(ray_start_regular_shared, tmp_path):\n # Test input validation\n ds = ray.data.range(5)\n with pytest.raises(ValueError):\n ds.map_batches(lambda x: x + 1, batch_format=\"pyarrow\", batch_size=-1).take()\n\n # Test pandas\n df = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [2, 3, 4]})\n table = pa.Table.from_pandas(df)\n pq.write_table(table, os.path.join(tmp_path, \"test1.parquet\"))\n ds = ray.data.read_parquet(str(tmp_path))\n ds2 = ds.map_batches(lambda df: df + 1, batch_size=1, batch_format=\"pandas\")\n assert ds2._dataset_format() == \"pandas\"\n ds_list = ds2.take()\n values = [s[\"one\"] for s in ds_list]\n assert values == [2, 3, 4]\n values = [s[\"two\"] for s in ds_list]\n assert values == [3, 4, 5]\n\n # Test Pyarrow\n ds = ray.data.read_parquet(str(tmp_path))\n ds2 = ds.map_batches(lambda pa: pa, batch_size=1, batch_format=\"pyarrow\")\n assert ds2._dataset_format() == \"arrow\"\n ds_list = ds2.take()\n values = [s[\"one\"] for s in ds_list]\n assert values == [1, 2, 3]\n values = [s[\"two\"] for s in ds_list]\n assert values == [2, 3, 4]\n\n # Test batch\n size = 300\n ds = ray.data.range(size)\n ds2 = ds.map_batches(lambda df: df + 1, batch_size=17, batch_format=\"pandas\")\n assert ds2._dataset_format() == \"pandas\"\n ds_list = ds2.take(limit=size)\n for i in range(size):\n # The pandas column is \"value\", and it originally has rows from 0~299.\n # After the map batch, it should have 1~300.\n row = ds_list[i]\n assert row[\"value\"] == i + 1\n assert ds.count() == 300\n\n # Test the lambda returns different types than the batch_format\n # pandas => list block\n ds = ray.data.read_parquet(str(tmp_path))\n ds2 = ds.map_batches(lambda df: [1], batch_size=1)\n assert ds2._dataset_format() == \"simple\"\n ds_list = ds2.take()\n assert ds_list == [1, 1, 1]\n assert ds.count() == 3\n\n # pyarrow => list block\n ds = ray.data.read_parquet(str(tmp_path))\n ds2 = ds.map_batches(lambda df: [1], batch_size=1, batch_format=\"pyarrow\")\n assert ds2._dataset_format() == \"simple\"\n ds_list = ds2.take()\n assert ds_list == [1, 1, 1]\n assert ds.count() == 3\n\n # Test the wrong return value raises an exception.\n ds = ray.data.read_parquet(str(tmp_path))\n with pytest.raises(ValueError):\n ds_list = ds.map_batches(\n lambda df: 1, batch_size=2, batch_format=\"pyarrow\"\n ).take()\n\n\ndef test_map_batch_actors_preserves_order(ray_start_regular_shared):\n # Test that actor compute model preserves block order.\n ds = ray.data.range(10, parallelism=5)\n assert ds.map_batches(lambda x: x, compute=\"actors\").take() == list(range(10))\n\n\ndef test_union(ray_start_regular_shared):\n ds = ray.data.range(20, parallelism=10)\n\n # Test lazy union.\n ds = ds.union(ds, ds, ds, ds)\n assert ds.num_blocks() == 50\n assert ds.count() == 100\n assert ds.sum() == 950\n\n ds = ds.union(ds)\n assert ds.count() == 200\n assert ds.sum() == (950 * 2)\n\n # Test materialized union.\n ds2 = ray.data.from_items([1, 2, 3, 4, 5])\n assert ds2.count() == 5\n ds2 = 
ds2.union(ds2)\n assert ds2.count() == 10\n ds2 = ds2.union(ds)\n assert ds2.count() == 210\n\n\ndef test_from_dask(ray_start_regular_shared):\n import dask.dataframe as dd\n\n df = pd.DataFrame({\"one\": list(range(100)), \"two\": list(range(100))})\n ddf = dd.from_pandas(df, npartitions=10)\n ds = ray.data.from_dask(ddf)\n dfds = ds.to_pandas()\n assert df.equals(dfds)\n\n\ndef test_to_dask(ray_start_regular_shared):\n from ray.util.dask import ray_dask_get\n\n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n df = pd.concat([df1, df2])\n ds = ray.data.from_pandas([df1, df2])\n ddf = ds.to_dask()\n # Explicit Dask-on-Ray\n assert df.equals(ddf.compute(scheduler=ray_dask_get))\n # Implicit Dask-on-Ray.\n assert df.equals(ddf.compute())\n\n\ndef test_from_modin(ray_start_regular_shared):\n import modin.pandas as mopd\n\n df = pd.DataFrame(\n {\"one\": list(range(100)), \"two\": list(range(100))},\n )\n modf = mopd.DataFrame(df)\n ds = ray.data.from_modin(modf)\n dfds = ds.to_pandas()\n assert df.equals(dfds)\n\n\ndef test_to_modin(ray_start_regular_shared):\n # create two modin dataframes\n # one directly from a pandas dataframe, and\n # another from ray.dataset created from the original pandas dataframe\n #\n import modin.pandas as mopd\n\n df = pd.DataFrame(\n {\"one\": list(range(100)), \"two\": list(range(100))},\n )\n modf1 = mopd.DataFrame(df)\n ds = ray.data.from_pandas([df])\n modf2 = ds.to_modin()\n assert modf1.equals(modf2)\n\n\[email protected](\"pipelined\", [False, True])\ndef test_to_tf(ray_start_regular_shared, pipelined):\n import tensorflow as tf\n\n df1 = pd.DataFrame(\n {\"one\": [1, 2, 3], \"two\": [1.0, 2.0, 3.0], \"label\": [1.0, 2.0, 3.0]}\n )\n df2 = pd.DataFrame(\n {\"one\": [4, 5, 6], \"two\": [4.0, 5.0, 6.0], \"label\": [4.0, 5.0, 6.0]}\n )\n df3 = pd.DataFrame({\"one\": [7, 8], \"two\": [7.0, 8.0], \"label\": [7.0, 8.0]})\n df = pd.concat([df1, df2, df3])\n ds = ray.data.from_pandas([df1, df2, df3])\n ds = maybe_pipeline(ds, pipelined)\n tfd = ds.to_tf(\n label_column=\"label\",\n output_signature=(\n tf.TensorSpec(shape=(None, 2), dtype=tf.float32),\n tf.TensorSpec(shape=(None), dtype=tf.float32),\n ),\n )\n iterations = []\n for batch in tfd.as_numpy_iterator():\n iterations.append(np.concatenate((batch[0], batch[1].reshape(-1, 1)), axis=1))\n combined_iterations = np.concatenate(iterations)\n assert np.array_equal(df.values, combined_iterations)\n\n\ndef test_to_tf_feature_columns_list(ray_start_regular_shared):\n import tensorflow as tf\n\n df = pd.DataFrame({\"X1\": [1, 2, 3], \"X2\": [4, 5, 6], \"X3\": [7, 8, 9]})\n ds = ray.data.from_pandas([df])\n\n feature_columns = [\"X1\", \"X3\"]\n batch_size = 2\n dataset = ds.to_tf(\n feature_columns=feature_columns,\n output_signature=tf.TensorSpec(shape=(None, len(feature_columns))),\n batch_size=batch_size,\n )\n\n batches = list(dataset.as_numpy_iterator())\n assert len(batches) == math.ceil(len(df) / batch_size)\n assert np.array_equal(batches[0], np.array([[1, 7], [2, 8]]))\n assert np.array_equal(batches[1], np.array([[3, 9]]))\n\n\ndef test_to_tf_feature_columns_list_with_label(ray_start_regular_shared):\n import tensorflow as tf\n\n df = pd.DataFrame({\"X1\": [1, 2, 3], \"X2\": [4, 5, 6], \"Y\": [7, 8, 9]})\n ds = ray.data.from_pandas([df])\n\n feature_columns = [\"X1\", \"X2\"]\n output_signature = [\n tf.TensorSpec(shape=(None, len(feature_columns))),\n tf.TensorSpec(shape=(None)),\n ]\n batch_size = 2\n 
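# with label_column set, to_tf yields (features, labels) pairs, matching\n    # the two-element output_signature defined above\n    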
dataset = ds.to_tf(\n feature_columns=feature_columns,\n label_column=\"Y\",\n output_signature=output_signature,\n batch_size=batch_size,\n )\n\n batches = list(dataset.as_numpy_iterator())\n assert len(batches) == math.ceil(len(df) / batch_size)\n # Each batch should be a two-tuple corresponding to (features, labels).\n assert all(len(batch) == 2 for batch in batches)\n assert np.array_equal(batches[0][0], np.array([[1, 4], [2, 5]]))\n assert np.array_equal(batches[0][1], np.array([7, 8]))\n assert np.array_equal(batches[1][0], np.array([[3, 6]]))\n assert np.array_equal(batches[1][1], np.array([9]))\n\n\ndef test_to_tf_feature_columns_nested_list(ray_start_regular_shared):\n import tensorflow as tf\n\n df = pd.DataFrame({\"X1\": [1, 2, 3], \"X2\": [4, 5, 6], \"X3\": [7, 8, 9]})\n ds = ray.data.from_pandas([df])\n\n feature_columns = [[\"X1\", \"X2\"], [\"X3\"]]\n output_signature = [\n tf.TensorSpec(shape=(None, len(feature_columns[0]))),\n tf.TensorSpec(shape=(None, len(feature_columns[1]))),\n ]\n batch_size = 2\n dataset = ds.to_tf(\n feature_columns=feature_columns,\n output_signature=output_signature,\n batch_size=batch_size,\n )\n\n batches = list(dataset.as_numpy_iterator())\n assert len(batches) == math.ceil(len(df) / batch_size)\n assert all(len(batch) == len(feature_columns) for batch in batches)\n assert np.array_equal(batches[0][0], np.array([[1, 4], [2, 5]]))\n assert np.array_equal(batches[0][1], np.array([[7], [8]]))\n assert np.array_equal(batches[1][0], np.array([[3, 6]]))\n assert np.array_equal(batches[1][1], np.array([[9]]))\n\n\ndef test_to_tf_feature_columns_dict(ray_start_regular_shared):\n import tensorflow as tf\n\n df = pd.DataFrame({\"X1\": [1, 2, 3], \"X2\": [4, 5, 6], \"X3\": [7, 8, 9]})\n ds = ray.data.from_pandas([df])\n\n feature_columns = {\"A\": [\"X1\", \"X2\"], \"B\": [\"X3\"]}\n output_signature = {\n \"A\": tf.TensorSpec(shape=(None, len(feature_columns[\"A\"]))),\n \"B\": tf.TensorSpec(shape=(None, len(feature_columns[\"B\"]))),\n }\n batch_size = 2\n dataset = ds.to_tf(\n feature_columns=feature_columns, output_signature=output_signature, batch_size=2\n )\n\n batches = list(dataset.as_numpy_iterator())\n assert len(batches) == math.ceil(len(df) / batch_size)\n assert all(batch.keys() == feature_columns.keys() for batch in batches)\n assert np.array_equal(batches[0][\"A\"], np.array([[1, 4], [2, 5]]))\n assert np.array_equal(batches[0][\"B\"], np.array([[7], [8]]))\n assert np.array_equal(batches[1][\"A\"], np.array([[3, 6]]))\n assert np.array_equal(batches[1][\"B\"], np.array([[9]]))\n\n\ndef test_to_tf_feature_columns_dict_with_label(ray_start_regular_shared):\n import tensorflow as tf\n\n df = pd.DataFrame({\"X1\": [1, 2, 3], \"X2\": [4, 5, 6], \"Y\": [7, 8, 9]})\n ds = ray.data.from_pandas([df])\n\n feature_columns = {\"A\": [\"X1\", \"X2\"]}\n output_signature = (\n {\n \"A\": tf.TensorSpec(shape=(None, len(feature_columns[\"A\"]))),\n },\n tf.TensorSpec(shape=(None)),\n )\n batch_size = 2\n dataset = ds.to_tf(\n feature_columns=feature_columns,\n label_column=\"Y\",\n output_signature=output_signature,\n batch_size=2,\n )\n\n batches = list(dataset.as_numpy_iterator())\n assert len(batches) == math.ceil(len(df) / batch_size)\n # Each batch should be a two-tuple corresponding to (features, labels).\n assert all(len(batch) == 2 for batch in batches)\n assert all(features.keys() == feature_columns.keys() for features, _ in batches)\n\n features0, labels0 = batches[0]\n assert np.array_equal(features0[\"A\"], np.array([[1, 4], [2, 5]]))\n assert 
np.array_equal(labels0, np.array([7, 8]))\n\n features1, labels1 = batches[1]\n assert np.array_equal(features1[\"A\"], np.array([[3, 6]]))\n assert np.array_equal(labels1, np.array([9]))\n\n\[email protected](\"pipelined\", [False, True])\ndef test_to_torch(ray_start_regular_shared, pipelined):\n import torch\n\n df1 = pd.DataFrame(\n {\"one\": [1, 2, 3], \"two\": [1.0, 2.0, 3.0], \"label\": [1.0, 2.0, 3.0]}\n )\n df2 = pd.DataFrame(\n {\"one\": [4, 5, 6], \"two\": [4.0, 5.0, 6.0], \"label\": [4.0, 5.0, 6.0]}\n )\n df3 = pd.DataFrame({\"one\": [7, 8], \"two\": [7.0, 8.0], \"label\": [7.0, 8.0]})\n df = pd.concat([df1, df2, df3])\n ds = ray.data.from_pandas([df1, df2, df3])\n ds = maybe_pipeline(ds, pipelined)\n torchd = ds.to_torch(label_column=\"label\", batch_size=3)\n\n num_epochs = 1 if pipelined else 2\n for _ in range(num_epochs):\n iterations = []\n for batch in iter(torchd):\n iterations.append(torch.cat((batch[0], batch[1]), dim=1).numpy())\n combined_iterations = np.concatenate(iterations)\n assert np.array_equal(np.sort(df.values), np.sort(combined_iterations))\n\n\[email protected](\"input\", [\"single\", \"list\", \"dict\"])\[email protected](\"force_dtype\", [False, True])\[email protected](\"label_type\", [None, \"squeezed\", \"unsqueezed\"])\ndef test_to_torch_feature_columns(\n ray_start_regular_shared, input, force_dtype, label_type\n):\n import torch\n\n df1 = pd.DataFrame(\n {\n \"one\": [1, 2, 3],\n \"two\": [1.0, 2.0, 3.0],\n \"three\": [4.0, 5.0, 6.0],\n \"label\": [1.0, 2.0, 3.0],\n }\n )\n df2 = pd.DataFrame(\n {\n \"one\": [4, 5, 6],\n \"two\": [4.0, 5.0, 6.0],\n \"three\": [7.0, 8.0, 9.0],\n \"label\": [4.0, 5.0, 6.0],\n }\n )\n df3 = pd.DataFrame(\n {\"one\": [7, 8], \"two\": [7.0, 8.0], \"three\": [10.0, 11.0], \"label\": [7.0, 8.0]}\n )\n df = pd.concat([df1, df2, df3]).drop(\"three\", axis=1)\n ds = ray.data.from_pandas([df1, df2, df3])\n\n feature_column_dtypes = None\n label_column_dtype = None\n if force_dtype:\n label_column_dtype = torch.long\n if input == \"single\":\n feature_columns = [\"one\", \"two\"]\n if force_dtype:\n feature_column_dtypes = torch.long\n elif input == \"list\":\n feature_columns = [[\"one\"], [\"two\"]]\n if force_dtype:\n feature_column_dtypes = [torch.long, torch.long]\n elif input == \"dict\":\n feature_columns = {\"X1\": [\"one\"], \"X2\": [\"two\"]}\n if force_dtype:\n feature_column_dtypes = {\"X1\": torch.long, \"X2\": torch.long}\n\n label_column = None if label_type is None else \"label\"\n unsqueeze_label_tensor = label_type == \"unsqueezed\"\n\n torchd = ds.to_torch(\n label_column=label_column,\n feature_columns=feature_columns,\n feature_column_dtypes=feature_column_dtypes,\n label_column_dtype=label_column_dtype,\n unsqueeze_label_tensor=unsqueeze_label_tensor,\n batch_size=3,\n )\n iterations = []\n\n for batch in iter(torchd):\n features, label = batch\n\n if input == \"single\":\n assert isinstance(features, torch.Tensor)\n if force_dtype:\n assert features.dtype == torch.long\n data = features\n elif input == \"list\":\n assert isinstance(features, list)\n assert all(isinstance(item, torch.Tensor) for item in features)\n if force_dtype:\n assert all(item.dtype == torch.long for item in features)\n data = torch.cat(tuple(features), dim=1)\n elif input == \"dict\":\n assert isinstance(features, dict)\n assert all(isinstance(item, torch.Tensor) for item in features.values())\n if force_dtype:\n assert all(item.dtype == torch.long for item in features.values())\n data = torch.cat(tuple(features.values()), 
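# dict input: concatenate the per-key feature tensors column-wise\n                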
dim=1)\n\n if not label_type:\n assert label is None\n else:\n assert isinstance(label, torch.Tensor)\n if force_dtype:\n assert label.dtype == torch.long\n if unsqueeze_label_tensor:\n assert label.dim() == 2\n else:\n assert label.dim() == 1\n label = label.view(-1, 1)\n data = torch.cat((data, label), dim=1)\n iterations.append(data.numpy())\n\n combined_iterations = np.concatenate(iterations)\n if not label_type:\n df.drop(\"label\", axis=1, inplace=True)\n assert np.array_equal(df.values, combined_iterations)\n\n\ndef test_block_builder_for_block(ray_start_regular_shared):\n # list\n builder = BlockBuilder.for_block(list())\n builder.add_block([1, 2])\n assert builder.build() == [1, 2]\n builder.add_block([3, 4])\n assert builder.build() == [1, 2, 3, 4]\n\n # pandas dataframe\n builder = BlockBuilder.for_block(pd.DataFrame())\n b1 = pd.DataFrame({\"A\": [1], \"B\": [\"a\"]})\n builder.add_block(b1)\n assert builder.build().equals(b1)\n b2 = pd.DataFrame({\"A\": [2, 3], \"B\": [\"c\", \"d\"]})\n builder.add_block(b2)\n expected = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [\"a\", \"c\", \"d\"]})\n assert builder.build().equals(expected)\n\n # pyarrow table\n builder = BlockBuilder.for_block(pa.Table.from_arrays(list()))\n b1 = pa.Table.from_pydict({\"A\": [1], \"B\": [\"a\"]})\n builder.add_block(b1)\n builder.build().equals(b1)\n b2 = pa.Table.from_pydict({\"A\": [2, 3], \"B\": [\"c\", \"d\"]})\n builder.add_block(b2)\n expected = pa.Table.from_pydict({\"A\": [1, 2, 3], \"B\": [\"a\", \"c\", \"d\"]})\n builder.build().equals(expected)\n\n # wrong type\n with pytest.raises(TypeError):\n BlockBuilder.for_block(str())\n\n\ndef test_groupby_arrow(ray_start_regular_shared):\n # Test empty dataset.\n agg_ds = (\n ray.data.range_table(10)\n .filter(lambda r: r[\"value\"] > 10)\n .groupby(\"value\")\n .count()\n )\n assert agg_ds.count() == 0\n\n\ndef test_groupby_errors(ray_start_regular_shared):\n ds = ray.data.range(100)\n\n ds.groupby(None).count().show() # OK\n ds.groupby(lambda x: x % 2).count().show() # OK\n with pytest.raises(ValueError):\n ds.groupby(\"foo\").count().show()\n\n ds = ray.data.range_table(100)\n ds.groupby(None).count().show() # OK\n with pytest.raises(ValueError):\n ds.groupby(lambda x: x % 2).count().show()\n\n\ndef test_agg_errors(ray_start_regular_shared):\n ds = ray.data.range(100)\n from ray.data.aggregate import Max\n\n ds.aggregate(Max()) # OK\n ds.aggregate(Max(lambda x: x)) # OK\n with pytest.raises(ValueError):\n ds.aggregate(Max(\"foo\"))\n\n ds = ray.data.range_table(100)\n ds.aggregate(Max(\"value\")) # OK\n with pytest.raises(ValueError):\n ds.aggregate(Max())\n with pytest.raises(ValueError):\n ds.aggregate(Max(lambda x: x))\n with pytest.raises(ValueError):\n ds.aggregate(Max(\"bad_field\"))\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_agg_name_conflict(ray_start_regular_shared, num_parts):\n # Test aggregation name conflict.\n xs = list(range(100))\n grouped_ds = (\n ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs])\n .repartition(num_parts)\n .groupby(\"A\")\n )\n agg_ds = grouped_ds.aggregate(\n AggregateFn(\n init=lambda k: [0, 0],\n accumulate_row=lambda a, r: [a[0] + r[\"B\"], a[1] + 1],\n merge=lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]],\n finalize=lambda a: a[0] / a[1],\n name=\"foo\",\n ),\n AggregateFn(\n init=lambda k: [0, 0],\n accumulate_row=lambda a, r: [a[0] + r[\"B\"], a[1] + 1],\n merge=lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]],\n finalize=lambda a: a[0] / a[1],\n name=\"foo\",\n ),\n )\n assert 
agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"foo\": 49.5, \"foo_2\": 49.5},\n {\"A\": 1, \"foo\": 49.0, \"foo_2\": 49.0},\n {\"A\": 2, \"foo\": 50.0, \"foo_2\": 50.0},\n ]\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_groupby_tabular_count(ray_start_regular_shared, ds_format, num_parts):\n # Test built-in count aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_arrow_count with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n agg_ds = ds.groupby(\"A\").count()\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"count()\": 34},\n {\"A\": 1, \"count()\": 33},\n {\"A\": 2, \"count()\": 33},\n ]\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_groupby_tabular_sum(ray_start_regular_shared, ds_format, num_parts):\n # Test built-in sum aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_tabular_sum with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n\n agg_ds = ds.groupby(\"A\").sum(\"B\")\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"sum(B)\": 1683},\n {\"A\": 1, \"sum(B)\": 1617},\n {\"A\": 2, \"sum(B)\": 1650},\n ]\n\n # Test built-in sum aggregation with nans\n ds = ray.data.from_items(\n [{\"A\": (x % 3), \"B\": x} for x in xs] + [{\"A\": 0, \"B\": None}]\n ).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_grouped_ds = ds.groupby(\"A\")\n nan_agg_ds = nan_grouped_ds.sum(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"sum(B)\": 1683},\n {\"A\": 1, \"sum(B)\": 1617},\n {\"A\": 2, \"sum(B)\": 1650},\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.sum(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"sum(B)\": None},\n {\"A\": 1, \"sum(B)\": 1617},\n {\"A\": 2, \"sum(B)\": 1650},\n ]\n # Test all nans\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": None} for x in xs]).repartition(\n num_parts\n )\n nan_agg_ds = ds.groupby(\"A\").sum(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"sum(B)\": None},\n {\"A\": 1, \"sum(B)\": None},\n {\"A\": 2, \"sum(B)\": None},\n ]\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_global_tabular_sum(ray_start_regular_shared, ds_format, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_global_arrow_sum with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return 
ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n # Test built-in global sum aggregation\n ds = ray.data.from_items([{\"A\": x} for x in xs]).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.sum(\"A\") == 4950\n\n # Test empty dataset\n ds = ray.data.range_table(10)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.filter(lambda r: r[\"value\"] > 10).sum(\"value\") is None\n\n # Test built-in global sum aggregation with nans\n nan_ds = ray.data.from_items([{\"A\": x} for x in xs] + [{\"A\": None}]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.sum(\"A\") == 4950\n # Test ignore_nulls=False\n assert nan_ds.sum(\"A\", ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([{\"A\": None}] * len(xs)).repartition(num_parts)\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.sum(\"A\") is None\n assert nan_ds.sum(\"A\", ignore_nulls=False) is None\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_groupby_tabular_min(ray_start_regular_shared, ds_format, num_parts):\n # Test built-in min aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_tabular_min with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n\n agg_ds = ds.groupby(\"A\").min(\"B\")\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"min(B)\": 0},\n {\"A\": 1, \"min(B)\": 1},\n {\"A\": 2, \"min(B)\": 2},\n ]\n\n # Test built-in min aggregation with nans\n ds = ray.data.from_items(\n [{\"A\": (x % 3), \"B\": x} for x in xs] + [{\"A\": 0, \"B\": None}]\n ).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_grouped_ds = ds.groupby(\"A\")\n nan_agg_ds = nan_grouped_ds.min(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"min(B)\": 0},\n {\"A\": 1, \"min(B)\": 1},\n {\"A\": 2, \"min(B)\": 2},\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.min(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"min(B)\": None},\n {\"A\": 1, \"min(B)\": 1},\n {\"A\": 2, \"min(B)\": 2},\n ]\n # Test all nans\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": None} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_agg_ds = ds.groupby(\"A\").min(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"min(B)\": None},\n {\"A\": 1, \"min(B)\": None},\n {\"A\": 2, \"min(B)\": None},\n ]\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_global_tabular_min(ray_start_regular_shared, ds_format, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_global_arrow_min with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, 
batch_format=\"pandas\")\n\n # Test built-in global min aggregation\n ds = ray.data.from_items([{\"A\": x} for x in xs]).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.min(\"A\") == 0\n\n # Test empty dataset\n ds = ray.data.range_table(10)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.filter(lambda r: r[\"value\"] > 10).min(\"value\") is None\n\n # Test built-in global min aggregation with nans\n nan_ds = ray.data.from_items([{\"A\": x} for x in xs] + [{\"A\": None}]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.min(\"A\") == 0\n # Test ignore_nulls=False\n assert nan_ds.min(\"A\", ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([{\"A\": None}] * len(xs)).repartition(num_parts)\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.min(\"A\") is None\n assert nan_ds.min(\"A\", ignore_nulls=False) is None\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_groupby_tabular_max(ray_start_regular_shared, ds_format, num_parts):\n # Test built-in max aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_tabular_max with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n\n agg_ds = ds.groupby(\"A\").max(\"B\")\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": 99},\n {\"A\": 1, \"max(B)\": 97},\n {\"A\": 2, \"max(B)\": 98},\n ]\n\n # Test built-in min aggregation with nans\n ds = ray.data.from_items(\n [{\"A\": (x % 3), \"B\": x} for x in xs] + [{\"A\": 0, \"B\": None}]\n ).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_grouped_ds = ds.groupby(\"A\")\n nan_agg_ds = nan_grouped_ds.max(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": 99},\n {\"A\": 1, \"max(B)\": 97},\n {\"A\": 2, \"max(B)\": 98},\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.max(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": None},\n {\"A\": 1, \"max(B)\": 97},\n {\"A\": 2, \"max(B)\": 98},\n ]\n # Test all nans\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": None} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_agg_ds = ds.groupby(\"A\").max(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": None},\n {\"A\": 1, \"max(B)\": None},\n {\"A\": 2, \"max(B)\": None},\n ]\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_global_tabular_max(ray_start_regular_shared, ds_format, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_global_arrow_max with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n # Test built-in global 
max aggregation\n ds = ray.data.from_items([{\"A\": x} for x in xs]).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.max(\"A\") == 99\n\n # Test empty dataset\n ds = ray.data.range_table(10)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.filter(lambda r: r[\"value\"] > 10).max(\"value\") is None\n\n # Test built-in global max aggregation with nans\n nan_ds = ray.data.from_items([{\"A\": x} for x in xs] + [{\"A\": None}]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.max(\"A\") == 99\n # Test ignore_nulls=False\n assert nan_ds.max(\"A\", ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([{\"A\": None}] * len(xs)).repartition(num_parts)\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.max(\"A\") is None\n assert nan_ds.max(\"A\", ignore_nulls=False) is None\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_groupby_tabular_mean(ray_start_regular_shared, ds_format, num_parts):\n # Test built-in mean aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_tabular_mean with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n\n agg_ds = ds.groupby(\"A\").mean(\"B\")\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"mean(B)\": 49.5},\n {\"A\": 1, \"mean(B)\": 49.0},\n {\"A\": 2, \"mean(B)\": 50.0},\n ]\n\n # Test built-in min aggregation with nans\n ds = ray.data.from_items(\n [{\"A\": (x % 3), \"B\": x} for x in xs] + [{\"A\": 0, \"B\": None}]\n ).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_grouped_ds = ds.groupby(\"A\")\n nan_agg_ds = nan_grouped_ds.mean(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"mean(B)\": 49.5},\n {\"A\": 1, \"mean(B)\": 49.0},\n {\"A\": 2, \"mean(B)\": 50.0},\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.mean(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"mean(B)\": None},\n {\"A\": 1, \"mean(B)\": 49.0},\n {\"A\": 2, \"mean(B)\": 50.0},\n ]\n # Test all nans\n ds = ray.data.from_items([{\"A\": (x % 3), \"B\": None} for x in xs]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n nan_agg_ds = ds.groupby(\"A\").mean(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"mean(B)\": None},\n {\"A\": 1, \"mean(B)\": None},\n {\"A\": 2, \"mean(B)\": None},\n ]\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_global_tabular_mean(ray_start_regular_shared, ds_format, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_global_arrow_mean with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n # Test built-in global mean 
aggregation\n ds = ray.data.from_items([{\"A\": x} for x in xs]).repartition(num_parts)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.mean(\"A\") == 49.5\n\n # Test empty dataset\n ds = ray.data.range_table(10)\n if ds_format == \"pandas\":\n ds = _to_pandas(ds)\n assert ds.filter(lambda r: r[\"value\"] > 10).mean(\"value\") is None\n\n # Test built-in global mean aggregation with nans\n nan_ds = ray.data.from_items([{\"A\": x} for x in xs] + [{\"A\": None}]).repartition(\n num_parts\n )\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.mean(\"A\") == 49.5\n # Test ignore_nulls=False\n assert nan_ds.mean(\"A\", ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([{\"A\": None}] * len(xs)).repartition(num_parts)\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.mean(\"A\") is None\n assert nan_ds.mean(\"A\", ignore_nulls=False) is None\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", \"pandas\"])\ndef test_groupby_tabular_std(ray_start_regular_shared, ds_format, num_parts):\n # Test built-in std aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_tabular_std with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_arrow(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pyarrow\")\n\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs})\n ds = ray.data.from_pandas(df).repartition(num_parts)\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n agg_ds = ds.groupby(\"A\").std(\"B\")\n assert agg_ds.count() == 3\n result = agg_ds.to_pandas()[\"std(B)\"].to_numpy()\n expected = df.groupby(\"A\")[\"B\"].std().to_numpy()\n np.testing.assert_array_almost_equal(result, expected)\n # ddof of 0\n ds = ray.data.from_pandas(df).repartition(num_parts)\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n agg_ds = ds.groupby(\"A\").std(\"B\", ddof=0)\n assert agg_ds.count() == 3\n result = agg_ds.to_pandas()[\"std(B)\"].to_numpy()\n expected = df.groupby(\"A\")[\"B\"].std(ddof=0).to_numpy()\n np.testing.assert_array_almost_equal(result, expected)\n\n # Test built-in std aggregation with nans\n nan_df = pd.DataFrame({\"A\": [x % 3 for x in xs] + [0], \"B\": xs + [None]})\n ds = ray.data.from_pandas(nan_df).repartition(num_parts)\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n nan_grouped_ds = ds.groupby(\"A\")\n nan_agg_ds = nan_grouped_ds.std(\"B\")\n assert nan_agg_ds.count() == 3\n result = nan_agg_ds.to_pandas()[\"std(B)\"].to_numpy()\n expected = nan_df.groupby(\"A\")[\"B\"].std().to_numpy()\n np.testing.assert_array_almost_equal(result, expected)\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.std(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n result = nan_agg_ds.to_pandas()[\"std(B)\"].to_numpy()\n expected = nan_df.groupby(\"A\")[\"B\"].std()\n expected[0] = None\n np.testing.assert_array_almost_equal(result, expected)\n # Test all nans\n nan_df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": [None] * len(xs)})\n ds = ray.data.from_pandas(nan_df).repartition(num_parts)\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n nan_agg_ds = ds.groupby(\"A\").std(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n result = nan_agg_ds.to_pandas()[\"std(B)\"].to_numpy()\n expected = pd.Series([None] * 3)\n np.testing.assert_array_equal(result, expected)\n\n\[email protected](\"num_parts\", [1, 30])\[email protected](\"ds_format\", [\"arrow\", 
\"pandas\"])\ndef test_global_tabular_std(ray_start_regular_shared, ds_format, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_global_arrow_std with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n\n def _to_arrow(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pyarrow\")\n\n def _to_pandas(ds):\n return ds.map_batches(lambda x: x, batch_size=None, batch_format=\"pandas\")\n\n # Test built-in global max aggregation\n df = pd.DataFrame({\"A\": xs})\n ds = ray.data.from_pandas(df).repartition(num_parts)\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n assert math.isclose(ds.std(\"A\"), df[\"A\"].std())\n assert math.isclose(ds.std(\"A\", ddof=0), df[\"A\"].std(ddof=0))\n\n # Test empty dataset\n ds = ray.data.from_pandas(pd.DataFrame({\"A\": []}))\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n assert ds.std(\"A\") is None\n # Test edge cases\n ds = ray.data.from_pandas(pd.DataFrame({\"A\": [3]}))\n if ds_format == \"arrow\":\n ds = _to_arrow(ds)\n assert ds.std(\"A\") == 0\n\n # Test built-in global std aggregation with nans\n nan_df = pd.DataFrame({\"A\": xs + [None]})\n nan_ds = ray.data.from_pandas(nan_df).repartition(num_parts)\n if ds_format == \"arrow\":\n nan_ds = _to_arrow(nan_ds)\n assert math.isclose(nan_ds.std(\"A\"), nan_df[\"A\"].std())\n # Test ignore_nulls=False\n assert nan_ds.std(\"A\", ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([{\"A\": None}] * len(xs)).repartition(num_parts)\n if ds_format == \"pandas\":\n nan_ds = _to_pandas(nan_ds)\n assert nan_ds.std(\"A\") is None\n assert nan_ds.std(\"A\", ignore_nulls=False) is None\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_arrow_multicolumn(ray_start_regular_shared, num_parts):\n # Test built-in mean aggregation on multiple columns\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_arrow_multicolumn with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs, \"C\": [2 * x for x in xs]})\n agg_ds = (\n ray.data.from_pandas(df).repartition(num_parts).groupby(\"A\").mean([\"B\", \"C\"])\n )\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"mean(B)\": 49.5, \"mean(C)\": 99.0},\n {\"A\": 1, \"mean(B)\": 49.0, \"mean(C)\": 98.0},\n {\"A\": 2, \"mean(B)\": 50.0, \"mean(C)\": 100.0},\n ]\n\n # Test that unspecified agg column ==> agg on all columns except for\n # groupby keys.\n agg_ds = ray.data.from_pandas(df).repartition(num_parts).groupby(\"A\").mean()\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"mean(B)\": 49.5, \"mean(C)\": 99.0},\n {\"A\": 1, \"mean(B)\": 49.0, \"mean(C)\": 98.0},\n {\"A\": 2, \"mean(B)\": 50.0, \"mean(C)\": 100.0},\n ]\n\n # Test built-in global mean aggregation\n df = pd.DataFrame({\"A\": xs, \"B\": [2 * x for x in xs]})\n result_row = ray.data.from_pandas(df).repartition(num_parts).mean([\"A\", \"B\"])\n assert result_row[\"mean(A)\"] == df[\"A\"].mean()\n assert result_row[\"mean(B)\"] == df[\"B\"].mean()\n\n\ndef test_groupby_agg_bad_on(ray_start_regular_shared):\n # Test bad on for groupby aggregation\n xs = list(range(100))\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs, \"C\": [2 * x for x in xs]})\n # Wrong type.\n with pytest.raises(TypeError):\n ray.data.from_pandas(df).groupby(\"A\").mean(5)\n with 
pytest.raises(TypeError):\n ray.data.from_pandas(df).groupby(\"A\").mean([5])\n # Empty list.\n with pytest.raises(ValueError):\n ray.data.from_pandas(df).groupby(\"A\").mean([])\n # Nonexistent column.\n with pytest.raises(ValueError):\n ray.data.from_pandas(df).groupby(\"A\").mean(\"D\")\n with pytest.raises(ValueError):\n ray.data.from_pandas(df).groupby(\"A\").mean([\"B\", \"D\"])\n # Columns for simple Dataset.\n with pytest.raises(ValueError):\n ray.data.from_items(xs).groupby(lambda x: x % 3 == 0).mean(\"A\")\n\n # Test bad on for global aggregation\n # Wrong type.\n with pytest.raises(TypeError):\n ray.data.from_pandas(df).mean(5)\n with pytest.raises(TypeError):\n ray.data.from_pandas(df).mean([5])\n # Empty list.\n with pytest.raises(ValueError):\n ray.data.from_pandas(df).mean([])\n # Nonexistent column.\n with pytest.raises(ValueError):\n ray.data.from_pandas(df).mean(\"D\")\n with pytest.raises(ValueError):\n ray.data.from_pandas(df).mean([\"B\", \"D\"])\n # Columns for simple Dataset.\n with pytest.raises(ValueError):\n ray.data.from_items(xs).mean(\"A\")\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_arrow_multi_agg(ray_start_regular_shared, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_arrow_multi_agg with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs})\n agg_ds = (\n ray.data.from_pandas(df)\n .repartition(num_parts)\n .groupby(\"A\")\n .aggregate(\n Count(),\n Sum(\"B\"),\n Min(\"B\"),\n Max(\"B\"),\n Mean(\"B\"),\n Std(\"B\"),\n )\n )\n assert agg_ds.count() == 3\n agg_df = agg_ds.to_pandas()\n expected_grouped = df.groupby(\"A\")[\"B\"]\n np.testing.assert_array_equal(agg_df[\"count()\"].to_numpy(), [34, 33, 33])\n for agg in [\"sum\", \"min\", \"max\", \"mean\", \"std\"]:\n result = agg_df[f\"{agg}(B)\"].to_numpy()\n expected = getattr(expected_grouped, agg)().to_numpy()\n if agg == \"std\":\n np.testing.assert_array_almost_equal(result, expected)\n else:\n np.testing.assert_array_equal(result, expected)\n # Test built-in global std aggregation\n df = pd.DataFrame({\"A\": xs})\n\n result_row = (\n ray.data.from_pandas(df)\n .repartition(num_parts)\n .aggregate(\n Sum(\"A\"),\n Min(\"A\"),\n Max(\"A\"),\n Mean(\"A\"),\n Std(\"A\"),\n )\n )\n for agg in [\"sum\", \"min\", \"max\", \"mean\", \"std\"]:\n result = result_row[f\"{agg}(A)\"]\n expected = getattr(df[\"A\"], agg)()\n if agg == \"std\":\n assert math.isclose(result, expected)\n else:\n assert result == expected\n\n\ndef test_groupby_simple(ray_start_regular_shared):\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple with: {seed}\")\n random.seed(seed)\n parallelism = 3\n xs = [\n (\"A\", 2),\n (\"A\", 4),\n (\"A\", 9),\n (\"B\", 10),\n (\"B\", 20),\n (\"C\", 3),\n (\"C\", 5),\n (\"C\", 8),\n (\"C\", 12),\n ]\n random.shuffle(xs)\n ds = ray.data.from_items(xs, parallelism=parallelism)\n\n # Mean aggregation\n agg_ds = ds.groupby(lambda r: r[0]).aggregate(\n AggregateFn(\n init=lambda k: (0, 0),\n accumulate_row=lambda a, r: (a[0] + r[1], a[1] + 1),\n merge=lambda a1, a2: (a1[0] + a2[0], a1[1] + a2[1]),\n finalize=lambda a: a[0] / a[1],\n )\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(\"A\", 5), (\"B\", 15), (\"C\", 7)]\n\n # Test None row\n parallelism = 2\n xs = [\"A\", \"A\", \"A\", None, None, None, \"B\"]\n random.shuffle(xs)\n ds = ray.data.from_items(xs, parallelism=parallelism)\n # Count aggregation\n agg_ds 
= ds.groupby(lambda r: str(r)).aggregate(\n AggregateFn(\n init=lambda k: 0,\n accumulate_row=lambda a, r: a + 1,\n merge=lambda a1, a2: a1 + a2,\n )\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: str(r[0])).take(3) == [\n (\"A\", 3),\n (\"B\", 1),\n (\"None\", 3),\n ]\n\n # Test empty dataset.\n ds = ray.data.from_items([])\n agg_ds = ds.groupby(lambda r: r[0]).aggregate(\n AggregateFn(\n init=lambda k: 1 / 0, # should never reach here\n accumulate_row=lambda a, r: 1 / 0,\n merge=lambda a1, a2: 1 / 0,\n finalize=lambda a: 1 / 0,\n )\n )\n assert agg_ds.count() == 0\n assert agg_ds.take() == ds.take()\n agg_ds = ray.data.range(10).filter(lambda r: r > 10).groupby(lambda r: r).count()\n assert agg_ds.count() == 0\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_count(ray_start_regular_shared, num_parts):\n # Test built-in count aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_count with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items(xs).repartition(num_parts).groupby(lambda x: x % 3).count()\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 34), (1, 33), (2, 33)]\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_sum(ray_start_regular_shared, num_parts):\n # Test built-in sum aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_sum with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items(xs).repartition(num_parts).groupby(lambda x: x % 3).sum()\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 1683), (1, 1617), (2, 1650)]\n\n # Test built-in sum aggregation with nans\n nan_grouped_ds = (\n ray.data.from_items(xs + [None])\n .repartition(num_parts)\n .groupby(lambda x: int(x or 0) % 3)\n )\n nan_agg_ds = nan_grouped_ds.sum()\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [\n (0, 1683),\n (1, 1617),\n (2, 1650),\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.sum(ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [\n (0, None),\n (1, 1617),\n (2, 1650),\n ]\n # Test all nans\n nan_agg_ds = (\n ray.data.from_items([None] * len(xs))\n .repartition(num_parts)\n .groupby(lambda x: 0)\n .sum()\n )\n assert nan_agg_ds.count() == 1\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(1) == [(0, None)]\n\n # Test built-in global sum aggregation\n assert ray.data.from_items(xs).repartition(num_parts).sum() == 4950\n assert ray.data.range(10).filter(lambda r: r > 10).sum() is None\n\n # Test built-in global sum aggregation with nans\n nan_ds = ray.data.from_items(xs + [None]).repartition(num_parts)\n assert nan_ds.sum() == 4950\n # Test ignore_nulls=False\n assert nan_ds.sum(ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([None] * len(xs)).repartition(num_parts)\n assert nan_ds.sum() is None\n\n\ndef test_map_batches_combine_empty_blocks(ray_start_regular_shared):\n xs = [x % 3 for x in list(range(100))]\n\n # ds1 has 1 block which contains 100 rows.\n ds1 = ray.data.from_items(xs).repartition(1).sort().map_batches(lambda x: x)\n assert ds1._block_num_rows() == [100]\n\n # ds2 has 30 blocks, but only 3 of them are non-empty\n ds2 = ray.data.from_items(xs).repartition(30).sort().map_batches(lambda x: x)\n assert len(ds2._block_num_rows()) 
== 30\n count = sum(1 for x in ds2._block_num_rows() if x > 0)\n assert count == 3\n\n # The number of partitions should not affect the map_batches() result.\n assert ds1.take_all() == ds2.take_all()\n\n\ndef test_groupby_map_groups_for_empty_dataset(ray_start_regular_shared):\n ds = ray.data.from_items([])\n mapped = ds.groupby(lambda x: x % 3).map_groups(lambda x: [min(x) * min(x)])\n assert mapped.count() == 0\n assert mapped.take_all() == []\n\n\ndef test_groupby_map_groups_merging_empty_result(ray_start_regular_shared):\n ds = ray.data.from_items([1, 2, 3])\n # This needs to merge empty and non-empty results from different groups.\n mapped = ds.groupby(lambda x: x).map_groups(lambda x: [] if x == [1] else x)\n assert mapped.count() == 2\n assert mapped.take_all() == [2, 3]\n\n\ndef test_groupby_map_groups_merging_invalid_result(ray_start_regular_shared):\n ds = ray.data.from_items([1, 2, 3])\n grouped = ds.groupby(lambda x: x)\n\n # The UDF returns None, which is invalid.\n with pytest.raises(TypeError):\n grouped.map_groups(lambda x: None if x == [1] else x)\n\n # The UDF returns a type that's different than the input type, which is invalid.\n with pytest.raises(TypeError):\n grouped.map_groups(lambda x: pd.DataFrame([1]) if x == [1] else x)\n\n\[email protected](\"num_parts\", [1, 2, 30])\ndef test_groupby_map_groups_for_none_groupkey(ray_start_regular_shared, num_parts):\n ds = ray.data.from_items(list(range(100)))\n mapped = (\n ds.repartition(num_parts).groupby(None).map_groups(lambda x: [min(x) + max(x)])\n )\n assert mapped.count() == 1\n assert mapped.take_all() == [99]\n\n\[email protected](\"num_parts\", [1, 2, 30])\ndef test_groupby_map_groups_returning_empty_result(ray_start_regular_shared, num_parts):\n xs = list(range(100))\n mapped = (\n ray.data.from_items(xs)\n .repartition(num_parts)\n .groupby(lambda x: x % 3)\n .map_groups(lambda x: [])\n )\n assert mapped.count() == 0\n assert mapped.take_all() == []\n\n\[email protected](\"num_parts\", [1, 2, 3, 30])\ndef test_groupby_map_groups_for_list(ray_start_regular_shared, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_count with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n mapped = (\n ray.data.from_items(xs)\n .repartition(num_parts)\n .groupby(lambda x: x % 3)\n .map_groups(lambda x: [min(x) * min(x)])\n )\n assert mapped.count() == 3\n assert mapped.take_all() == [0, 1, 4]\n\n\[email protected](\"num_parts\", [1, 2, 3, 30])\ndef test_groupby_map_groups_for_pandas(ray_start_regular_shared, num_parts):\n df = pd.DataFrame({\"A\": \"a a b\".split(), \"B\": [1, 1, 3], \"C\": [4, 6, 5]})\n grouped = ray.data.from_pandas(df).repartition(num_parts).groupby(\"A\")\n\n # Normalize the numeric columns (i.e. B and C) for each group.\n mapped = grouped.map_groups(\n lambda g: g.apply(\n lambda col: col / g[col.name].sum() if col.name in [\"B\", \"C\"] else col\n )\n )\n\n # The function (i.e. 
the normalization) performed on each group doesn't\n # aggregate rows, so we still have 3 rows.\n assert mapped.count() == 3\n expected = pd.DataFrame(\n {\"A\": [\"a\", \"a\", \"b\"], \"B\": [0.5, 0.5, 1.000000], \"C\": [0.4, 0.6, 1.0]}\n )\n assert mapped.to_pandas().equals(expected)\n\n\[email protected](\"num_parts\", [1, 2, 3, 30])\ndef test_groupby_map_groups_for_arrow(ray_start_regular_shared, num_parts):\n at = pa.Table.from_pydict({\"A\": \"a a b\".split(), \"B\": [1, 1, 3], \"C\": [4, 6, 5]})\n grouped = ray.data.from_arrow(at).repartition(num_parts).groupby(\"A\")\n\n # Normalize the numeric columns (i.e. B and C) for each group.\n def normalize(at: pa.Table):\n r = at.select(\"A\")\n sb = pa.compute.sum(at.column(\"B\")).cast(pa.float64())\n r = r.append_column(\"B\", pa.compute.divide(at.column(\"B\"), sb))\n sc = pa.compute.sum(at.column(\"C\")).cast(pa.float64())\n r = r.append_column(\"C\", pa.compute.divide(at.column(\"C\"), sc))\n return r\n\n mapped = grouped.map_groups(normalize, batch_format=\"pyarrow\")\n\n # The function (i.e. the normalization) performed on each group doesn't\n # aggregate rows, so we still have 3 rows.\n assert mapped.count() == 3\n expected = pa.Table.from_pydict(\n {\"A\": [\"a\", \"a\", \"b\"], \"B\": [0.5, 0.5, 1], \"C\": [0.4, 0.6, 1]}\n )\n result = pa.Table.from_pandas(mapped.to_pandas())\n assert result.equals(expected)\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_min(ray_start_regular_shared, num_parts):\n # Test built-in min aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_min with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items(xs).repartition(num_parts).groupby(lambda x: x % 3).min()\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 0), (1, 1), (2, 2)]\n\n # Test built-in min aggregation with nans\n nan_grouped_ds = (\n ray.data.from_items(xs + [None])\n .repartition(num_parts)\n .groupby(lambda x: int(x or 0) % 3)\n )\n nan_agg_ds = nan_grouped_ds.min()\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 0), (1, 1), (2, 2)]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.min(ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, None), (1, 1), (2, 2)]\n # Test all nans\n nan_agg_ds = (\n ray.data.from_items([None] * len(xs))\n .repartition(num_parts)\n .groupby(lambda x: 0)\n .min()\n )\n assert nan_agg_ds.count() == 1\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(1) == [(0, None)]\n\n # Test built-in global min aggregation\n assert ray.data.from_items(xs).repartition(num_parts).min() == 0\n assert ray.data.range(10).filter(lambda r: r > 10).min() is None\n\n # Test built-in global min aggregation with nans\n nan_ds = ray.data.from_items(xs + [None]).repartition(num_parts)\n assert nan_ds.min() == 0\n # Test ignore_nulls=False\n assert nan_ds.min(ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([None] * len(xs)).repartition(num_parts)\n assert nan_ds.min() is None\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_max(ray_start_regular_shared, num_parts):\n # Test built-in max aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_max with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n 
ray.data.from_items(xs).repartition(num_parts).groupby(lambda x: x % 3).max()\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 99), (1, 97), (2, 98)]\n\n # Test built-in max aggregation with nans\n nan_grouped_ds = (\n ray.data.from_items(xs + [None])\n .repartition(num_parts)\n .groupby(lambda x: int(x or 0) % 3)\n )\n nan_agg_ds = nan_grouped_ds.max()\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 99), (1, 97), (2, 98)]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.max(ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, None), (1, 97), (2, 98)]\n # Test all nans\n nan_agg_ds = (\n ray.data.from_items([None] * len(xs))\n .repartition(num_parts)\n .groupby(lambda x: 0)\n .max()\n )\n assert nan_agg_ds.count() == 1\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(1) == [(0, None)]\n\n # Test built-in global max aggregation\n assert ray.data.from_items(xs).repartition(num_parts).max() == 99\n assert ray.data.range(10).filter(lambda r: r > 10).max() is None\n\n # Test built-in global max aggregation with nans\n nan_ds = ray.data.from_items(xs + [None]).repartition(num_parts)\n assert nan_ds.max() == 99\n # Test ignore_nulls=False\n assert nan_ds.max(ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([None] * len(xs)).repartition(num_parts)\n assert nan_ds.max() is None\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_mean(ray_start_regular_shared, num_parts):\n # Test built-in mean aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_mean with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items(xs).repartition(num_parts).groupby(lambda x: x % 3).mean()\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 49.5), (1, 49.0), (2, 50.0)]\n\n # Test built-in mean aggregation with nans\n nan_grouped_ds = (\n ray.data.from_items(xs + [None])\n .repartition(num_parts)\n .groupby(lambda x: int(x or 0) % 3)\n )\n nan_agg_ds = nan_grouped_ds.mean()\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [\n (0, 49.5),\n (1, 49.0),\n (2, 50.0),\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.mean(ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(3) == [\n (0, None),\n (1, 49.0),\n (2, 50.0),\n ]\n # Test all nans\n nan_agg_ds = (\n ray.data.from_items([None] * len(xs))\n .repartition(num_parts)\n .groupby(lambda x: 0)\n .mean()\n )\n assert nan_agg_ds.count() == 1\n assert nan_agg_ds.sort(key=lambda r: r[0]).take(1) == [(0, None)]\n\n # Test built-in global mean aggregation\n assert ray.data.from_items(xs).repartition(num_parts).mean() == 49.5\n # Test empty dataset\n assert ray.data.range(10).filter(lambda r: r > 10).mean() is None\n\n # Test built-in global mean aggregation with nans\n nan_ds = ray.data.from_items(xs + [None]).repartition(num_parts)\n assert nan_ds.mean() == 49.5\n # Test ignore_nulls=False\n assert nan_ds.mean(ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([None] * len(xs)).repartition(num_parts)\n assert nan_ds.mean() is None\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_std(ray_start_regular_shared, num_parts):\n # Test built-in std aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for 
test_groupby_simple_std with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items(xs).repartition(num_parts).groupby(lambda x: x % 3).std()\n )\n assert agg_ds.count() == 3\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs})\n expected = df.groupby(\"A\")[\"B\"].std()\n result = agg_ds.sort(key=lambda r: r[0]).take(3)\n groups, stds = zip(*result)\n result_df = pd.DataFrame({\"A\": list(groups), \"B\": list(stds)})\n result_df = result_df.set_index(\"A\")\n pd.testing.assert_series_equal(result_df[\"B\"], expected)\n # ddof of 0\n agg_ds = (\n ray.data.from_items(xs)\n .repartition(num_parts)\n .groupby(lambda x: x % 3)\n .std(ddof=0)\n )\n assert agg_ds.count() == 3\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs})\n expected = df.groupby(\"A\")[\"B\"].std(ddof=0)\n result = agg_ds.sort(key=lambda r: r[0]).take(3)\n groups, stds = zip(*result)\n result_df = pd.DataFrame({\"A\": list(groups), \"B\": list(stds)})\n result_df = result_df.set_index(\"A\")\n pd.testing.assert_series_equal(result_df[\"B\"], expected)\n\n # Test built-in std aggregation with nans\n nan_grouped_ds = (\n ray.data.from_items(xs + [None])\n .repartition(num_parts)\n .groupby(lambda x: int(x or 0) % 3)\n )\n nan_agg_ds = nan_grouped_ds.std()\n assert nan_agg_ds.count() == 3\n nan_df = pd.DataFrame({\"A\": [x % 3 for x in xs] + [0], \"B\": xs + [None]})\n expected = nan_df.groupby(\"A\")[\"B\"].std()\n result = nan_agg_ds.sort(key=lambda r: r[0]).take(3)\n groups, stds = zip(*result)\n result_df = pd.DataFrame({\"A\": list(groups), \"B\": list(stds)})\n result_df = result_df.set_index(\"A\")\n pd.testing.assert_series_equal(result_df[\"B\"], expected)\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.std(ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n expected = nan_df.groupby(\"A\")[\"B\"].std()\n expected[0] = None\n result = nan_agg_ds.sort(key=lambda r: r[0]).take(3)\n groups, stds = zip(*result)\n result_df = pd.DataFrame({\"A\": list(groups), \"B\": list(stds)})\n result_df = result_df.set_index(\"A\")\n pd.testing.assert_series_equal(result_df[\"B\"], expected)\n # Test all nans\n nan_agg_ds = (\n ray.data.from_items([None] * len(xs))\n .repartition(num_parts)\n .groupby(lambda x: 0)\n .std(ignore_nulls=False)\n )\n assert nan_agg_ds.count() == 1\n expected = pd.Series([None], name=\"B\")\n expected.index.rename(\"A\", inplace=True)\n result = nan_agg_ds.sort(key=lambda r: r[0]).take(1)\n groups, stds = zip(*result)\n result_df = pd.DataFrame({\"A\": list(groups), \"B\": list(stds)})\n result_df = result_df.set_index(\"A\")\n pd.testing.assert_series_equal(result_df[\"B\"], expected)\n\n # Test built-in global std aggregation\n assert math.isclose(\n ray.data.from_items(xs).repartition(num_parts).std(), pd.Series(xs).std()\n )\n # ddof of 0\n assert math.isclose(\n ray.data.from_items(xs).repartition(num_parts).std(ddof=0),\n pd.Series(xs).std(ddof=0),\n )\n\n # Test empty dataset\n assert ray.data.from_items([]).std() is None\n # Test edge cases\n assert ray.data.from_items([3]).std() == 0\n\n # Test built-in global std aggregation with nans\n nan_ds = ray.data.from_items(xs + [None]).repartition(num_parts)\n assert math.isclose(nan_ds.std(), pd.Series(xs).std())\n # Test ignore_nulls=False\n assert nan_ds.std(ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([None] * len(xs)).repartition(num_parts)\n assert nan_ds.std() is None\n\n\[email protected](\"num_parts\", [1, 30])\ndef 
test_groupby_simple_multilambda(ray_start_regular_shared, num_parts):\n # Test built-in mean aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_multilambda with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items([[x, 2 * x] for x in xs])\n .repartition(num_parts)\n .groupby(lambda x: x[0] % 3)\n .mean([lambda x: x[0], lambda x: x[1]])\n )\n assert agg_ds.count() == 3\n assert agg_ds.sort(key=lambda r: r[0]).take(3) == [\n (0, 49.5, 99.0),\n (1, 49.0, 98.0),\n (2, 50.0, 100.0),\n ]\n # Test built-in global mean aggregation\n assert ray.data.from_items([[x, 2 * x] for x in xs]).repartition(num_parts).mean(\n [lambda x: x[0], lambda x: x[1]]\n ) == (49.5, 99.0)\n assert ray.data.from_items([[x, 2 * x] for x in range(10)]).filter(\n lambda r: r[0] > 10\n ).mean([lambda x: x[0], lambda x: x[1]]) == (None, None)\n\n\[email protected](\"num_parts\", [1, 30])\ndef test_groupby_simple_multi_agg(ray_start_regular_shared, num_parts):\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_simple_multi_agg with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n df = pd.DataFrame({\"A\": [x % 3 for x in xs], \"B\": xs})\n agg_ds = (\n ray.data.from_items(xs)\n .repartition(num_parts)\n .groupby(lambda x: x % 3)\n .aggregate(\n Count(),\n Sum(),\n Min(),\n Max(),\n Mean(),\n Std(),\n )\n )\n assert agg_ds.count() == 3\n result = agg_ds.sort(key=lambda r: r[0]).take(3)\n groups, counts, sums, mins, maxs, means, stds = zip(*result)\n agg_df = pd.DataFrame(\n {\n \"groups\": list(groups),\n \"count\": list(counts),\n \"sum\": list(sums),\n \"min\": list(mins),\n \"max\": list(maxs),\n \"mean\": list(means),\n \"std\": list(stds),\n }\n )\n agg_df = agg_df.set_index(\"groups\")\n df = pd.DataFrame({\"groups\": [x % 3 for x in xs], \"B\": xs})\n expected_grouped = df.groupby(\"groups\")[\"B\"]\n np.testing.assert_array_equal(agg_df[\"count\"].to_numpy(), [34, 33, 33])\n for agg in [\"sum\", \"min\", \"max\", \"mean\", \"std\"]:\n result = agg_df[agg].to_numpy()\n expected = getattr(expected_grouped, agg)().to_numpy()\n if agg == \"std\":\n np.testing.assert_array_almost_equal(result, expected)\n else:\n np.testing.assert_array_equal(result, expected)\n # Test built-in global multi-aggregation\n result_row = (\n ray.data.from_items(xs)\n .repartition(num_parts)\n .aggregate(\n Sum(),\n Min(),\n Max(),\n Mean(),\n Std(),\n )\n )\n series = pd.Series(xs)\n for idx, agg in enumerate([\"sum\", \"min\", \"max\", \"mean\", \"std\"]):\n result = result_row[idx]\n expected = getattr(series, agg)()\n if agg == \"std\":\n assert math.isclose(result, expected)\n else:\n assert result == expected\n\n\ndef test_column_name_type_check(ray_start_regular_shared):\n df = pd.DataFrame({\"1\": np.random.rand(10), \"a\": np.random.rand(10)})\n ds = ray.data.from_pandas(df)\n expected_str = \"Dataset(num_blocks=1, num_rows=10, schema={1: float64, a: float64})\"\n assert str(ds) == expected_str, str(ds)\n df = pd.DataFrame({1: np.random.rand(10), \"a\": np.random.rand(10)})\n with pytest.raises(ValueError):\n ray.data.from_pandas(df)\n\n\ndef test_random_sample(ray_start_regular_shared):\n import math\n\n def ensure_sample_size_close(dataset, sample_percent=0.5):\n r1 = dataset.random_sample(sample_percent)\n assert math.isclose(\n r1.count(), int(dataset.count() * sample_percent), rel_tol=2, abs_tol=2\n )\n\n ds = ray.data.range(10, parallelism=2)\n ensure_sample_size_close(ds)\n\n ds = ray.data.range_table(10, 
parallelism=2)\n ensure_sample_size_close(ds)\n\n ds = ray.data.range_tensor(5, parallelism=2, shape=(2, 2))\n ensure_sample_size_close(ds)\n\n # imbalanced datasets\n ds1 = ray.data.range(1, parallelism=1)\n ds2 = ray.data.range(2, parallelism=1)\n ds3 = ray.data.range(3, parallelism=1)\n # noinspection PyTypeChecker\n ds = ds1.union(ds2).union(ds3)\n ensure_sample_size_close(ds)\n # Small datasets\n ds1 = ray.data.range(5, parallelism=5)\n ensure_sample_size_close(ds1)\n\n\ndef test_random_sample_checks(ray_start_regular_shared):\n with pytest.raises(ValueError):\n # Cannot sample -1\n ray.data.range(1).random_sample(-1)\n with pytest.raises(ValueError):\n # Cannot sample from empty dataset\n ray.data.range(0).random_sample(0.2)\n with pytest.raises(ValueError):\n # Cannot sample fraction > 1\n ray.data.range(1).random_sample(10)\n\n\[email protected](\"pipelined\", [False, True])\[email protected](\"use_push_based_shuffle\", [False, True])\ndef test_random_shuffle(shutdown_only, pipelined, use_push_based_shuffle):\n ctx = ray.data.context.DatasetContext.get_current()\n\n try:\n original = ctx.use_push_based_shuffle\n ctx.use_push_based_shuffle = use_push_based_shuffle\n\n def range(n, parallelism=200):\n ds = ray.data.range(n, parallelism=parallelism)\n if pipelined:\n pipe = ds.repeat(2)\n pipe.random_shuffle = pipe.random_shuffle_each_window\n return pipe\n else:\n return ds\n\n r1 = range(100).random_shuffle().take(999)\n r2 = range(100).random_shuffle().take(999)\n assert r1 != r2, (r1, r2)\n\n r1 = range(100, parallelism=1).random_shuffle().take(999)\n r2 = range(100, parallelism=1).random_shuffle().take(999)\n assert r1 != r2, (r1, r2)\n\n r1 = range(100).random_shuffle(num_blocks=1).take(999)\n r2 = range(100).random_shuffle(num_blocks=1).take(999)\n assert r1 != r2, (r1, r2)\n\n r0 = range(100, parallelism=5).take(999)\n r1 = range(100, parallelism=5).random_shuffle(seed=0).take(999)\n r2 = range(100, parallelism=5).random_shuffle(seed=0).take(999)\n r3 = range(100, parallelism=5).random_shuffle(seed=12345).take(999)\n assert r1 == r2, (r1, r2)\n assert r1 != r0, (r1, r0)\n assert r1 != r3, (r1, r3)\n\n r0 = ray.data.range_table(100, parallelism=5).take(999)\n r1 = ray.data.range_table(100, parallelism=5).random_shuffle(seed=0).take(999)\n r2 = ray.data.range_table(100, parallelism=5).random_shuffle(seed=0).take(999)\n assert r1 == r2, (r1, r2)\n assert r1 != r0, (r1, r0)\n\n # Test move.\n ds = range(100, parallelism=2)\n r1 = ds.random_shuffle().take(999)\n if pipelined:\n with pytest.raises(RuntimeError):\n ds = ds.map(lambda x: x).take(999)\n else:\n ds = ds.map(lambda x: x).take(999)\n r2 = range(100).random_shuffle().take(999)\n assert r1 != r2, (r1, r2)\n\n # Test empty dataset.\n ds = ray.data.from_items([])\n r1 = ds.random_shuffle()\n assert r1.count() == 0\n assert r1.take() == ds.take()\n finally:\n ctx.use_push_based_shuffle = original\n\n\ndef test_random_shuffle_check_random(shutdown_only):\n # Rows from the same input should not be contiguous in the final output.\n num_files = 10\n num_rows = 100\n items = [i for i in range(num_files) for _ in range(num_rows)]\n ds = ray.data.from_items(items, parallelism=num_files)\n out = ds.random_shuffle().take(num_files * num_rows)\n for i in range(num_files):\n part = out[i * num_rows : (i + 1) * num_rows]\n seen = set()\n num_contiguous = 1\n prev = -1\n for x in part:\n if prev != x:\n prev = x\n num_contiguous = 1\n else:\n num_contiguous += 1\n assert num_contiguous < (\n num_rows / num_files\n ), f\"{part} contains too 
many contiguous rows from same input block\"\n seen.add(x)\n assert (\n set(range(num_files)) == seen\n ), f\"{part} does not contain elements from all input blocks\"\n\n # Rows from the same input should appear in a different order in the\n # output.\n num_files = 10\n num_rows = 100\n items = [j for i in range(num_files) for j in range(num_rows)]\n ds = ray.data.from_items(items, parallelism=num_files)\n out = ds.random_shuffle().take(num_files * num_rows)\n for i in range(num_files):\n part = out[i * num_rows : (i + 1) * num_rows]\n num_increasing = 0\n prev = -1\n for x in part:\n if x >= prev:\n num_increasing += 1\n else:\n assert num_increasing < (\n num_rows / num_files\n ), f\"{part} contains non-shuffled rows from input blocks\"\n num_increasing = 0\n prev = x\n\n\[email protected](\"use_push_based_shuffle\", [False, True])\ndef test_random_shuffle_spread(ray_start_cluster, use_push_based_shuffle):\n ctx = ray.data.context.DatasetContext.get_current()\n try:\n original = ctx.use_push_based_shuffle\n ctx.use_push_based_shuffle = use_push_based_shuffle\n\n cluster = ray_start_cluster\n cluster.add_node(\n resources={\"bar:1\": 100},\n num_cpus=10,\n _system_config={\"max_direct_call_object_size\": 0},\n )\n cluster.add_node(resources={\"bar:2\": 100}, num_cpus=10)\n cluster.add_node(resources={\"bar:3\": 100}, num_cpus=0)\n\n ray.init(cluster.address)\n\n @ray.remote\n def get_node_id():\n return ray.get_runtime_context().node_id.hex()\n\n node1_id = ray.get(get_node_id.options(resources={\"bar:1\": 1}).remote())\n node2_id = ray.get(get_node_id.options(resources={\"bar:2\": 1}).remote())\n\n ds = ray.data.range(100, parallelism=2).random_shuffle()\n blocks = ds.get_internal_block_refs()\n ray.wait(blocks, num_returns=len(blocks), fetch_local=False)\n location_data = ray.experimental.get_object_locations(blocks)\n locations = []\n for block in blocks:\n locations.extend(location_data[block][\"node_ids\"])\n assert \"2 nodes used\" in ds.stats()\n\n if not use_push_based_shuffle:\n # We don't check this for push-based shuffle since it will try to\n # colocate reduce tasks to improve locality.\n assert set(locations) == {node1_id, node2_id}\n\n finally:\n ctx.use_push_based_shuffle = original\n\n\ndef test_parquet_read_spread(ray_start_cluster, tmp_path):\n cluster = ray_start_cluster\n cluster.add_node(\n resources={\"bar:1\": 100},\n num_cpus=10,\n _system_config={\"max_direct_call_object_size\": 0},\n )\n cluster.add_node(resources={\"bar:2\": 100}, num_cpus=10)\n cluster.add_node(resources={\"bar:3\": 100}, num_cpus=0)\n\n ray.init(cluster.address)\n\n @ray.remote\n def get_node_id():\n return ray.get_runtime_context().node_id.hex()\n\n node1_id = ray.get(get_node_id.options(resources={\"bar:1\": 1}).remote())\n node2_id = ray.get(get_node_id.options(resources={\"bar:2\": 1}).remote())\n\n data_path = str(tmp_path)\n df1 = pd.DataFrame({\"one\": list(range(100)), \"two\": list(range(100, 200))})\n path1 = os.path.join(data_path, \"test1.parquet\")\n df1.to_parquet(path1)\n df2 = pd.DataFrame({\"one\": list(range(300, 400)), \"two\": list(range(400, 500))})\n path2 = os.path.join(data_path, \"test2.parquet\")\n df2.to_parquet(path2)\n\n ds = ray.data.read_parquet(data_path)\n\n # Force reads.\n blocks = ds.get_internal_block_refs()\n assert len(blocks) == 2\n\n ray.wait(blocks, num_returns=len(blocks), fetch_local=False)\n location_data = ray.experimental.get_object_locations(blocks)\n locations = []\n for block in blocks:\n locations.extend(location_data[block][\"node_ids\"])\n 
assert set(locations) == {node1_id, node2_id}\n\n\[email protected]\nclass Counter:\n def __init__(self):\n self.value = 0\n\n def increment(self):\n self.value += 1\n return self.value\n\n\nclass FlakyCSVDatasource(CSVDatasource):\n def __init__(self):\n self.counter = Counter.remote()\n\n def _read_stream(self, f: \"pa.NativeFile\", path: str, **reader_args):\n count = self.counter.increment.remote()\n if ray.get(count) == 1:\n raise ValueError(\"oops\")\n else:\n for block in CSVDatasource._read_stream(self, f, path, **reader_args):\n yield block\n\n def _write_block(self, f: \"pa.NativeFile\", block: BlockAccessor, **writer_args):\n count = self.counter.increment.remote()\n if ray.get(count) == 1:\n raise ValueError(\"oops\")\n else:\n CSVDatasource._write_block(self, f, block, **writer_args)\n\n\ndef test_dataset_retry_exceptions(ray_start_regular, local_path):\n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n path1 = os.path.join(local_path, \"test1.csv\")\n df1.to_csv(path1, index=False, storage_options={})\n ds1 = ray.data.read_datasource(FlakyCSVDatasource(), parallelism=1, paths=path1)\n ds1.write_datasource(FlakyCSVDatasource(), path=local_path, dataset_uuid=\"data\")\n assert df1.equals(\n pd.read_csv(os.path.join(local_path, \"data_000000.csv\"), storage_options={})\n )\n\n counter = Counter.remote()\n\n def flaky_mapper(x):\n count = counter.increment.remote()\n if ray.get(count) == 1:\n raise ValueError(\"oops\")\n else:\n return ray.get(count)\n\n assert sorted(ds1.map(flaky_mapper).take()) == [2, 3, 4]\n\n with pytest.raises(ValueError):\n ray.data.read_datasource(\n FlakyCSVDatasource(),\n parallelism=1,\n paths=path1,\n ray_remote_args={\"retry_exceptions\": False},\n ).take()\n\n\ndef test_datasource(ray_start_regular):\n source = ray.data.datasource.RandomIntRowDatasource()\n assert len(ray.data.read_datasource(source, n=10, num_columns=2).take()) == 10\n source = ray.data.datasource.RangeDatasource()\n assert ray.data.read_datasource(source, n=10).take() == list(range(10))\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.testing.assert_equal", "pandas.concat", "pandas.testing.assert_series_equal", "numpy.testing.assert_array_almost_equal", "numpy.array_equal", "pandas.Series", "torch.cat", "numpy.arange", "numpy.int32", "pandas.DataFrame", "torch.tensor", "numpy.testing.assert_array_equal", "numpy.concatenate", "numpy.sort", "numpy.random.rand", "numpy.array", "tensorflow.TensorSpec", "numpy.random.randint" ] ]
normster/weird-airflow
[ "c39e4f6aa45d0f7581410e9c0f42ab56cbf30785" ]
[ "airflow/www/views.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom past.utils import old_div\nfrom past.builtins import basestring, unicode\n\nimport os\nimport pkg_resources\nimport socket\nimport importlib\nfrom functools import wraps\nfrom datetime import datetime, timedelta\nimport dateutil.parser\nimport copy\nfrom itertools import chain, product\nimport json\nfrom lxml import html\n\nimport inspect\nimport traceback\n\nimport sqlalchemy as sqla\nfrom sqlalchemy import or_, desc, and_, union_all\n\nfrom flask import (\n redirect, url_for, request, Markup, Response, current_app, render_template, make_response)\nfrom flask_admin import BaseView, expose, AdminIndexView\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin.actions import action\nfrom flask_admin.babel import lazy_gettext\nfrom flask_admin.tools import iterdecode\nfrom flask_login import flash\nfrom flask._compat import PY2\n\nimport jinja2\nimport markdown\nimport nvd3\n\nfrom wtforms import (\n Form, SelectField, TextAreaField, PasswordField, StringField, validators)\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import HtmlFormatter\n\nimport airflow\nfrom airflow import configuration as conf\nfrom airflow import models\nfrom airflow import settings\nfrom airflow.exceptions import AirflowException\nfrom airflow.settings import Session\nfrom airflow.models import XCom\n\nfrom airflow.models import BaseOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\n\nfrom airflow.utils.logging import LoggingMixin\nfrom airflow.utils.json import json_ser\nfrom airflow.utils.state import State\nfrom airflow.utils.db import provide_session\nfrom airflow.utils.helpers import alchemy_to_dict\nfrom airflow.utils import logging as log_utils\nfrom airflow.www import utils as wwwutils\nfrom airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm\n\nQUERY_LIMIT = 100000\nCHART_LIMIT = 200000\n\ndagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))\n\nlogin_required = airflow.login.login_required\ncurrent_user = airflow.login.current_user\nlogout_user = airflow.login.logout_user\n\nFILTER_BY_OWNER = False\n\nDEFAULT_SENSITIVE_VARIABLE_FIELDS = (\n 'password',\n 'secret',\n 'passwd',\n 'authorization',\n 'api_key',\n 'apikey',\n 'access_token',\n)\n\nif conf.getboolean('webserver', 'FILTER_BY_OWNER'):\n # filter_by_owner if authentication is enabled and filter_by_owner is true\n FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']\n\n\ndef dag_link(v, c, m, p):\n url = url_for(\n 'airflow.graph',\n dag_id=m.dag_id)\n return Markup(\n '<a href=\"{url}\">{m.dag_id}</a>'.format(**locals()))\n\n\ndef log_url_formatter(v, c, m, p):\n return Markup(\n '<a href=\"{m.log_url}\">'\n ' <span class=\"glyphicon glyphicon-book\" aria-hidden=\"true\">'\n '</span></a>').format(**locals())\n\n\ndef task_instance_link(v, c, m, p):\n url = url_for(\n 'airflow.task',\n dag_id=m.dag_id,\n task_id=m.task_id,\n execution_date=m.execution_date.isoformat())\n 
url_root = url_for(\n 'airflow.graph',\n dag_id=m.dag_id,\n root=m.task_id,\n execution_date=m.execution_date.isoformat())\n return Markup(\n \"\"\"\n <span style=\"white-space: nowrap;\">\n <a href=\"{url}\">{m.task_id}</a>\n <a href=\"{url_root}\" title=\"Filter on this task and upstream\">\n <span class=\"glyphicon glyphicon-filter\" style=\"margin-left: 0px;\"\n aria-hidden=\"true\"></span>\n </a>\n </span>\n \"\"\".format(**locals()))\n\n\ndef state_token(state):\n color = State.color(state)\n return Markup(\n '<span class=\"label\" style=\"background-color:{color};\">'\n '{state}</span>'.format(**locals()))\n\n\ndef state_f(v, c, m, p):\n return state_token(m.state)\n\n\ndef duration_f(v, c, m, p):\n if m.end_date and m.duration:\n return timedelta(seconds=m.duration)\n\n\ndef datetime_f(v, c, m, p):\n attr = getattr(m, p)\n dttm = attr.isoformat() if attr else ''\n if datetime.now().isoformat()[:4] == dttm[:4]:\n dttm = dttm[5:]\n return Markup(\"<nobr>{}</nobr>\".format(dttm))\n\n\ndef nobr_f(v, c, m, p):\n return Markup(\"<nobr>{}</nobr>\".format(getattr(m, p)))\n\n\ndef label_link(v, c, m, p):\n try:\n default_params = eval(m.default_params)\n except:\n default_params = {}\n url = url_for(\n 'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,\n **default_params)\n return Markup(\"<a href='{url}'>{m.label}</a>\".format(**locals()))\n\n\ndef pool_link(v, c, m, p):\n url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool\n return Markup(\"<a href='{url}'>{m.pool}</a>\".format(**locals()))\n\n\ndef pygment_html_render(s, lexer=lexers.TextLexer):\n return highlight(\n s,\n lexer(),\n HtmlFormatter(linenos=True),\n )\n\n\ndef render(obj, lexer):\n out = \"\"\n if isinstance(obj, basestring):\n out += pygment_html_render(obj, lexer)\n elif isinstance(obj, (tuple, list)):\n for i, s in enumerate(obj):\n out += \"<div>List item #{}</div>\".format(i)\n out += \"<div>\" + pygment_html_render(s, lexer) + \"</div>\"\n elif isinstance(obj, dict):\n for k, v in obj.items():\n out += '<div>Dict item \"{}\"</div>'.format(k)\n out += \"<div>\" + pygment_html_render(v, lexer) + \"</div>\"\n return out\n\n\ndef wrapped_markdown(s):\n return '<div class=\"rich_doc\">' + markdown.markdown(s) + \"</div>\"\n\n\nattr_renderer = {\n 'bash_command': lambda x: render(x, lexers.BashLexer),\n 'hql': lambda x: render(x, lexers.SqlLexer),\n 'sql': lambda x: render(x, lexers.SqlLexer),\n 'doc': lambda x: render(x, lexers.TextLexer),\n 'doc_json': lambda x: render(x, lexers.JsonLexer),\n 'doc_rst': lambda x: render(x, lexers.RstLexer),\n 'doc_yaml': lambda x: render(x, lexers.YamlLexer),\n 'doc_md': wrapped_markdown,\n 'python_callable': lambda x: render(\n inspect.getsource(x), lexers.PythonLexer),\n}\n\n\ndef data_profiling_required(f):\n '''\n Decorator for views requiring data profiling access\n '''\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if (\n current_app.config['LOGIN_DISABLED'] or\n (not current_user.is_anonymous() and current_user.data_profiling())\n ):\n return f(*args, **kwargs)\n else:\n flash(\"This page requires data profiling privileges\", \"error\")\n return redirect(url_for('admin.index'))\n return decorated_function\n\n\ndef fused_slots(v, c, m, p):\n url = (\n '/admin/taskinstance/' +\n '?flt1_pool_equals=' + m.pool +\n '&flt2_state_equals=running')\n return Markup(\"<a href='{0}'>{1}</a>\".format(url, m.used_slots()))\n\n\ndef fqueued_slots(v, c, m, p):\n url = (\n '/admin/taskinstance/' +\n '?flt1_pool_equals=' + m.pool +\n 
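# attr_renderer above is a plain dispatch table: attribute name -> callable
# that syntax-highlights the value with Pygments. The same pattern in
# isolation, with the field names trimmed to a few illustrative entries.
from pygments import highlight
from pygments.lexers import BashLexer, PythonLexer, SqlLexer
from pygments.formatters import HtmlFormatter

def html_render(source, lexer_cls):
    # line numbers on, as in pygment_html_render() above
    return highlight(source, lexer_cls(), HtmlFormatter(linenos=True))

renderers = {
    'bash_command': lambda s: html_render(s, BashLexer),
    'sql': lambda s: html_render(s, SqlLexer),
    'python_callable': lambda s: html_render(s, PythonLexer),
}

if __name__ == '__main__':
    print(renderers['sql']('SELECT 1;')[:80])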
'&flt2_state_equals=queued&sort=10&desc=1')\n return Markup(\"<a href='{0}'>{1}</a>\".format(url, m.queued_slots()))\n\n\ndef recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):\n if isinstance(tasks, list):\n for task in tasks:\n recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)\n return\n if isinstance(tasks, SubDagOperator):\n subtasks = tasks.subdag.tasks\n dag_ids.append(tasks.subdag.dag_id)\n for subtask in subtasks:\n if subtask.task_id not in task_ids:\n task_ids.append(subtask.task_id)\n task_id_to_dag[subtask.task_id] = tasks.subdag\n recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)\n if isinstance(tasks, BaseOperator):\n task_id_to_dag[tasks.task_id] = tasks.dag\n\n\ndef should_hide_value_for_key(key_name):\n return any(s in key_name for s in DEFAULT_SENSITIVE_VARIABLE_FIELDS) \\\n and conf.getboolean('admin', 'hide_sensitive_variable_fields')\n\n\nclass Airflow(BaseView):\n\n def is_visible(self):\n return False\n\n @expose('/')\n @login_required\n def index(self):\n return self.render('airflow/dags.html')\n\n @expose('/chart_data')\n @data_profiling_required\n @wwwutils.gzipped\n # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)\n def chart_data(self):\n from airflow import macros\n import pandas as pd\n session = settings.Session()\n chart_id = request.args.get('chart_id')\n csv = request.args.get('csv') == \"true\"\n chart = session.query(models.Chart).filter_by(id=chart_id).first()\n db = session.query(\n models.Connection).filter_by(conn_id=chart.conn_id).first()\n session.expunge_all()\n session.commit()\n session.close()\n\n payload = {}\n payload['state'] = 'ERROR'\n payload['error'] = ''\n\n # Processing templated fields\n try:\n args = eval(chart.default_params)\n if type(args) is not type(dict()):\n raise AirflowException('Not a dict')\n except:\n args = {}\n payload['error'] += (\n \"Default params is not valid, string has to evaluate as \"\n \"a Python dictionary. \")\n\n request_dict = {k: request.args.get(k) for k in request.args}\n args.update(request_dict)\n args['macros'] = macros\n sql = jinja2.Template(chart.sql).render(**args)\n label = jinja2.Template(chart.label).render(**args)\n payload['sql_html'] = Markup(highlight(\n sql,\n lexers.SqlLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n payload['label'] = label\n\n pd.set_option('display.max_colwidth', 100)\n hook = db.get_hook()\n try:\n df = hook.get_pandas_df(\n wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))\n df = df.fillna(0)\n except Exception as e:\n payload['error'] += \"SQL execution failed. Details: \" + str(e)\n\n if csv:\n return Response(\n response=df.to_csv(index=False),\n status=200,\n mimetype=\"application/text\")\n\n if not payload['error'] and len(df) == CHART_LIMIT:\n payload['warning'] = (\n \"Data has been truncated to {0}\"\n \" rows. Expect incomplete results.\").format(CHART_LIMIT)\n\n if not payload['error'] and len(df) == 0:\n payload['error'] += \"Empty result set. \"\n elif (\n not payload['error'] and\n chart.sql_layout == 'series' and\n chart.chart_type != \"datatable\" and\n len(df.columns) < 3):\n payload['error'] += \"SQL needs to return at least 3 columns. \"\n elif (\n not payload['error'] and\n chart.sql_layout == 'columns'and\n len(df.columns) < 2):\n payload['error'] += \"SQL needs to return at least 2 columns. 
\"\n elif not payload['error']:\n import numpy as np\n chart_type = chart.chart_type\n\n data = None\n if chart.show_datatable or chart_type == \"datatable\":\n data = df.to_dict(orient=\"split\")\n data['columns'] = [{'title': c} for c in data['columns']]\n payload['data'] = data\n\n # Trying to convert time to something Highcharts likes\n x_col = 1 if chart.sql_layout == 'series' else 0\n if chart.x_is_date:\n try:\n # From string to datetime\n df[df.columns[x_col]] = pd.to_datetime(\n df[df.columns[x_col]])\n df[df.columns[x_col]] = df[df.columns[x_col]].apply(\n lambda x: int(x.strftime(\"%s\")) * 1000)\n except Exception as e:\n payload['error'] = \"Time conversion failed\"\n\n if chart_type == 'datatable':\n payload['state'] = 'SUCCESS'\n return wwwutils.json_response(payload)\n else:\n if chart.sql_layout == 'series':\n # User provides columns (series, x, y)\n xaxis_label = df.columns[1]\n yaxis_label = df.columns[2]\n df[df.columns[2]] = df[df.columns[2]].astype(np.float)\n df = df.pivot_table(\n index=df.columns[1],\n columns=df.columns[0],\n values=df.columns[2], aggfunc=np.sum)\n else:\n # User provides columns (x, y, metric1, metric2, ...)\n xaxis_label = df.columns[0]\n yaxis_label = 'y'\n df.index = df[df.columns[0]]\n df = df.sort(df.columns[0])\n del df[df.columns[0]]\n for col in df.columns:\n df[col] = df[col].astype(np.float)\n\n df = df.fillna(0)\n NVd3ChartClass = chart_mapping.get(chart.chart_type)\n NVd3ChartClass = getattr(nvd3, NVd3ChartClass)\n nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)\n\n for col in df.columns:\n nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())\n try:\n nvd3_chart.buildcontent()\n payload['chart_type'] = nvd3_chart.__class__.__name__\n payload['htmlcontent'] = nvd3_chart.htmlcontent\n except Exception as e:\n payload['error'] = str(e)\n\n payload['state'] = 'SUCCESS'\n payload['request_dict'] = request_dict\n return wwwutils.json_response(payload)\n\n @expose('/chart')\n @data_profiling_required\n def chart(self):\n session = settings.Session()\n chart_id = request.args.get('chart_id')\n embed = request.args.get('embed')\n chart = session.query(models.Chart).filter_by(id=chart_id).first()\n session.expunge_all()\n session.commit()\n session.close()\n\n NVd3ChartClass = chart_mapping.get(chart.chart_type)\n if not NVd3ChartClass:\n flash(\n \"Not supported anymore as the license was incompatible, \"\n \"sorry\",\n \"danger\")\n redirect('/admin/chart/')\n\n sql = \"\"\n if chart.show_sql:\n sql = Markup(highlight(\n chart.sql,\n lexers.SqlLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/nvd3.html',\n chart=chart,\n title=\"Airflow - Chart\",\n sql=sql,\n label=chart.label,\n embed=embed)\n\n @expose('/dag_stats')\n #@login_required\n def dag_stats(self):\n states = [\n State.SUCCESS,\n State.RUNNING,\n State.FAILED,\n State.UPSTREAM_FAILED,\n State.UP_FOR_RETRY,\n State.QUEUED,\n ]\n task_ids = []\n dag_ids = []\n for dag in dagbag.dags.values():\n task_ids += dag.task_ids\n if not dag.is_subdag:\n dag_ids.append(dag.dag_id)\n\n TI = models.TaskInstance\n DagRun = models.DagRun\n session = Session()\n\n LastDagRun = (\n session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))\n .group_by(DagRun.dag_id)\n .subquery('last_dag_run')\n )\n RunningDagRun = (\n session.query(DagRun.dag_id, DagRun.execution_date)\n .filter(DagRun.state == State.RUNNING)\n .subquery('running_dag_run')\n )\n\n # Select all task_instances from active 
dag_runs.\n # If no dag_run is active, return task instances from most recent dag_run.\n LastTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(LastDagRun, and_(\n LastDagRun.c.dag_id == TI.dag_id,\n LastDagRun.c.execution_date == TI.execution_date))\n .filter(TI.task_id.in_(task_ids))\n .filter(TI.dag_id.in_(dag_ids))\n )\n RunningTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(RunningDagRun, and_(\n RunningDagRun.c.dag_id == TI.dag_id,\n RunningDagRun.c.execution_date == TI.execution_date))\n .filter(TI.task_id.in_(task_ids))\n .filter(TI.dag_id.in_(dag_ids))\n )\n\n UnionTI = union_all(LastTI, RunningTI).alias('union_ti')\n qry = (\n session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())\n .group_by(UnionTI.c.dag_id, UnionTI.c.state)\n )\n\n data = {}\n for dag_id, state, count in qry:\n if dag_id not in data:\n data[dag_id] = {}\n data[dag_id][state] = count\n session.commit()\n session.close()\n\n payload = {}\n for dag in dagbag.dags.values():\n payload[dag.safe_dag_id] = []\n for state in states:\n try:\n count = data[dag.dag_id][state]\n except:\n count = 0\n d = {\n 'state': state,\n 'count': count,\n 'dag_id': dag.dag_id,\n 'color': State.color(state)\n }\n payload[dag.safe_dag_id].append(d)\n return wwwutils.json_response(payload)\n\n @expose('/code')\n @login_required\n def code(self):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n title = dag_id\n try:\n m = importlib.import_module(dag.module_name)\n code = inspect.getsource(m)\n html_code = highlight(\n code, lexers.PythonLexer(), HtmlFormatter(linenos=True))\n except IOError as e:\n html_code = str(e)\n\n return self.render(\n 'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,\n root=request.args.get('root'),\n demo_mode=conf.getboolean('webserver', 'demo_mode'))\n\n @expose('/dag_details')\n @login_required\n def dag_details(self):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n title = \"DAG details\"\n\n session = settings.Session()\n TI = models.TaskInstance\n states = (\n session.query(TI.state, sqla.func.count(TI.dag_id))\n .filter(TI.dag_id == dag_id)\n .group_by(TI.state)\n .all()\n )\n return self.render(\n 'airflow/dag_details.html',\n dag=dag, title=title, states=states, State=State)\n\n @current_app.errorhandler(404)\n def circles(self):\n return render_template(\n 'airflow/circles.html', hostname=socket.getfqdn()), 404\n\n @current_app.errorhandler(500)\n def show_traceback(self):\n from airflow.utils import asciiart as ascii_\n return render_template(\n 'airflow/traceback.html',\n hostname=socket.getfqdn(),\n nukular=ascii_.nukular,\n info=traceback.format_exc()), 500\n\n @expose('/sandbox')\n @login_required\n def sandbox(self):\n title = \"Sandbox Suggested Configuration\"\n cfg_loc = conf.AIRFLOW_CONFIG + '.sandbox'\n f = open(cfg_loc, 'r')\n config = f.read()\n f.close()\n code_html = Markup(highlight(\n config,\n lexers.IniLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/code.html',\n code_html=code_html, title=title, subtitle=cfg_loc)\n\n @expose('/noaccess')\n def noaccess(self):\n return self.render('airflow/noaccess.html')\n\n @expose('/headers')\n def headers(self):\n d = {\n 'headers': {k: v for k, v in request.headers},\n }\n if hasattr(current_user, 'is_superuser'):\n d['is_superuser'] = current_user.is_superuser()\n d['data_profiling'] = current_user.data_profiling()\n d['is_anonymous'] = 
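# dag_stats() above UNION ALLs "task instances of the last dag run" with
# "task instances of running dag runs", then counts rows per (dag_id, state).
# A Core-level sketch of that aggregation (SQLAlchemy 1.4+ select() style)
# against an in-memory table; the schema here is illustrative, not Airflow's.
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
md = sa.MetaData()
ti = sa.Table('ti', md,
              sa.Column('dag_id', sa.String),
              sa.Column('state', sa.String),
              sa.Column('src', sa.String))  # 'last' or 'running'
md.create_all(engine)

with engine.begin() as conn:
    conn.execute(ti.insert(), [
        {'dag_id': 'd1', 'state': 'success', 'src': 'last'},
        {'dag_id': 'd1', 'state': 'running', 'src': 'running'},
        {'dag_id': 'd2', 'state': 'failed', 'src': 'last'},
    ])
    last = sa.select(ti.c.dag_id, ti.c.state).where(ti.c.src == 'last')
    running = sa.select(ti.c.dag_id, ti.c.state).where(ti.c.src == 'running')
    union_ti = sa.union_all(last, running).subquery('union_ti')
    qry = (sa.select(union_ti.c.dag_id, union_ti.c.state, sa.func.count())
           .group_by(union_ti.c.dag_id, union_ti.c.state))
    for dag_id, state, count in conn.execute(qry):
        print(dag_id, state, count)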
current_user.is_anonymous()\n d['is_authenticated'] = current_user.is_authenticated()\n if hasattr(current_user, 'username'):\n d['username'] = current_user.username\n return wwwutils.json_response(d)\n\n @expose('/pickle_info')\n def pickle_info(self):\n d = {}\n dag_id = request.args.get('dag_id')\n dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()\n for dag in dags:\n if not dag.is_subdag:\n d[dag.dag_id] = dag.pickle_info()\n return wwwutils.json_response(d)\n\n @expose('/login', methods=['GET', 'POST'])\n def login(self):\n return airflow.login.login(self, request)\n\n @expose('/logout')\n def logout(self):\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('admin.index'))\n\n @expose('/rendered')\n @login_required\n @wwwutils.action_logging\n def rendered(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = dateutil.parser.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n task = copy.copy(dag.get_task(task_id))\n ti = models.TaskInstance(task=task, execution_date=dttm)\n try:\n ti.render_templates()\n except Exception as e:\n flash(\"Error rendering template: \" + str(e), \"error\")\n title = \"Rendered Template\"\n html_dict = {}\n for template_field in task.__class__.template_fields:\n content = getattr(task, template_field)\n if template_field in attr_renderer:\n html_dict[template_field] = attr_renderer[template_field](content)\n else:\n html_dict[template_field] = (\n \"<pre><code>\" + str(content) + \"</pre></code>\")\n\n return self.render(\n 'airflow/ti_code.html',\n html_dict=html_dict,\n dag=dag,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n title=title,)\n\n @expose('/log')\n @login_required\n @wwwutils.action_logging\n def log(self):\n BASE_LOG_FOLDER = os.path.expanduser(\n conf.get('core', 'BASE_LOG_FOLDER'))\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dag = dagbag.get_dag(dag_id)\n log_relative = \"{dag_id}/{task_id}/{execution_date}\".format(\n **locals())\n loc = os.path.join(BASE_LOG_FOLDER, log_relative)\n loc = loc.format(**locals())\n log = \"\"\n TI = models.TaskInstance\n session = Session()\n dttm = dateutil.parser.parse(execution_date)\n ti = session.query(TI).filter(\n TI.dag_id == dag_id, TI.task_id == task_id,\n TI.execution_date == dttm).first()\n dttm = dateutil.parser.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n\n if ti:\n host = ti.hostname\n log_loaded = False\n\n if os.path.exists(loc):\n try:\n f = open(loc)\n log += \"\".join(f.readlines())\n f.close()\n log_loaded = True\n except:\n log = \"*** Failed to load local log file: {0}.\\n\".format(loc)\n else:\n WORKER_LOG_SERVER_PORT = \\\n conf.get('celery', 'WORKER_LOG_SERVER_PORT')\n url = os.path.join(\n \"http://{host}:{WORKER_LOG_SERVER_PORT}/log\", log_relative\n ).format(**locals())\n log += \"*** Log file isn't local.\\n\"\n log += \"*** Fetching here: {url}\\n\".format(**locals())\n try:\n import requests\n response = requests.get(url)\n response.raise_for_status()\n log += '\\n' + response.text\n log_loaded = True\n except:\n log += \"*** Failed to fetch log file from worker.\\n\".format(\n **locals())\n\n if not log_loaded:\n # load remote logs\n remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')\n remote_log = os.path.join(remote_log_base, 
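# log() above layers three sources: the local log file, then the worker's log
# server over HTTP, then remote storage. The same fallback chain as a compact
# function; fetch_remote is a hypothetical callable standing in for the
# S3Log/GCSLog readers used further down.
import os
import requests

def read_log(local_path, worker_url, fetch_remote=None):
    if os.path.exists(local_path):
        try:
            with open(local_path) as f:
                return f.read()
        except IOError:
            pass
    try:
        resp = requests.get(worker_url, timeout=5)
        resp.raise_for_status()
        return resp.text
    except requests.RequestException:
        pass
    if fetch_remote is not None:
        return fetch_remote()
    return "*** Log could not be loaded from any source.\n"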
log_relative)\n log += '\\n*** Reading remote logs...\\n'\n\n # S3\n if remote_log.startswith('s3:/'):\n log += log_utils.S3Log().read(remote_log, return_error=True)\n\n # GCS\n elif remote_log.startswith('gs:/'):\n log += log_utils.GCSLog().read(remote_log, return_error=True)\n\n # unsupported\n elif remote_log:\n log += '*** Unsupported remote log location.'\n\n session.commit()\n session.close()\n\n if PY2 and not isinstance(log, unicode):\n log = log.decode('utf-8')\n\n title = \"Log\"\n\n return self.render(\n 'airflow/ti_code.html',\n code=log, dag=dag, title=title, task_id=task_id,\n execution_date=execution_date, form=form)\n\n @expose('/task')\n @login_required\n @wwwutils.action_logging\n def task(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = dateutil.parser.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect('/admin/')\n task = dag.get_task(task_id)\n task = copy.copy(task)\n task.resolve_template_files()\n\n attributes = []\n for attr_name in dir(task):\n if not attr_name.startswith('_'):\n attr = getattr(task, attr_name)\n if type(attr) != type(self.task) and \\\n attr_name not in attr_renderer:\n attributes.append((attr_name, str(attr)))\n\n title = \"Task Details\"\n # Color coding the special attributes that are code\n special_attrs_rendered = {}\n for attr_name in attr_renderer:\n if hasattr(task, attr_name):\n source = getattr(task, attr_name)\n special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)\n\n return self.render(\n 'airflow/task.html',\n attributes=attributes,\n task_id=task_id,\n execution_date=execution_date,\n special_attrs_rendered=special_attrs_rendered,\n form=form,\n dag=dag, title=title)\n\n @expose('/xcom')\n @login_required\n @wwwutils.action_logging\n def xcom(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = dateutil.parser.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect('/admin/')\n\n session = Session()\n xcomlist = session.query(XCom).filter(\n XCom.dag_id == dag_id, XCom.task_id == task_id,\n XCom.execution_date == dttm).all()\n\n attributes = []\n for xcom in xcomlist:\n if not xcom.key.startswith('_'):\n attributes.append((xcom.key, xcom.value))\n\n title = \"XCom\"\n return self.render(\n 'airflow/xcom.html',\n attributes=attributes,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n dag=dag, title=title)\n\n @expose('/run')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def run(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = dateutil.parser.parse(execution_date)\n 
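# The remote-log branch above dispatches on the path prefix ('s3:/' versus
# 'gs:/'). The same dispatch reduced to a dictionary; the reader callables
# are hypothetical stand-ins for log_utils.S3Log().read / GCSLog().read.
def read_remote_log(remote_log, readers):
    for prefix, reader in readers.items():
        if remote_log.startswith(prefix):
            return reader(remote_log)
    return '*** Unsupported remote log location.'

if __name__ == '__main__':
    readers = {'s3:/': lambda p: 's3 contents of ' + p,
               'gs:/': lambda p: 'gcs contents of ' + p}
    print(read_remote_log('s3://bucket/dag/task/2016-01-01', readers))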
force = request.args.get('force') == \"true\"\n deps = request.args.get('deps') == \"true\"\n\n try:\n from airflow.executors import DEFAULT_EXECUTOR as executor\n from airflow.executors import CeleryExecutor\n if not isinstance(executor, CeleryExecutor):\n flash(\"Only works with the CeleryExecutor, sorry\", \"error\")\n return redirect(origin)\n except ImportError:\n # in case CeleryExecutor cannot be imported it is not active either\n flash(\"Only works with the CeleryExecutor, sorry\", \"error\")\n return redirect(origin)\n\n ti = models.TaskInstance(task=task, execution_date=execution_date)\n executor.start()\n executor.queue_task_instance(\n ti, force=force, ignore_dependencies=deps)\n executor.heartbeat()\n flash(\n \"Sent {} to the message queue, \"\n \"it should start any moment now.\".format(ti))\n return redirect(origin)\n\n @expose('/clear')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def clear(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = dateutil.parser.parse(execution_date)\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = request.args.get('downstream') == \"true\"\n future = request.args.get('future') == \"true\"\n past = request.args.get('past') == \"true\"\n recursive = request.args.get('recursive') == \"true\"\n\n dag = dag.sub_dag(\n task_regex=r\"^{0}$\".format(task_id),\n include_downstream=downstream,\n include_upstream=upstream)\n\n end_date = execution_date if not future else None\n start_date = execution_date if not past else None\n if confirmed:\n count = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive)\n\n flash(\"{0} task instances have been cleared\".format(count))\n return redirect(origin)\n else:\n tis = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive,\n dry_run=True)\n if not tis:\n flash(\"No task instances to clear\", 'error')\n response = redirect(origin)\n else:\n details = \"\\n\".join([str(t) for t in tis])\n\n response = self.render(\n 'airflow/confirm.html',\n message=(\n \"Here's the list of task instances you are about \"\n \"to clear:\"),\n details=details,)\n\n return response\n\n @expose('/blocked')\n @login_required\n def blocked(self):\n session = settings.Session()\n DR = models.DagRun\n dags = (\n session.query(DR.dag_id, sqla.func.count(DR.id))\n .filter(DR.state == State.RUNNING)\n .group_by(DR.dag_id)\n .all()\n )\n payload = []\n for dag_id, active_dag_runs in dags:\n max_active_runs = 0\n if dag_id in dagbag.dags:\n max_active_runs = dagbag.dags[dag_id].max_active_runs\n payload.append({\n 'dag_id': dag_id,\n 'active_dag_run': active_dag_runs,\n 'max_active_runs': max_active_runs,\n })\n return wwwutils.json_response(payload)\n\n @expose('/success')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def success(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = dateutil.parser.parse(execution_date)\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = 
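# clear() above runs the same operation twice: first with dry_run=True to
# list the affected task instances on a confirmation page, then for real once
# the user confirms. The shape of that pattern as a plain function.
def clear_items(items, predicate, confirmed=False):
    affected = [it for it in items if predicate(it)]
    if not confirmed:
        return affected, False  # dry run: report, mutate nothing
    for it in affected:
        it['state'] = None
    return affected, True

if __name__ == '__main__':
    tis = [{'id': 1, 'state': 'failed'}, {'id': 2, 'state': 'success'}]
    print(clear_items(tis, lambda t: t['state'] == 'failed'))
    print(clear_items(tis, lambda t: t['state'] == 'failed', confirmed=True))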
request.args.get('downstream') == \"true\"\n        future = request.args.get('future') == \"true\"\n        past = request.args.get('past') == \"true\"\n        recursive = request.args.get('recursive') == \"true\"\n        MAX_PERIODS = 1000\n\n        # Flagging tasks as successful\n        session = settings.Session()\n        task_ids = [task_id]\n        dag_ids = [dag_id]\n        task_id_to_dag = {\n            task_id: dag\n        }\n        end_date = ((dag.latest_execution_date or datetime.now())\n                    if future else execution_date)\n\n        if 'start_date' in dag.default_args:\n            start_date = dag.default_args['start_date']\n        elif dag.start_date:\n            start_date = dag.start_date\n        else:\n            start_date = execution_date\n\n        start_date = execution_date if not past else start_date\n\n        if recursive:\n            recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)\n\n        if downstream:\n            relatives = task.get_flat_relatives(upstream=False)\n            task_ids += [t.task_id for t in relatives]\n            if recursive:\n                recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)\n        if upstream:\n            relatives = task.get_flat_relatives(upstream=True)\n            task_ids += [t.task_id for t in relatives]\n            if recursive:\n                recurse_tasks(relatives, task_ids, dag_ids, task_id_to_dag)\n        TI = models.TaskInstance\n\n        if dag.schedule_interval == '@once':\n            dates = [start_date]\n        else:\n            dates = dag.date_range(start_date, end_date=end_date)\n\n        tis = session.query(TI).filter(\n            TI.dag_id.in_(dag_ids),\n            TI.execution_date.in_(dates),\n            TI.task_id.in_(task_ids)).all()\n        tis_to_change = session.query(TI).filter(\n            TI.dag_id.in_(dag_ids),\n            TI.execution_date.in_(dates),\n            TI.task_id.in_(task_ids),\n            TI.state != State.SUCCESS).all()\n        tasks = list(product(task_ids, dates))\n        tis_to_create = list(\n            set(tasks) -\n            set([(ti.task_id, ti.execution_date) for ti in tis]))\n\n        tis_all_altered = list(chain(\n            [(ti.task_id, ti.execution_date) for ti in tis_to_change],\n            tis_to_create))\n\n        if len(tis_all_altered) > MAX_PERIODS:\n            flash(\"Too many tasks at once (>{0})\".format(\n                MAX_PERIODS), 'error')\n            return redirect(origin)\n\n        if confirmed:\n            for ti in tis_to_change:\n                ti.state = State.SUCCESS\n            session.commit()\n\n            for task_id, task_execution_date in tis_to_create:\n                ti = TI(\n                    task=task_id_to_dag[task_id].get_task(task_id),\n                    execution_date=task_execution_date,\n                    state=State.SUCCESS)\n                session.add(ti)\n                session.commit()\n\n            session.commit()\n            session.close()\n            flash(\"Marked success on {} task instances\".format(\n                len(tis_all_altered)))\n\n            return redirect(origin)\n        else:\n            if not tis_all_altered:\n                flash(\"No task instances to mark as successful\", 'error')\n                response = redirect(origin)\n            else:\n                tis = []\n                for task_id, task_execution_date in tis_all_altered:\n                    tis.append(TI(\n                        task=task_id_to_dag[task_id].get_task(task_id),\n                        execution_date=task_execution_date,\n                        state=State.SUCCESS))\n                details = \"\\n\".join([str(t) for t in tis])\n\n                response = self.render(\n                    'airflow/confirm.html',\n                    message=(\n                        \"Here's the list of task instances you are about \"\n                        \"to mark as successful:\"),\n                    details=details,)\n            return response\n\n    @expose('/tree')\n    @login_required\n    @wwwutils.gzipped\n    @wwwutils.action_logging\n    def tree(self):\n        dag_id = request.args.get('dag_id')\n        blur = conf.getboolean('webserver', 'demo_mode')\n        dag = dagbag.get_dag(dag_id)\n        root = request.args.get('root')\n        if root:\n            dag = dag.sub_dag(\n                task_regex=root,\n                include_downstream=False,\n                include_upstream=True)\n\n        session = settings.Session()\n\n        base_date = request.args.get('base_date')\n        num_runs = request.args.get('num_runs')\n        num_runs = int(num_runs) if num_runs else 25\n\n        
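# success() above decides which TaskInstance rows must be created versus
# merely updated: the cross product of task_ids x dates, minus the pairs
# already present in the database. The same set algebra on toy data.
from datetime import datetime
from itertools import chain, product

task_ids = ['extract', 'load']
dates = [datetime(2016, 1, 1), datetime(2016, 1, 2)]
existing = {('extract', datetime(2016, 1, 1))}   # rows already in the DB
to_change = [('extract', datetime(2016, 1, 1))]  # existing rows not yet SUCCESS

to_create = set(product(task_ids, dates)) - existing
all_altered = list(chain(to_change, to_create))
print(len(to_create), 'to create;', len(all_altered), 'altered in total')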
if base_date:\n base_date = dateutil.parser.parse(base_date)\n else:\n base_date = dag.latest_execution_date or datetime.now()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n DR = models.DagRun\n dag_runs = (\n session.query(DR)\n .filter(\n DR.dag_id==dag.dag_id,\n DR.execution_date<=base_date,\n DR.execution_date>=min_date)\n .all()\n )\n dag_runs = {\n dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n task_instances = {}\n for ti in tis:\n tid = alchemy_to_dict(ti)\n dr = dag_runs.get(ti.execution_date)\n tid['external_trigger'] = dr['external_trigger'] if dr else False\n task_instances[(ti.task_id, ti.execution_date)] = tid\n\n expanded = []\n # The default recursion traces every path so that tree view has full\n # expand/collapse functionality. After 5,000 nodes we stop and fall\n # back on a quick DFS search for performance. See PR #320.\n node_count = [0]\n node_limit = 5000 / max(1, len(dag.roots))\n\n def recurse_nodes(task, visited):\n visited.add(task)\n node_count[0] += 1\n\n children = [\n recurse_nodes(t, visited) for t in task.upstream_list\n if node_count[0] < node_limit or t not in visited]\n\n # D3 tree uses children vs _children to define what is\n # expanded or not. The following block makes it such that\n # repeated nodes are collapsed by default.\n children_key = 'children'\n if task.task_id not in expanded:\n expanded.append(task.task_id)\n elif children:\n children_key = \"_children\"\n\n def set_duration(tid):\n if isinstance(tid, dict) and tid.get(\"state\") == State.RUNNING:\n d = datetime.now() - dateutil.parser.parse(tid[\"start_date\"])\n tid[\"duration\"] = d.total_seconds()\n return tid\n\n return {\n 'name': task.task_id,\n 'instances': [\n set_duration(task_instances.get((task.task_id, d))) or {\n 'execution_date': d.isoformat(),\n 'task_id': task.task_id\n }\n for d in dates],\n children_key: children,\n 'num_dep': len(task.upstream_list),\n 'operator': task.task_type,\n 'retries': task.retries,\n 'owner': task.owner,\n 'start_date': task.start_date,\n 'end_date': task.end_date,\n 'depends_on_past': task.depends_on_past,\n 'ui_color': task.ui_color,\n }\n data = {\n 'name': '[DAG]',\n 'children': [recurse_nodes(t, set()) for t in dag.roots],\n 'instances': [\n dag_runs.get(d) or {'execution_date': d.isoformat()}\n for d in dates],\n }\n\n data = json.dumps(data, indent=4, default=json_ser)\n session.commit()\n session.close()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n return self.render(\n 'airflow/tree.html',\n operators=sorted(\n list(set([op.__class__ for op in dag.tasks])),\n key=lambda x: x.__name__\n ),\n root=root,\n form=form,\n dag=dag, data=data, blur=blur)\n\n @expose('/graph')\n @login_required\n @wwwutils.gzipped\n @wwwutils.action_logging\n def graph(self):\n session = settings.Session()\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n if dag_id not in dagbag.dags:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect('/admin/')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n arrange = 
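# recurse_nodes() above walks the task graph depth-first, charges each node
# against a budget, and files repeated subtrees under '_children' instead of
# 'children' (D3's convention for collapsed nodes). A standalone version over
# a plain dict-of-lists graph.
def build_tree(graph, node, expanded, counter, limit):
    counter[0] += 1
    children = [build_tree(graph, c, expanded, counter, limit)
                for c in graph.get(node, [])
                if counter[0] < limit or c not in expanded]
    key = '_children' if node in expanded and children else 'children'
    expanded.add(node)
    return {'name': node, key: children}

if __name__ == '__main__':
    graph = {'root': ['a', 'b'], 'a': ['shared'], 'b': ['shared']}
    print(build_tree(graph, 'root', set(), [0], limit=5000))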
request.args.get('arrange', dag.orientation)\n\n nodes = []\n edges = []\n for task in dag.tasks:\n nodes.append({\n 'id': task.task_id,\n 'value': {\n 'label': task.task_id,\n 'labelStyle': \"fill:{0};\".format(task.ui_fgcolor),\n 'style': \"fill:{0};\".format(task.ui_color),\n }\n })\n\n def get_upstream(task):\n for t in task.upstream_list:\n edge = {\n 'u': t.task_id,\n 'v': task.task_id,\n }\n if edge not in edges:\n edges.append(edge)\n get_upstream(t)\n\n for t in dag.roots:\n get_upstream(t)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = dateutil.parser.parse(dttm)\n else:\n dttm = dag.latest_execution_date or datetime.now().date()\n\n DR = models.DagRun\n drs = (\n session.query(DR)\n .filter_by(dag_id=dag_id)\n .order_by(desc(DR.execution_date)).all()\n )\n dr_choices = []\n dr_state = None\n for dr in drs:\n dr_choices.append((dr.execution_date.isoformat(), dr.run_id))\n if dttm == dr.execution_date:\n dr_state = dr.state\n\n class GraphForm(Form):\n execution_date = SelectField(\"DAG run\", choices=dr_choices)\n arrange = SelectField(\"Layout\", choices=(\n ('LR', \"Left->Right\"),\n ('RL', \"Right->Left\"),\n ('TB', \"Top->Bottom\"),\n ('BT', \"Bottom->Top\"),\n ))\n form = GraphForm(\n data={'execution_date': dttm.isoformat(), 'arrange': arrange})\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(session, dttm, dttm)}\n tasks = {\n t.task_id: {\n 'dag_id': t.dag_id,\n 'task_type': t.task_type,\n }\n for t in dag.tasks}\n if not tasks:\n flash(\"No tasks found\", \"error\")\n session.commit()\n session.close()\n doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''\n\n return self.render(\n 'airflow/graph.html',\n dag=dag,\n form=form,\n width=request.args.get('width', \"100%\"),\n height=request.args.get('height', \"800\"),\n execution_date=dttm.isoformat(),\n state_token=state_token(dr_state),\n doc_md=doc_md,\n arrange=arrange,\n operators=sorted(\n list(set([op.__class__ for op in dag.tasks])),\n key=lambda x: x.__name__\n ),\n blur=blur,\n root=root or '',\n task_instances=json.dumps(task_instances, indent=2),\n tasks=json.dumps(tasks, indent=2),\n nodes=json.dumps(nodes, indent=2),\n edges=json.dumps(edges, indent=2),)\n\n @expose('/duration')\n @login_required\n @wwwutils.action_logging\n def duration(self):\n session = settings.Session()\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = dateutil.parser.parse(base_date)\n else:\n base_date = dag.latest_execution_date or datetime.now()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=600, width=\"1200\")\n cum_chart = nvd3.lineChart(\n name=\"cumLineChart\", x_is_date=True, height=600, width=\"1200\")\n\n for task in dag.tasks:\n y = []\n x = []\n cum_y = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n if ti.duration:\n dttm = wwwutils.epoch(ti.execution_date)\n x.append(dttm)\n y.append(float(ti.duration) / (60*60))\n fails = session.query(models.TaskFail).filter_by(\n task_id=ti.task_id,\n dag_id=ti.dag_id,\n 
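# graph() above flattens the DAG into dagre-d3 input: one node dict per task
# plus a deduplicated edge list gathered by walking upstream from the roots.
# The same traversal over a minimal graph keyed by task id.
def build_graph(upstream, roots):
    nodes = [{'id': t, 'value': {'label': t}} for t in upstream]
    edges = []

    def walk(task):
        for up in upstream.get(task, []):
            edge = {'u': up, 'v': task}
            if edge not in edges:  # linear membership scan, as in the view
                edges.append(edge)
                walk(up)

    for r in roots:
        walk(r)
    return nodes, edges

if __name__ == '__main__':
    upstream = {'load': ['transform'], 'transform': ['extract'], 'extract': []}
    print(build_graph(upstream, roots=['load']))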
execution_date=ti.execution_date).all()\n fails_total = sum([f.duration for f in fails])\n cum_y.append(float(ti.duration + fails_total) / (60*60))\n if x:\n chart.add_serie(name=task.task_id, x=x, y=y)\n cum_chart.add_serie(name=task.task_id, x=x, y=cum_y)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n session.commit()\n session.close()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildhtml()\n cum_chart.buildhtml()\n cum_chart_body = html.document_fromstring(str(cum_chart)).find('body')\n cum_chart_script = cum_chart_body.find('script')\n s_index = cum_chart_script.text.rfind('});')\n cum_chart_script.text = cum_chart_script.text[:s_index]\\\n + \"$( document ).trigger('chartload')\"\\\n + cum_chart_script.text[s_index:]\n\n return self.render(\n 'airflow/duration_chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart,\n cum_chart=html.tostring(cum_chart_body)\n )\n\n @expose('/tries')\n @login_required\n @wwwutils.action_logging\n def tries(self):\n session = settings.Session()\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = dateutil.parser.parse(base_date)\n else:\n base_date = dag.latest_execution_date or datetime.now()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, y_axis_format='d', height=600, width=\"1200\")\n\n for task in dag.tasks:\n y = []\n x = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n dttm = wwwutils.epoch(ti.execution_date)\n x.append(dttm)\n y.append(ti.try_number)\n if x:\n chart.add_serie(name=task.task_id, x=x, y=y)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n tries = sorted(list({ti.try_number for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if tries else None\n\n session.commit()\n session.close()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n\n chart.buildhtml()\n\n return self.render(\n 'airflow/chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart\n )\n\n @expose('/landing_times')\n @login_required\n @wwwutils.action_logging\n def landing_times(self):\n session = settings.Session()\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = dateutil.parser.parse(base_date)\n else:\n base_date = dag.latest_execution_date or datetime.now()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart = nvd3.lineChart(\n name=\"lineChart\", 
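# duration() above feeds python-nvd3: x values are epoch milliseconds, one
# add_serie() per task, then the chart is built and embedded. A minimal chart
# assembled the same way (assumes the python-nvd3 package is installed).
import nvd3

chart = nvd3.lineChart(name="lineChart", x_is_date=True, height=600,
                       width="1200")
x = [1451606400000, 1451692800000]  # two runs, as epoch millis
chart.add_serie(name="task_duration_hours", x=x, y=[1.5, 2.0])
chart.buildcontent()
print(chart.htmlcontent[:120])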
x_is_date=True, height=600, width=\"1200\")\n for task in dag.tasks:\n y = []\n x = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n ts = ti.execution_date\n if dag.schedule_interval:\n ts = dag.following_schedule(ts)\n if ti.end_date:\n dttm = wwwutils.epoch(ti.execution_date)\n secs = old_div((ti.end_date - ts).total_seconds(), 60*60)\n x.append(dttm)\n y.append(secs)\n if x:\n chart.add_serie(name=task.task_id, x=x, y=y)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n session.commit()\n session.close()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n return self.render(\n 'airflow/chart.html',\n dag=dag,\n chart=chart,\n height=\"700px\",\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n )\n\n @expose('/paused')\n @login_required\n @wwwutils.action_logging\n def paused(self):\n DagModel = models.DagModel\n dag_id = request.args.get('dag_id')\n session = settings.Session()\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n if request.args.get('is_paused') == 'false':\n orm_dag.is_paused = True\n else:\n orm_dag.is_paused = False\n session.merge(orm_dag)\n session.commit()\n session.close()\n\n dagbag.get_dag(dag_id)\n return \"OK\"\n\n @expose('/refresh')\n @login_required\n @wwwutils.action_logging\n def refresh(self):\n DagModel = models.DagModel\n dag_id = request.args.get('dag_id')\n session = settings.Session()\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n\n if orm_dag:\n orm_dag.last_expired = datetime.now()\n session.merge(orm_dag)\n session.commit()\n session.close()\n\n dagbag.get_dag(dag_id)\n flash(\"DAG [{}] is now fresh as a daisy\".format(dag_id))\n return redirect(request.referrer)\n\n @expose('/refresh_all')\n @login_required\n @wwwutils.action_logging\n def refresh_all(self):\n dagbag.collect_dags(only_if_updated=False)\n flash(\"All DAGs are now up to date\")\n return redirect('/')\n\n @expose('/gantt')\n @login_required\n @wwwutils.action_logging\n def gantt(self):\n session = settings.Session()\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n demo_mode = conf.getboolean('webserver', 'demo_mode')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = dateutil.parser.parse(dttm)\n else:\n dttm = dag.latest_execution_date or datetime.now().date()\n\n form = DateTimeForm(data={'execution_date': dttm})\n\n tis = [\n ti for ti in dag.get_task_instances(session, dttm, dttm)\n if ti.start_date]\n tis = sorted(tis, key=lambda ti: ti.start_date)\n\n tasks = []\n for ti in tis:\n tasks.append({\n 'startDate': wwwutils.epoch(ti.start_date),\n 'endDate': wwwutils.epoch(ti.end_date or datetime.now()),\n 'isoStart': ti.start_date.isoformat()[:-4],\n 'isoEnd': ti.end_date.isoformat()[:-4],\n 'taskName': ti.task_id,\n 'duration': \"{}\".format(ti.end_date - ti.start_date)[:-4],\n 'status': ti.state,\n 'executionDate': ti.execution_date.isoformat(),\n })\n states = {ti.state:ti.state for ti in tis}\n data = {\n 'taskNames': [ti.task_id for ti in tis],\n 'tasks': tasks,\n 'taskStatus': states,\n 'height': len(tis) * 25,\n }\n\n session.commit()\n 
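# landing_times() above charts how long after its scheduled slot each task
# instance landed: execution_date is shifted forward one schedule interval,
# and (end_date - that timestamp) is plotted in hours. The arithmetic alone,
# with following_schedule() simplified to a fixed timedelta.
from datetime import datetime, timedelta

def landing_time_hours(execution_date, end_date, schedule_interval):
    ts = execution_date + schedule_interval  # simplified following_schedule()
    return (end_date - ts).total_seconds() / (60 * 60)

print(landing_time_hours(datetime(2016, 1, 1),
                         datetime(2016, 1, 2, 3),
                         timedelta(days=1)))  # -> 3.0 hours after the slot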
session.close()\n\n return self.render(\n 'airflow/gantt.html',\n dag=dag,\n execution_date=dttm.isoformat(),\n form=form,\n data=json.dumps(data, indent=2),\n base_date='',\n demo_mode=demo_mode,\n root=root,\n )\n\n @expose('/object/task_instances')\n @login_required\n @wwwutils.action_logging\n def task_instances(self):\n session = settings.Session()\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = dateutil.parser.parse(dttm)\n else:\n return (\"Error: Invalid execution_date\")\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(session, dttm, dttm)}\n\n return json.dumps(task_instances)\n\n @expose('/variables/<form>', methods=[\"GET\", \"POST\"])\n @login_required\n @wwwutils.action_logging\n def variables(self, form):\n try:\n if request.method == 'POST':\n data = request.json\n if data:\n session = settings.Session()\n var = models.Variable(key=form, val=json.dumps(data))\n session.add(var)\n session.commit()\n return \"\"\n else:\n return self.render(\n 'airflow/variables/{}.html'.format(form)\n )\n except:\n return (\"Error: form airflow/variables/{}.html \"\n \"not found.\").format(form), 404\n\n @expose('/varimport', methods=[\"GET\", \"POST\"])\n @login_required\n @wwwutils.action_logging\n def varimport(self):\n try:\n out = str(request.files['file'].read())\n d = json.loads(out)\n except Exception:\n flash(\"Missing file or syntax error.\")\n else:\n for k, v in d.items():\n models.Variable.set(k, v, serialize_json=isinstance(v, dict))\n flash(\"{} variable(s) successfully updated.\".format(len(d)))\n return redirect('/admin/variable')\n\nclass HomeView(AdminIndexView):\n @expose(\"/\")\n @login_required\n def index(self):\n session = Session()\n DM = models.DagModel\n qry = None\n\n # restrict the dags shown if filter_by_owner and current user is not superuser\n do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())\n owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()\n\n # read orm_dags from the db\n\n qry = session.query(DM)\n qry_fltr = []\n\n if do_filter and owner_mode == 'ldapgroup':\n qry_fltr = qry.filter(\n ~DM.is_subdag, DM.is_active,\n DM.owners.in_(current_user.ldap_groups)\n ).all()\n elif do_filter and owner_mode == 'user':\n qry_fltr = qry.filter(\n ~DM.is_subdag, DM.is_active,\n DM.owners == current_user.user.username\n ).all()\n else:\n qry_fltr = qry.filter(\n ~DM.is_subdag, DM.is_active\n ).all()\n\n orm_dags = {dag.dag_id: dag for dag in qry_fltr}\n\n import_errors = session.query(models.ImportError).all()\n for ie in import_errors:\n flash(\n \"Broken DAG: [{ie.filename}] {ie.stacktrace}\".format(ie=ie),\n \"error\")\n session.expunge_all()\n session.commit()\n session.close()\n\n # get a list of all non-subdag dags visible to everyone\n unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag]\n\n # optionally filter to get only dags that the user should see\n if do_filter and owner_mode == 'ldapgroup':\n # only show dags owned by someone in @current_user.ldap_groups\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n if dag.owner in current_user.ldap_groups\n }\n elif do_filter and owner_mode == 'user':\n # only show dags owned by @current_user.user.username\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n if dag.owner == current_user.user.username\n }\n else:\n webserver_dags = {\n dag.dag_id: dag\n for dag in 
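# varimport() above loads a JSON upload and stores every key, serializing
# dict values back to JSON (Variable.set(..., serialize_json=...)). The same
# decision sketched against a plain dict instead of the Variable model.
import json

def import_variables(raw, store):
    d = json.loads(raw)
    for k, v in d.items():
        # dicts round-trip as JSON strings, scalars are stored as-is
        store[k] = json.dumps(v) if isinstance(v, dict) else v
    return len(d)

store = {}
print(import_variables('{"env": "prod", "limits": {"cpu": 2}}', store), store)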
unfiltered_webserver_dags\n }\n\n all_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))\n return self.render(\n 'airflow/dags.html',\n webserver_dags=webserver_dags,\n orm_dags=orm_dags,\n all_dag_ids=all_dag_ids)\n\n\nclass QueryView(wwwutils.DataProfilingMixin, BaseView):\n @expose('/')\n @wwwutils.gzipped\n def query(self):\n session = settings.Session()\n dbs = session.query(models.Connection).order_by(\n models.Connection.conn_id).all()\n session.expunge_all()\n db_choices = list(\n ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))\n conn_id_str = request.args.get('conn_id')\n csv = request.args.get('csv') == \"true\"\n sql = request.args.get('sql')\n\n class QueryForm(Form):\n conn_id = SelectField(\"Layout\", choices=db_choices)\n sql = TextAreaField(\"SQL\", widget=wwwutils.AceEditorWidget())\n data = {\n 'conn_id': conn_id_str,\n 'sql': sql,\n }\n results = None\n has_data = False\n error = False\n if conn_id_str:\n db = [db for db in dbs if db.conn_id == conn_id_str][0]\n hook = db.get_hook()\n try:\n df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))\n # df = hook.get_pandas_df(sql)\n has_data = len(df) > 0\n df = df.fillna('')\n results = df.to_html(\n classes=[\n 'table', 'table-bordered', 'table-striped', 'no-wrap'],\n index=False,\n na_rep='',\n ) if has_data else ''\n except Exception as e:\n flash(str(e), 'error')\n error = True\n\n if has_data and len(df) == QUERY_LIMIT:\n flash(\n \"Query output truncated at \" + str(QUERY_LIMIT) +\n \" rows\", 'info')\n\n if not has_data and error:\n flash('No data', 'error')\n\n if csv:\n return Response(\n response=df.to_csv(index=False),\n status=200,\n mimetype=\"application/text\")\n\n form = QueryForm(request.form, data=data)\n session.commit()\n session.close()\n return self.render(\n 'airflow/query.html', form=form,\n title=\"Ad Hoc Query\",\n results=results or '',\n has_data=has_data)\n\n\nclass AirflowModelView(ModelView):\n list_template = 'airflow/model_list.html'\n edit_template = 'airflow/model_edit.html'\n create_template = 'airflow/model_create.html'\n column_display_actions = True\n page_size = 500\n\n\nclass ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):\n \"\"\"\n Modifying the base ModelView class for non edit, browse only operations\n \"\"\"\n named_filter_urls = True\n can_create = False\n can_edit = False\n can_delete = False\n column_display_pk = True\n\n\nclass PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):\n column_list = ('pool', 'slots', 'used_slots', 'queued_slots')\n column_formatters = dict(\n pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)\n named_filter_urls = True\n\n\nclass SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):\n verbose_name_plural = \"SLA misses\"\n verbose_name = \"SLA miss\"\n column_list = (\n 'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')\n column_formatters = dict(\n task_id=task_instance_link,\n execution_date=datetime_f,\n timestamp=datetime_f,\n dag_id=dag_link)\n named_filter_urls = True\n column_searchable_list = ('dag_id', 'task_id',)\n column_filters = (\n 'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')\n form_widget_args = {\n 'email_sent': {'disabled': True},\n 'timestamp': {'disabled': True},\n }\n\n\nclass ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"chart\"\n verbose_name_plural = \"charts\"\n form_columns = (\n 'label',\n 'owner',\n 'conn_id',\n 'chart_type',\n 'show_datatable',\n 
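# QueryView above caps ad-hoc queries with wwwutils.limit_sql() before
# handing them to pandas. That helper is dialect-aware; this is only a naive
# illustration of the wrap-in-a-subquery idea for LIMIT-style dialects.
def limit_sql_sketch(sql, limit):
    return 'SELECT * FROM ({sql}) qry LIMIT {limit}'.format(
        sql=sql.rstrip('; '), limit=limit)

print(limit_sql_sketch('SELECT dag_id, state FROM task_instance', 100000))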
'x_is_date',\n 'y_log_scale',\n 'show_sql',\n 'height',\n 'sql_layout',\n 'sql',\n 'default_params',)\n column_list = (\n 'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)\n column_formatters = dict(label=label_link, last_modified=datetime_f)\n column_default_sort = ('last_modified', True)\n create_template = 'airflow/chart/create.html'\n edit_template = 'airflow/chart/edit.html'\n column_filters = ('label', 'owner.username', 'conn_id')\n column_searchable_list = ('owner.username', 'label', 'sql')\n column_descriptions = {\n 'label': \"Can include {{ templated_fields }} and {{ macros }}\",\n 'chart_type': \"The type of chart to be displayed\",\n 'sql': \"Can include {{ templated_fields }} and {{ macros }}.\",\n 'height': \"Height of the chart, in pixels.\",\n 'conn_id': \"Source database to run the query against\",\n 'x_is_date': (\n \"Whether the X axis should be casted as a date field. Expect most \"\n \"intelligible date formats to get casted properly.\"\n ),\n 'owner': (\n \"The chart's owner, mostly used for reference and filtering in \"\n \"the list view.\"\n ),\n 'show_datatable':\n \"Whether to display an interactive data table under the chart.\",\n 'default_params': (\n 'A dictionary of {\"key\": \"values\",} that define what the '\n 'templated fields (parameters) values should be by default. '\n 'To be valid, it needs to \"eval\" as a Python dict. '\n 'The key values will show up in the url\\'s querystring '\n 'and can be altered there.'\n ),\n 'show_sql': \"Whether to display the SQL statement as a collapsible \"\n \"section in the chart page.\",\n 'y_log_scale': \"Whether to use a log scale for the Y axis.\",\n 'sql_layout': (\n \"Defines the layout of the SQL that the application should \"\n \"expect. Depending on the tables you are sourcing from, it may \"\n \"make more sense to pivot / unpivot the metrics.\"\n ),\n }\n column_labels = {\n 'sql': \"SQL\",\n 'height': \"Chart Height\",\n 'sql_layout': \"SQL Layout\",\n 'show_sql': \"Display the SQL Statement\",\n 'default_params': \"Default Parameters\",\n }\n form_choices = {\n 'chart_type': [\n ('line', 'Line Chart'),\n ('spline', 'Spline Chart'),\n ('bar', 'Bar Chart'),\n ('column', 'Column Chart'),\n ('area', 'Overlapping Area Chart'),\n ('stacked_area', 'Stacked Area Chart'),\n ('percent_area', 'Percent Area Chart'),\n ('datatable', 'No chart, data table only'),\n ],\n 'sql_layout': [\n ('series', 'SELECT series, x, y FROM ...'),\n ('columns', 'SELECT x, y (series 1), y (series 2), ... 
FROM ...'),\n ],\n 'conn_id': [\n (c.conn_id, c.conn_id)\n for c in (\n Session().query(models.Connection.conn_id)\n .group_by(models.Connection.conn_id)\n )\n ]\n }\n\n def on_model_change(self, form, model, is_created=True):\n if model.iteration_no is None:\n model.iteration_no = 0\n else:\n model.iteration_no += 1\n if not model.user_id and current_user and hasattr(current_user, 'id'):\n model.user_id = current_user.id\n model.last_modified = datetime.now()\n\nchart_mapping = (\n ('line', 'lineChart'),\n ('spline', 'lineChart'),\n ('bar', 'multiBarChart'),\n ('column', 'multiBarChart'),\n ('area', 'stackedAreaChart'),\n ('stacked_area', 'stackedAreaChart'),\n ('percent_area', 'stackedAreaChart'),\n ('datatable', 'datatable'),\n)\nchart_mapping = dict(chart_mapping)\n\n\nclass KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"known event\"\n verbose_name_plural = \"known events\"\n form_columns = (\n 'label',\n 'event_type',\n 'start_date',\n 'end_date',\n 'reported_by',\n 'description')\n column_list = (\n 'label', 'event_type', 'start_date', 'end_date', 'reported_by')\n column_default_sort = (\"start_date\", True)\n\n\nclass KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):\n pass\n\n\n# NOTE: For debugging / troubleshooting\n# mv = KnowEventTypeView(\n# models.KnownEventType,\n# Session, name=\"Known Event Types\", category=\"Manage\")\n# admin.add_view(mv)\n# class DagPickleView(SuperUserMixin, ModelView):\n# pass\n# mv = DagPickleView(\n# models.DagPickle,\n# Session, name=\"Pickles\", category=\"Manage\")\n# admin.add_view(mv)\n\n\nclass VariableView(wwwutils.LoginMixin, AirflowModelView):\n verbose_name = \"Variable\"\n verbose_name_plural = \"Variables\"\n list_template = 'airflow/variable_list.html'\n\n def hidden_field_formatter(view, context, model, name):\n if should_hide_value_for_key(model.key):\n return Markup('*' * 8)\n return getattr(model, name)\n\n form_columns = (\n 'key',\n 'val',\n )\n column_list = ('key', 'val', 'is_encrypted',)\n column_filters = ('key', 'val')\n column_searchable_list = ('key', 'val')\n form_widget_args = {\n 'is_encrypted': {'disabled': True},\n 'val': {\n 'rows': 20,\n }\n }\n column_sortable_list = (\n 'key',\n 'val',\n 'is_encrypted',\n )\n column_formatters = {\n 'val': hidden_field_formatter\n }\n\n # Default flask-admin export functionality doesn't handle serialized json\n @action('varexport', 'Export', None)\n def action_varexport(self, ids):\n V = models.Variable\n session = settings.Session()\n qry = session.query(V).filter(V.id.in_(ids)).all()\n session.close()\n\n var_dict = {}\n d = json.JSONDecoder()\n for var in qry:\n val = None\n try:\n val = d.decode(var.val)\n except:\n val = var.val\n var_dict[var.key] = val\n\n response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))\n response.headers[\"Content-Disposition\"] = \"attachment; filename=variables.json\"\n return response\n\n def on_form_prefill(self, form, id):\n if should_hide_value_for_key(form.key.data):\n form.val.data = '*' * 8\n\n\nclass JobModelView(ModelViewOnly):\n verbose_name_plural = \"jobs\"\n verbose_name = \"job\"\n column_default_sort = ('start_date', True)\n column_filters = (\n 'job_type', 'dag_id', 'state',\n 'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')\n column_formatters = dict(\n start_date=datetime_f,\n end_date=datetime_f,\n hostname=nobr_f,\n state=state_f,\n latest_heartbeat=datetime_f)\n\n\nclass DagRunModelView(ModelViewOnly):\n verbose_name_plural = \"DAG 
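# action_varexport() above turns selected rows into a downloadable JSON
# attachment via make_response() plus a Content-Disposition header. The
# response-building half of that as a bare Flask view, with toy data standing
# in for the Variable query.
import json
from flask import Flask, make_response

app = Flask(__name__)

@app.route('/varexport')
def varexport():
    var_dict = {'env': 'prod', 'limits': {'cpu': 2}}
    response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
    response.headers['Content-Disposition'] = \
        'attachment; filename=variables.json'
    return response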
Runs\"\n can_delete = True\n can_edit = True\n can_create = True\n column_editable_list = ('state',)\n verbose_name = \"dag run\"\n column_default_sort = ('execution_date', True)\n form_choices = {\n 'state': [\n ('success', 'success'),\n ('running', 'running'),\n ('failed', 'failed'),\n ],\n }\n form_args = dict(\n dag_id=dict(validators=[validators.DataRequired()])\n )\n column_list = (\n 'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')\n column_filters = column_list\n column_searchable_list = ('dag_id', 'state', 'run_id')\n column_formatters = dict(\n execution_date=datetime_f,\n state=state_f,\n start_date=datetime_f,\n dag_id=dag_link)\n\n @action('set_running', \"Set state to 'running'\", None)\n def action_set_running(self, ids):\n self.set_dagrun_state(ids, State.RUNNING)\n\n @action('set_failed', \"Set state to 'failed'\", None)\n def action_set_failed(self, ids):\n self.set_dagrun_state(ids, State.FAILED)\n\n @action('set_success', \"Set state to 'success'\", None)\n def action_set_success(self, ids):\n self.set_dagrun_state(ids, State.SUCCESS)\n\n @provide_session\n def set_dagrun_state(self, ids, target_state, session=None):\n try:\n DR = models.DagRun\n count = 0\n for dr in session.query(DR).filter(DR.id.in_(ids)).all():\n count += 1\n dr.state = target_state\n if target_state == State.RUNNING:\n dr.start_date = datetime.now()\n else:\n dr.end_date = datetime.now()\n session.commit()\n flash(\n \"{count} dag runs were set to '{target_state}'\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n\nclass LogModelView(ModelViewOnly):\n verbose_name_plural = \"logs\"\n verbose_name = \"log\"\n column_default_sort = ('dttm', True)\n column_filters = ('dag_id', 'task_id', 'execution_date')\n column_formatters = dict(\n dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)\n\n\nclass TaskInstanceModelView(ModelViewOnly):\n verbose_name_plural = \"task instances\"\n verbose_name = \"task instance\"\n column_filters = (\n 'state', 'dag_id', 'task_id', 'execution_date', 'hostname',\n 'queue', 'pool', 'operator', 'start_date', 'end_date')\n named_filter_urls = True\n column_formatters = dict(\n log_url=log_url_formatter,\n task_id=task_instance_link,\n hostname=nobr_f,\n state=state_f,\n execution_date=datetime_f,\n start_date=datetime_f,\n end_date=datetime_f,\n queued_dttm=datetime_f,\n dag_id=dag_link, duration=duration_f)\n column_searchable_list = ('dag_id', 'task_id', 'state')\n column_default_sort = ('start_date', True)\n form_choices = {\n 'state': [\n ('success', 'success'),\n ('running', 'running'),\n ('failed', 'failed'),\n ],\n }\n column_list = (\n 'state', 'dag_id', 'task_id', 'execution_date', 'operator',\n 'start_date', 'end_date', 'duration', 'job_id', 'hostname',\n 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',\n 'pool', 'log_url')\n can_delete = True\n page_size = 500\n\n @action('set_running', \"Set state to 'running'\", None)\n def action_set_running(self, ids):\n self.set_task_instance_state(ids, State.RUNNING)\n\n @action('set_failed', \"Set state to 'failed'\", None)\n def action_set_failed(self, ids):\n self.set_task_instance_state(ids, State.FAILED)\n\n @action('set_success', \"Set state to 'success'\", None)\n def action_set_success(self, ids):\n self.set_task_instance_state(ids, State.SUCCESS)\n\n @action('set_retry', \"Set state to 'up_for_retry'\", None)\n def action_set_retry(self, ids):\n 
self.set_task_instance_state(ids, State.UP_FOR_RETRY)\n\n @action('delete',\n lazy_gettext('Delete'),\n lazy_gettext('Are you sure you want to delete selected records?'))\n def action_delete(self, ids):\n \"\"\"\n As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().\n\n TODO: this method should be removed once the below bug is fixed on Flask-Admin side.\n https://github.com/flask-admin/flask-admin/issues/1226\n \"\"\"\n if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):\n self.delete_task_instances(ids)\n else:\n super(TaskInstanceModelView, self).action_delete(ids)\n\n @provide_session\n def set_task_instance_state(self, ids, target_state, session=None):\n try:\n TI = models.TaskInstance\n count = len(ids)\n for id in ids:\n task_id, dag_id, execution_date = id.split(',')\n execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')\n ti = session.query(TI).filter(TI.task_id == task_id,\n TI.dag_id == dag_id,\n TI.execution_date == execution_date).one()\n ti.state = target_state\n session.commit()\n flash(\n \"{count} task instances were set to '{target_state}'\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n @provide_session\n def delete_task_instances(self, ids, session=None):\n try:\n TI = models.TaskInstance\n count = 0\n for id in ids:\n task_id, dag_id, execution_date = id.split(',')\n execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')\n count += session.query(TI).filter(TI.task_id == task_id,\n TI.dag_id == dag_id,\n TI.execution_date == execution_date).delete()\n session.commit()\n flash(\"{count} task instances were deleted\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to delete', 'error')\n\n def get_one(self, id):\n \"\"\"\n As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().\n\n TODO: this method should be removed once the below bug is fixed on Flask-Admin side.\n https://github.com/flask-admin/flask-admin/issues/1226\n \"\"\"\n task_id, dag_id, execution_date = iterdecode(id)\n execution_date = dateutil.parser.parse(execution_date)\n return self.session.query(self.model).get((task_id, dag_id, execution_date))\n\n\nclass ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):\n create_template = 'airflow/conn_create.html'\n edit_template = 'airflow/conn_edit.html'\n list_template = 'airflow/conn_list.html'\n form_columns = (\n 'conn_id',\n 'conn_type',\n 'host',\n 'schema',\n 'login',\n 'password',\n 'port',\n 'extra',\n 'extra__jdbc__drv_path',\n 'extra__jdbc__drv_clsname',\n 'extra__google_cloud_platform__project',\n 'extra__google_cloud_platform__key_path',\n 'extra__google_cloud_platform__scope',\n )\n verbose_name = \"Connection\"\n verbose_name_plural = \"Connections\"\n column_default_sort = ('conn_id', False)\n column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)\n form_overrides = dict(_password=PasswordField)\n form_widget_args = {\n 'is_extra_encrypted': {'disabled': True},\n 'is_encrypted': {'disabled': True},\n }\n # Used to customized the form, the forms elements get rendered\n # and results are stored in the extra field as json. All of these\n # need to be prefixed with extra__ and then the conn_type ___ as in\n # extra__{conn_type}__name. 
You can also hide form elements and rename\n    # others from the connection_form.js file\n    form_extra_fields = {\n        'extra__jdbc__drv_path': StringField('Driver Path'),\n        'extra__jdbc__drv_clsname': StringField('Driver Class'),\n        'extra__google_cloud_platform__project': StringField('Project Id'),\n        'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),\n        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),\n\n    }\n    form_choices = {\n        'conn_type': [\n            ('fs', 'File (path)'),\n            ('ftp', 'FTP',),\n            ('google_cloud_platform', 'Google Cloud Platform'),\n            ('hdfs', 'HDFS',),\n            ('http', 'HTTP',),\n            ('hive_cli', 'Hive Client Wrapper',),\n            ('hive_metastore', 'Hive Metastore Thrift',),\n            ('hiveserver2', 'Hive Server 2 Thrift',),\n            ('jdbc', 'Jdbc Connection',),\n            ('mysql', 'MySQL',),\n            ('postgres', 'Postgres',),\n            ('oracle', 'Oracle',),\n            ('vertica', 'Vertica',),\n            ('presto', 'Presto',),\n            ('s3', 'S3',),\n            ('samba', 'Samba',),\n            ('sqlite', 'Sqlite',),\n            ('ssh', 'SSH',),\n            ('cloudant', 'IBM Cloudant',),\n            ('mssql', 'Microsoft SQL Server'),\n            ('mesos_framework-id', 'Mesos Framework ID'),\n        ]\n    }\n\n    def on_model_change(self, form, model, is_created):\n        formdata = form.data\n        if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:\n            extra = {\n                key:formdata[key]\n                for key in self.form_extra_fields.keys() if key in formdata}\n            model.extra = json.dumps(extra)\n\n    @classmethod\n    def alert_fernet_key(cls):\n        fk = None\n        try:\n            fk = conf.get('core', 'fernet_key')\n        except:\n            pass\n        return fk is None\n\n    @classmethod\n    def is_secure(cls):\n        \"\"\"\n        Used to display a message in the Connection list view making it clear\n        that the passwords and `extra` field can't be encrypted.\n        \"\"\"\n        is_secure = False\n        try:\n            import cryptography\n            conf.get('core', 'fernet_key')\n            is_secure = True\n        except:\n            pass\n        return is_secure\n\n    def on_form_prefill(self, form, id):\n        try:\n            d = json.loads(form.data.get('extra', '{}'))\n        except Exception as e:\n            d = {}\n\n        for field in list(self.form_extra_fields.keys()):\n            value = d.get(field, '')\n            if value:\n                field = getattr(form, field)\n                field.data = value\n\n\nclass UserModelView(wwwutils.SuperUserMixin, AirflowModelView):\n    verbose_name = \"User\"\n    verbose_name_plural = \"Users\"\n    column_default_sort = 'username'\n\n\nclass VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):\n    @expose('/')\n    def version(self):\n        # Look at the version from setup.py\n        try:\n            airflow_version = pkg_resources.require(\"airflow\")[0].version\n        except Exception as e:\n            airflow_version = None\n            self.logger.error(e)\n\n        # Get the Git repo and git hash\n        git_version = None\n        try:\n            with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:\n                git_version = f.readline()\n        except Exception as e:\n            self.logger.error(e)\n\n        # Render information\n        title = \"Version Info\"\n        return self.render('airflow/version.html',\n                           title=title,\n                           airflow_version=airflow_version,\n                           git_version=git_version)\n\n\nclass ConfigurationView(wwwutils.SuperUserMixin, BaseView):\n    @expose('/')\n    def conf(self):\n        raw = request.args.get('raw') == \"true\"\n        title = \"Airflow Configuration\"\n        subtitle = conf.AIRFLOW_CONFIG\n        if conf.getboolean(\"webserver\", \"expose_config\"):\n            with open(conf.AIRFLOW_CONFIG, 'r') as f:\n                config = f.read()\n        else:\n            config = (\n                \"# Your Airflow administrator chose not to expose the \"\n                \"configuration, most likely for security reasons.\")\n        if raw:\n            return Response(\n                response=config,\n                status=200,\n                
mimetype=\"application/text\")\n else:\n code_html = Markup(highlight(\n config,\n lexers.IniLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/code.html',\n pre_subtitle=settings.HEADER + \" v\" + airflow.__version__,\n code_html=code_html, title=title, subtitle=subtitle)\n\n\nclass DagModelView(wwwutils.SuperUserMixin, ModelView):\n column_list = ('dag_id', 'owners')\n column_editable_list = ('is_paused',)\n form_excluded_columns = ('is_subdag', 'is_active')\n column_searchable_list = ('dag_id',)\n column_filters = (\n 'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',\n 'last_scheduler_run', 'last_expired')\n form_widget_args = {\n 'last_scheduler_run': {'disabled': True},\n 'fileloc': {'disabled': True},\n 'is_paused': {'disabled': True},\n 'last_pickled': {'disabled': True},\n 'pickle_id': {'disabled': True},\n 'last_loaded': {'disabled': True},\n 'last_expired': {'disabled': True},\n 'pickle_size': {'disabled': True},\n 'scheduler_lock': {'disabled': True},\n 'owners': {'disabled': True},\n }\n column_formatters = dict(\n dag_id=dag_link,\n )\n can_delete = False\n can_create = False\n page_size = 50\n list_template = 'airflow/list_dags.html'\n named_filter_urls = True\n\n def get_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return (\n super(DagModelView, self)\n .get_query()\n .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\n .filter(~models.DagModel.is_subdag)\n )\n\n def get_count_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return (\n super(DagModelView, self)\n .get_count_query()\n .filter(models.DagModel.is_active)\n .filter(~models.DagModel.is_subdag)\n )\n" ]
[ [ "pandas.set_option", "pandas.to_datetime" ] ]
Abner0627/Traffic-Sign-Classifier
[ "c11ea480c842a25696653ee1275ca15a1891d067" ]
[ "func.py" ]
[ "import numpy as np\n\n\ndef _norm(x, Z=True):\n x_n = np.zeros_like(x)\n for n in range(x.shape[0]):\n xs = x[n,...]\n for i in range(xs.shape[0]):\n xss = xs[i,:,:]\n x_mu = np.mean(xss)\n if Z:\n x_std = np.std(xss)\n x_n[n,i,:,:] = (xss-x_mu)/x_std\n else:\n x_min = np.min(xss)\n x_max = np.max(xss)\n x_n[n,i,:,:] = (xss-x_mu)/(x_max-x_min)\n return x_n\n\ndef _gray(x):\n # 3, 32, 32\n gray = np.zeros((x.shape[0], 1, x.shape[-1], x.shape[-1]))\n rgb_weights = [0.2989, 0.5870, 0.1140]\n for i in range(len(x)):\n img = x[i,:,:,:].transpose(1,2,0)\n gray[i,:,:,:] = np.dot(img[...,:3], rgb_weights)\n return gray\n\n\n#%% Test\nif __name__ == \"__main__\":\n x = np.random.rand(2,3,32,32)\n y = _norm(x)\n print(y.shape)\n\n" ]
[ [ "numpy.dot", "numpy.min", "numpy.max", "numpy.std", "numpy.mean", "numpy.zeros_like", "numpy.random.rand", "numpy.zeros" ] ]
xumingze0308/eLR-WACV2018
[ "ac2ef36f6272f02c44fb862c9f6d63140349eb6b" ]
[ "tools/pretrained_spatial_temporal_rnn_bidir/eval.py" ]
[ "import os\nimport os.path as osp\nimport sys\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as data\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nimport _init_paths\nimport utils as utl\nfrom configs.hmdb51 import parse_hmdb51_args as parse_args\nfrom datasets import HMDB51DataLayer as DataLayer\nfrom models import RNN as Model\n\ndef main(args):\n this_dir = osp.join(osp.dirname(__file__), '.')\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n data_loader = data.DataLoader(\n DataLayer(\n data_root=osp.join(args.data_root, 'Test'),\n phase='Test',\n ),\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n )\n\n if osp.isfile(args.checkpoint):\n checkpoint = torch.load(args.checkpoint)\n else:\n raise(RuntimeError('Cannot find the checkpoint {}'.format(args.checkpoint)))\n model = Model().to(device)\n model.load_state_dict(checkpoint)\n model.train(False)\n softmax = nn.Softmax(dim=1).to(device)\n\n corrects = 0.0\n with torch.set_grad_enabled(False):\n for batch_idx, (spatial, temporal, length, target) in enumerate(data_loader):\n spatial_input = torch.zeros(*spatial.shape)\n temporal_input = torch.zeros(*temporal.shape)\n target_input = []\n length_input = []\n\n index = utl.argsort(length)[::-1]\n for i, idx in enumerate(index):\n spatial_input[i] = spatial[idx]\n temporal_input[i] = temporal[idx]\n target_input.append(target[idx])\n length_input.append(length[idx])\n\n spatial_input = spatial_input.to(device)\n temporal_input = temporal_input.to(device)\n target_input = torch.LongTensor(target_input).to(device)\n pack1 = pack_padded_sequence(spatial_input, length_input, batch_first=True)\n pack2 = pack_padded_sequence(temporal_input, length_input, batch_first=True)\n\n score = model(pack1, pack2)\n pred = torch.max(softmax(score), 1)[1].cpu()\n corrects += torch.sum(pred == target_input.cpu()).item()\n\n print('The accuracy is {:.4f}'.format(corrects/len(data_loader.dataset)))\n\nif __name__ == '__main__':\n main(parse_args())\n\n" ]
[ [ "torch.nn.Softmax", "torch.LongTensor", "torch.zeros", "torch.load", "torch.nn.utils.rnn.pack_padded_sequence", "torch.set_grad_enabled", "torch.cuda.is_available" ] ]
AhmedIdr/FARM
[ "b65c6985f9301e6e8c1cd491124477098ff1621d" ]
[ "farm/infer.py" ]
[ "import os\nimport torch\n\nfrom torch.utils.data.sampler import SequentialSampler\n\nfrom farm.data_handler.dataloader import NamedDataLoader\nfrom farm.modeling.adaptive_model import AdaptiveModel\n\nfrom farm.utils import initialize_device_settings\nfrom farm.data_handler.processor import Processor\nfrom farm.utils import set_all_seeds\n\n\nclass Inferencer:\n \"\"\"\n Loads a saved AdaptiveModel from disk and runs it in inference mode. Can be used for a model with prediction head (down-stream predictions) and without (using LM as embedder).\n\n Example usage:\n\n .. code-block:: python\n\n # down-stream inference\n basic_texts = [\n {\"text\": \"Schartau sagte dem Tagesspiegel, dass Fischer ein Idiot sei\"},\n {\"text\": \"Martin Müller spielt Handball in Berlin\"},\n ]\n model = Inferencer(your_model_dir)\n model.run_inference(dicts=basic_texts)\n # LM embeddings\n model.extract_vectors(dicts=basic_texts)\n\n \"\"\"\n\n def __init__(self, model, processor, batch_size=4, gpu=False, name=None):\n \"\"\"\n Initializes inferencer from an AdaptiveModel and a Processor instance.\n\n :param model: AdaptiveModel to run in inference mode\n :type model: AdaptiveModel\n :param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.\n :type processor: Processor\n :param batch_size: Number of samples computed once per batch\n :type batch_size: int\n :param gpu: If GPU shall be used\n :type gpu: bool\n :param name: Name for the current inferencer model, displayed in the REST API\n :type name: string\n :return: An instance of the Inferencer.\n\n \"\"\"\n # Init device and distributed settings\n device, n_gpu = initialize_device_settings(\n use_cuda=gpu, local_rank=-1, fp16=False\n )\n\n self.processor = processor\n self.model = model\n self.model.eval()\n self.batch_size = batch_size\n self.device = device\n self.language = self.model.language_model.language\n # TODO adjust for multiple prediction heads\n if len(self.model.prediction_heads) == 1:\n self.prediction_type = self.model.prediction_heads[0].model_type\n self.label_map = self.processor.label_maps[0]\n elif len(self.model.prediction_heads) == 0:\n self.prediction_type = \"embedder\"\n self.name = name if name != None else f\"anonymous-{self.prediction_type}\"\n set_all_seeds(42, n_gpu)\n\n @classmethod\n def load(cls, load_dir, batch_size=4, gpu=False):\n \"\"\"\n Initializes inferencer from directory with saved model.\n :param load_dir: Directory where the saved model is located.\n :type load_dir: str\n :param batch_size: Number of samples computed once per batch\n :type batch_size: int\n :param gpu: If GPU shall be used\n :type gpu: bool\n :return: An instance of the Inferencer.\n \"\"\"\n\n device, n_gpu = initialize_device_settings(\n use_cuda=gpu, local_rank=-1, fp16=False\n )\n\n model = AdaptiveModel.load(load_dir, device)\n processor = Processor.load_from_dir(load_dir)\n name = os.path.basename(load_dir)\n return cls(model, processor, batch_size=batch_size, gpu=gpu, name=name)\n\n def run_inference(self, dicts):\n \"\"\"\n Runs down-stream inference using the prediction head.\n :param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.\n :type dicst: [dict]\n :return: dict of predictions\n\n \"\"\"\n if self.prediction_type == \"embedder\":\n raise TypeError(\"You have called run_inference for a model without any prediction head! \"\n \"If you want to: \"\n \"a) ... 
extract vectors from the language model: call `Inferencer.extract_vectors(...)`\"\n f\"b) ... run inference on a downstream task: make sure your model path {self.name} contains a saved prediction head\")\n dataset, tensor_names = self.processor.dataset_from_dicts(dicts)\n samples = []\n for dict in dicts:\n samples.extend(self.processor._dict_to_samples(dict))\n\n data_loader = NamedDataLoader(\n dataset=dataset,\n sampler=SequentialSampler(dataset),\n batch_size=self.batch_size,\n tensor_names=tensor_names,\n )\n\n preds_all = []\n for i,batch in enumerate(data_loader):\n batch = {key: batch[key].to(self.device) for key in batch}\n batch_samples = samples[i*self.batch_size:(i+1)*self.batch_size]\n with torch.no_grad():\n logits = self.model.forward(**batch)\n preds = self.model.formatted_preds(\n logits=logits,\n label_maps=self.processor.label_maps,\n samples=batch_samples,\n tokenizer=self.processor.tokenizer,\n **batch\n )\n preds_all += preds\n\n return preds_all\n\n def extract_vectors(self, dicts, extraction_strategy=\"pooled\"):\n \"\"\"\n Converts a text into vector(s) using the language model only (no prediction head involved).\n :param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.\n :type dicts: [dict]\n :param extraction_strategy: Strategy to extract vectors. Choices: 'pooled' (sentence vector), 'per_token' (individual token vectors)\n :type extraction_strategy: str\n :return: dict of predictions\n \"\"\"\n dataset, tensor_names = self.processor.dataset_from_dicts(dicts)\n samples = []\n for dict in dicts:\n samples.extend(self.processor._dict_to_samples(dict))\n\n data_loader = NamedDataLoader(\n dataset=dataset,\n sampler=SequentialSampler(dataset),\n batch_size=self.batch_size,\n tensor_names=tensor_names,\n )\n\n preds_all = []\n for i,batch in enumerate(data_loader):\n batch = {key: batch[key].to(self.device) for key in batch}\n batch_samples = samples[i*self.batch_size:(i+1)*self.batch_size]\n with torch.no_grad():\n preds = self.model.language_model.formatted_preds(\n extraction_strategy=extraction_strategy,\n samples=batch_samples,\n tokenizer=self.processor.tokenizer,\n **batch\n )\n preds_all += preds\n\n return preds_all" ]
[ [ "torch.utils.data.sampler.SequentialSampler", "torch.no_grad" ] ]
Cloud-PG/smart-cache
[ "467987abece3fd4830fd615288046359761229f8" ]
[ "scripts/DataAnalysis/clean_csv.py" ]
[ "import gzip\nfrom os import path, walk, makedirs\nfrom sys import argv\n\nimport pandas as pd\n\n\ndef main():\n source_folder = argv[1]\n out_folder = f\"{source_folder}_clean\"\n makedirs(out_folder, exist_ok=True)\n for root, _, files in walk(source_folder):\n for file_ in files:\n _, ext = path.splitext(file_)\n if ext == \".gz\":\n print(f\"=> {file_}\")\n with gzip.GzipFile(path.join(root, file_)) as gzipfile:\n df = pd.read_csv(gzipfile)\n print(f\"==> Cur. schape: {df.shape}\")\n print(f\"===> Remove nan...\")\n df = df.dropna()\n if 'index' in df.columns:\n print(f\"===> Remove index...\")\n del df['index']\n print(f\"==> Cur. schape: {df.shape}\")\n\n with gzip.GzipFile(path.join(out_folder, file_), \"wb\") as cleanfile:\n cleanfile.write(df.to_csv(index=False).encode(\"utf-8\"))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
chyld/pytorch-image-classification
[ "484fe1b5bb234e4bf8b38938aae0bdfa3613d0d7" ]
[ "network.py" ]
[ "import torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nimport numpy as np\n\n\nclass Network:\n\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n\n def __init__(self, arch, units, lr):\n # currently works with resnet, densenet & inception models\n self.arch, self.units, self.lr = arch, units, lr\n self.model = getattr(models, arch)(pretrained=True)\n class_layer_name = 'classifier' if 'classifier' in dir(self.model) else 'fc'\n in_size = getattr(self.model, class_layer_name).in_features\n out_size = 102\n\n # freeze parameters\n for param in self.model.parameters():\n param.requires_grad = False\n\n # create output classification layer\n output_layer = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(in_size, units)),\n ('relu', nn.ReLU()),\n ('dropout', nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(units, out_size)),\n ('relu', nn.ReLU()),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\n # replace old classification layer with custom one\n setattr(self.model, class_layer_name, output_layer)\n\n # loss and optimizer functions\n self.criterion = nn.NLLLoss()\n self.optimizer = optim.Adam(getattr(self.model, class_layer_name).parameters(), lr=lr)\n\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n\n def train(self, gpu, epochs, train_loader, valid_loader):\n cuda = torch.cuda.is_available()\n\n if cuda and gpu:\n self.model.cuda()\n else:\n self.model.cpu()\n\n loss_per_x_batches = 0\n print_every_x_batches = 50\n\n self.model.train()\n\n for epoch in range(epochs):\n for ii, (inputs, labels) in enumerate(train_loader):\n ### -------------------------------------------------------------------------------- ###\n inputs, labels = Variable(\n inputs, requires_grad=True), Variable(labels)\n\n if cuda and gpu:\n inputs, labels = inputs.cuda(), labels.cuda()\n\n outputs = self.model.forward(inputs)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n\n loss_per_x_batches += loss.data[0]\n ### -------------------------------------------------------------------------------- ###\n if ii % print_every_x_batches == 0:\n self.model.eval()\n validation_loss, total_correct, accuracy = 0, 0, 0\n\n for _, (inputs, labels) in enumerate(valid_loader):\n inputs, labels = Variable(\n inputs, requires_grad=False, volatile=True), Variable(labels)\n\n if cuda and gpu:\n inputs, labels = inputs.cuda(), labels.cuda()\n\n outputs = self.model.forward(inputs)\n loss = self.criterion(outputs, labels)\n\n validation_loss += loss.data[0]\n probabilities = torch.exp(outputs).data\n equality = (labels.data == probabilities.max(1)[1])\n total_correct += equality.sum()\n accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n print(\n \"epoch: {} batch: {}\".format(epoch, ii),\n \"train loss: {:.3f}\".format(loss_per_x_batches),\n \"valid loss: {:.3f}\".format(validation_loss),\n \"total correct: {}\".format(total_correct),\n \"accuracy: {:.3f}\".format(accuracy),\n )\n\n self.model.train()\n loss_per_x_batches = 0\n\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ 
###\n ### ------------------------------------------------------ ###\n\n def save(self, save_dir, train_dataset):\n state = {'model_state': self.model.state_dict(),\n 'optimizer_state': self.optimizer.state_dict(),\n 'class_to_idx': train_dataset.class_to_idx,\n 'arch': self.arch,\n 'units': self.units,\n 'lr': self.lr\n }\n torch.save(state, save_dir + '/checkpoint.pth')\n\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n\n def predict(self, image, gpu, top_k, names):\n cuda = torch.cuda.is_available()\n\n # move the model to cuda\n if cuda and gpu:\n self.model.cuda()\n else:\n self.model.cpu()\n\n # turn dropout OFF\n self.model.eval()\n\n # convert numpy array to tensor\n image = torch.from_numpy(np.array([image])).float()\n\n # create variable from tensor\n image = Variable(image, requires_grad=False, volatile=True)\n\n # move the image to cuda\n if cuda and gpu:\n image = image.cuda()\n\n # forward propagation\n output = self.model.forward(image)\n\n # get probabilities\n probabilities = torch.exp(output).data\n\n # getting the topk probabilites and indexes\n top_p = torch.topk(probabilities, top_k)[0].tolist()[0]\n top_i = torch.topk(probabilities, top_k)[1].tolist()[0]\n\n # creating a reverse mapping from index to class\n idx_to_class = {v: k for k, v in self.model.class_to_idx.items()}\n\n # converting the list of indexes to list of classes\n top_c = list(map(lambda i: idx_to_class[i], top_i))\n\n # convert list of classes to list of flower names\n top_n = [names[c] for c in top_c]\n\n return list(zip(top_p, top_n))\n\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n ### ------------------------------------------------------ ###\n" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.exp", "torch.nn.Linear", "torch.save", "torch.FloatTensor", "torch.cuda.is_available", "torch.topk", "torch.nn.ReLU", "numpy.array", "torch.autograd.Variable" ] ]
Cl3V0r/MLSeminar
[ "d05f171a9b7d773ea123e1919e07312a7f0c9fe8" ]
[ "python/RNN.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, SimpleRNN, LeakyReLU, LSTM, SpatialDropout1D, Conv1D, MaxPooling1D\nfrom keras.preprocessing.text import one_hot, Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers.embeddings import Embedding\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\nfrom keras.models import load_model\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, classification_report, roc_curve, auc\nfrom sklearn.model_selection import train_test_split\n\ndef plot_history(network_history):\n plt.figure()\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.plot(network_history.history['loss'])\n plt.plot(network_history.history['val_loss'])\n plt.legend(['Training', 'Validation'])\n plt.savefig(\"../build/plots/history_lstm_loss.pdf\")\n plt.clf()\n\n plt.figure()\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.plot(network_history.history['acc'])\n plt.plot(network_history.history['val_acc'])\n plt.legend(['Training', 'Validation'], loc='lower right')\n plt.savefig(\"../build/plots/history_lstm_acc.pdf\")\n plt.clf()\n\n\ndef evaluate(X_test, Y_test, X_train, Y_train, model):\n ##Evaluate loss and metrics and predict & classes\n loss, accuracy = model.evaluate(X_test, Y_test, verbose=0)\n Y_pred = model.predict(X_test, batch_size=1)\n Y_cls = model.predict_classes(X_test, batch_size=1)\n print('Test Loss:', loss)\n print('Accuracy: %.2f' % accuracy_score(Y_test, Y_cls))\n print(\"Precision: %.2f\" % precision_score(Y_test, Y_cls, average='weighted'))\n print(\"Recall: %.2f\" % recall_score(Y_test, Y_cls, average='weighted'))\n print('Classification Report:\\n', classification_report(Y_test, Y_cls) )\n print(confusion_matrix(Y_test, Y_cls, labels=[0, 1]))\n ## Plot 0 probability including overtraining test\n plt.figure(figsize=(8, 8))\n label = 1\n #Test prediction\n plt.hist(Y_pred[Y_test == label], alpha=0.5,\n color='red', range=[0, 1], bins=10)\n plt.hist(Y_pred[Y_test != label], alpha=0.5,\n color='blue', range=[0, 1], bins=10)\n #Train prediction\n Y_train_pred = model.predict(X_train)\n plt.hist(Y_train_pred[Y_train == label], alpha=0.5, color='red', range=[\n 0, 1], bins=10, histtype='step', linewidth=2)\n plt.hist(Y_train_pred[Y_train != label], alpha=0.5, color='blue', range=[\n 0, 1], bins=10, histtype='step', linewidth=2)\n plt.legend(['train == 1', 'train == 0', 'test == 1',\n 'test == 0'], loc='upper right')\n plt.xlabel('Probability of being real news')\n plt.ylabel('Number of entries')\n plt.savefig(\"../build/plots/hist_rnn_test.pdf\")\n plt.clf()\n\n\n\ndf = pd.read_csv(\"../build/preprocessed/labeled_content_lem_stop.csv\")\ndf = df.dropna()\n#df = df.iloc[0:800]\nX = df[\"content\"]\ny = df[\"label\"]\nprint(np.count_nonzero(y==1),np.count_nonzero(y==0),len(y))\ntop_words = 5000\nmax_text_length = 500\nseed=42\n\ntokenizer = Tokenizer(num_words=top_words, filters=r'!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)\ntokenizer.fit_on_texts(X)\nsequences = tokenizer.texts_to_sequences(X)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\n\ndata = pad_sequences(sequences, maxlen=max_text_length)\nprint('Shape of data tensor:', data.shape)\n\nx_train, X_test, y_train, y_test = train_test_split(\n data, y, test_size=0.3, random_state=seed, shuffle=True) #,stratify=y)\nprint(np.count_nonzero(y_train == 1), np.count_nonzero(y_train == 0), len(y_train))\nprint(np.count_nonzero(y_test == 1), np.count_nonzero(y_test == 0), len(y_test))\n\nX_train, X_val, Y_train, y_val = train_test_split(x_train, y_train, test_size=0.3,\n random_state=seed, shuffle=True)#, stratify=y)\n\nfilepath = '../model/best_rnn.hdf5'\ncheckpoint = ModelCheckpoint(\n filepath, monitor='accuracy', verbose=1, save_best_only=True)\n\nembedding_vecor_length = 32\nmodel = Sequential()\nmodel.add(Embedding(top_words, embedding_vecor_length,input_length=max_text_length))\nmodel.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(LSTM(128, dropout=0.4, recurrent_dropout=0.4))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n#history = model.fit(X_train, Y_train, validation_data=(X_val,y_val),\n# epochs=100, batch_size=8, callbacks=[checkpoint,\n# TensorBoard(log_dir='../build/graph', histogram_freq=50, write_graph=True)])\n#plot_history(history)\n\nbest_model = load_model('../model/best_rnn.hdf5')\nevaluate(X_test,y_test,X_train,Y_train,best_model)\n\ny_pred = best_model.predict(X_test, batch_size=8, verbose=1)\ny_pred_bool = best_model.predict_classes(X_test, batch_size=8, verbose=1)\n\nplt.imshow(confusion_matrix(y_test, y_pred_bool,labels=[0, 1]))\nplt.tight_layout()\nplt.colorbar()\nplt.xticks(range(2), [\"fake\", \"real\"])\nplt.yticks(range(2), [\"fake\", \"real\"])\nplt.savefig(\"../build/plots/cnfsn_mtx_rnn_test.pdf\")\nplt.clf()\n#y_pred = best_model.predict(X_val, batch_size=8, verbose=1)\n#y_pred_bool = best_model.predict_classes(X_val, batch_size=8, verbose=1)\n#print(classification_report(y_val, y_pred_bool))\n#print(confusion_matrix(y_val, y_pred_bool,labels=[0, 1]))\n#plt.imshow(confusion_matrix(y_val, y_pred_bool,labels=[0, 1]))\n#plt.tight_layout()\n#plt.colorbar()\n#plt.xticks(range(2), [\"fake\", \"real\"])\n#plt.yticks(range(2), [\"fake\", \"real\"])\n#plt.savefig(\"../build/plots/cnfsn_mtx_rnn_val.pdf\")\n#plt.clf()\n\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfpr, tpr, _ = roc_curve(y_test, y_pred)\nroc_auc = auc(fpr, tpr)\n\nplt.figure()\nlw = 2\nplt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example')\nplt.legend(loc=\"lower right\")\n#plt.show()\nplt.savefig(\"../build/plots/bow/RNN_bow_roc.pdf\")\nplt.close()\n\nX_false = []\nfor i in range(len(y_test)):\n if(y_test.iloc[i]!=y_pred_bool[i]):\n X_false.append(X_test[i])\n if(len(X_false)==3):\n break \n\nreverse_word_map = dict(map(reversed, tokenizer.word_index.items()))\n\ndef sequence_to_text(list_of_indices):\n words = [reverse_word_map.get(letter) for letter in list_of_indices]\n return(words)\n\nfalse_texts = list(map(sequence_to_text, X_false))\nwith open(\"../build/preprocessed/false_classified_rnn.csv\",\"w\") as file:\n for f in false_texts:\n f=list(filter(None,f))\n file.write(' '.join(f)) \n " ]
[ [ "matplotlib.pyplot.legend", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.plot", "sklearn.metrics.classification_report", "matplotlib.pyplot.tight_layout", "pandas.read_csv", "matplotlib.pyplot.close", "numpy.count_nonzero", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.savefig", "sklearn.metrics.roc_curve", "sklearn.metrics.auc", "sklearn.metrics.recall_score", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "sklearn.metrics.accuracy_score" ] ]
2piruben/langil
[ "e2e41d8d00f7de9a1ba1c014d4bac8b364dbd856" ]
[ "examples/CellCycleVariability/bursttrans.py" ]
[ "import numpy as np\nfrom math import factorial\n\ndef f(p,k,d):\n return k[:-1]/(k[1:]+p*d[1:]) \n # f has one elements rest than the rest\n\ndef sumfact(p,beta,r,k,d):\n\n s = np.zeros(len(k)-1)\n for j in range(p):\n s = s + factorial(p)/factorial(j)*beta[1:]**(p-j-1)*factorial_moment(j,beta,r,k,d)[1:]\n return s\n\ndef gtilde(p,beta,r,k,d):\n\n # print('gtilde: ',sumfact(p,beta,r,k,d),'/',(k[1:]+p*d[1:]))\n return r[1:]*sumfact(p,beta,r,k,d)/(k[1:]+p*d[1:])\n\ndef g(p,beta,r,k,d):\n\n # print('gtilde: ',sumfact(p,beta,r,k,d),'/',(k[1:]+p*d[1:]))\n return r[1:]*p*factorial_moment(p-1,beta,r,k,d)[1:]/(k[1:]+p*d[1:])\n\ndef delta(p,k,d):\n# delta has N-1 elements, same than k\n delta_array = f(p,k,d)\n for i in range(1,len(delta_array)):\n delta_array[i] = delta_array[i]*delta_array[i-1]\n return delta_array\n\ndef thetatilde(p,beta,r,k,d):\n\n frac = gtilde(p,beta,r,k,d)/delta(p,k,d)\n return delta(p,k,d)*np.cumsum(frac)\n\ndef theta(p,beta,r,k,d):\n\n frac = g(p,beta,r,k,d)/delta(p,k,d)\n return delta(p,k,d)*np.cumsum(frac)\n\ndef pi_pop_Erlang(i,N): # probability of finding the cell in state i if the states are exp distributed with same rate\n\treturn (2**(1/N)-1)/(2**((i/N)-1))\n\ndef factorial_moment(p,beta,r,k,d,mode = 'constitutive'):\n # mode can be constitutive or bursty\n\n if (p==0):\n\n nf0 = np.zeros(len(k))\n nf10 = 0\n frac = k[1:]/k[:-1]\n for i in range(1,len(k)):\n p = 1\n for j in range(1,i):\n p = p*frac[j]\n nf10 += p\n\n nf0[0] = 1.0/(1 + nf10)\n \n for i in range(1,len(k)):\n # print('i',i)\n # print('nf0',nf0)\n nf0[i] = nf0[i-1]*frac[i-1]\n return nf0\n\n else:\n\n nfp = np.zeros(len(k))\n s = 0\n delt = delta(p,k,d)\n if mode == 'constitutive':\n theta_mode = theta(p,beta,r,k,d)\n s = p*factorial_moment(p-1,beta,r,k,d)[0]\n elif mode == 'bursty':\n theta_mode = thetatilde(p,beta,r,k,d)\n for j in range(p):\n s = s + factorial(p)/factorial(j)*beta[0]**(p-j-1)*factorial_moment(j,beta,r,k,d)[0]\n\n # print(\"sum for n1 {}\\n\".format(s))\n # print(\"first sum {}\\n\".format(2*r[0]*s))\n # print(\"second sum {}\\n\".format(k[-1]*0.5**(p-1)*thetatild[-1]))\n nf1p = 2*r[0]*s + k[-1]*0.5**(p-1)*theta_mode[-1]\n nf1p = nf1p/( 2*(d[0]*p + k[0]) - k[-1]*0.5**(p-1)*delt[-1])\n nfp[0] = nf1p\n nfp[1:] = delt*nf1p + theta_mode\n\n return nfp\n\ndef meantrajErlang(beta,r,k,d,W,N):\n# mean for the Erlang distribution with parameters beta,r,k,d, with N phases, W of them premitotic\n r_vec = r*np.ones(N)\n k_vec = k*np.ones(N)\n r_vec[W:] = r_vec[W:]*2 \n d_vec = d*np.ones(N)\n beta_vec = beta*np.ones(N)\n return sum(factorial_moment(1,beta_vec,r_vec,k_vec,d_vec))\n\n\ndef vartrajErlang(beta,r,k,d,W,N, mode = 'bursty'):\n# mean for the Erlang distribution with parameters beta,r,k,d, with N phases, W of them premitotic\n r_vec = r*np.ones(N)\n k_vec = k*np.ones(N)\n r_vec[W:] = r_vec[W:]*2 \n d_vec = d*np.ones(N)\n beta_vec = beta*np.ones(N)\n\n n1 = factorial_moment(1,beta_vec,r_vec,k_vec,d_vec, mode = mode)\n # print('n1',n1)\n n2 = factorial_moment(2,beta_vec,r_vec,k_vec,d_vec, mode = mode)\n # print('n2',n2)\n var = sum(n2) + sum(n1) - sum(n1)*sum(n1)\n return var\n\n\ndef meanpopErlang(beta,r,k,d,W,N, mode = 'bursty'):\n# mean for the Erlang distribution with parameters beta,r,k,d, with N phases, W of them premitotic\n r_vec = r*np.ones(N)\n k_vec = k*np.ones(N)*2**(1/N) # this is changed to get pop measurements\n r_vec[W:] = r_vec[W:]*2 \n d_vec = d*np.ones(N)\n beta_vec = beta*np.ones(N)\n\n n1 = factorial_moment(1,beta_vec,r_vec,k_vec,d_vec,mode = 
mode)/factorial_moment(0,beta_vec,r_vec,k_vec,d_vec,mode = mode)\n    return sum(n1*pi_pop_Erlang(np.arange(N)+1,N))\n\n\ndef varpopErlang(beta,r,k,d,W,N,mode = 'bursty'):\n# variance for the Erlang distribution with parameters beta,r,k,d, with N phases, W of them premitotic\n    r_vec = r*np.ones(N)\n    k_vec = k*np.ones(N)*2**(1/N)\n    r_vec[W:] = r_vec[W:]*2 \n    d_vec = d*np.ones(N)\n    beta_vec = beta*np.ones(N)\n\n    n1 = factorial_moment(1,beta_vec,r_vec,k_vec,d_vec,mode = mode)/factorial_moment(0,beta_vec,r_vec,k_vec,d_vec,mode = mode)\n    sum1 = np.sum(n1*pi_pop_Erlang(np.arange(N)+1,N))\n    # print('n1',n1)\n    n2 = factorial_moment(2,beta_vec,r_vec,k_vec,d_vec,mode = mode)/factorial_moment(0,beta_vec,r_vec,k_vec,d_vec,mode = mode)\n    sum2 = np.sum(n2*pi_pop_Erlang(np.arange(N)+1,N))\n    # print('n2',n2)\n    var = sum2 + sum1 - sum1*sum1\n    return var\n\n\n\n\n\n\n\n" ]
[ [ "numpy.arange", "numpy.cumsum", "numpy.ones" ] ]
crate19970523/keras_yolov3_01
[ "fc62c3c520ffc9339b798344911aa45973157250" ]
[ "yolo.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'logs/000/trained_weights_final.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/my_classes.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 0,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n 
score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! 
TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", result)\n if isOutput:\n out.write(result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\nif __name__ == '__main__':\n yolo=YOLO()\n path = '/imgtest/1.jpg'\n try:\n image = Image.open(path)\n except:\n print('Open Error! Try again!')\n else:\n r_image, _ = yolo.detect_image(image)\n r_image.show()\n\n yolo.close_session()" ]
[ [ "numpy.expand_dims", "numpy.random.seed", "numpy.asarray", "numpy.random.shuffle", "numpy.floor", "numpy.array" ] ]
mkskeller/EzPC
[ "f3cbdd46d014aee819c4a877911a709dffd345e5" ]
[ "Athos/TFCompiler/DumpTFMtData.py" ]
[ "'''\r\n\r\nAuthors: Nishant Kumar.\r\n\r\nCopyright:\r\nCopyright (c) 2018 Microsoft Research\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\r\n'''\r\n\r\nimport numpy\r\nimport tensorflow.compat.v1 as tf\r\ntf.disable_eager_execution()\r\n\r\ndef save_graph_metadata(output_tensor, sess, feed_dict):\r\n #First save the graph def\r\n graph_def = tf.get_default_graph().as_graph_def()\r\n optimized_graph_def = graph_def\r\n with open('./graphDef.mtdata', 'w') as f:\r\n f.write(str(optimized_graph_def))\r\n with open('./graphDef.bin', 'wb') as f:\r\n f.write(optimized_graph_def.SerializeToString())\r\n\r\n # Save size information for tensors on which output depends\r\n tensors_to_evaluate = []\r\n tensors_to_evaluate_names = []\r\n graph = tf.get_default_graph()\r\n for node in optimized_graph_def.node:\r\n if graph.get_operation_by_name(node.name).outputs:\r\n cur_output = graph.get_operation_by_name(node.name).outputs[0]\r\n tensors_to_evaluate.append(cur_output)\r\n tensors_to_evaluate_names.append(node.name)\r\n tensors_evaluated = sess.run(tensors_to_evaluate, feed_dict)\r\n tensors_shape = list(map(lambda x : x.shape, tensors_evaluated))\r\n\r\n # Write size info in a file\r\n with open('./sizeInfo.mtdata','w') as f:\r\n for ii, curr in enumerate(tensors_to_evaluate_names):\r\n curShape = tensors_shape[ii]\r\n f.write(tensors_to_evaluate_names[ii] + ' ')\r\n for dim in curShape:\r\n f.write(str(dim)+' ')\r\n f.write('\\n')\r\n\r\n return optimized_graph_def\r\n\r\ndef dumpImageDataInt(imgData, filename, scalingFac, writeMode):\r\n print(\"Dumping image data...\")\r\n with open(filename, writeMode) as ff:\r\n for xx in numpy.nditer(imgData, order='C'):\r\n ff.write(str(int(xx * (1<<scalingFac))) + ' ')\r\n ff.write('\\n\\n')\r\n\r\ndef dumpInt(ff, tensor, scalingFac, sess, update=lambda x: x):\r\n tensor = sess.run(tensor)\r\n for xx in numpy.nditer(tensor, order='C'):\r\n ff.write((str(int(update(xx) * (1<<scalingFac)))) + ' ')\r\n ff.write('\\n\\n')\r\n\r\ndef dumpWeightsInt(filename, scalingFac, writeMode, sess):\r\n with open(filename, writeMode) as ff:\r\n for op in tf.get_default_graph().get_operations():\r\n if op.type in ('Conv2D', 'BiasAdd', 'MatMul'):\r\n dumpInt(ff, op.inputs[1], scalingFac, sess)\r\n elif op.type in ('FusedBatchNorm', 'FusedBatchNormV3'):\r\n gamma, beta, mu, variance = op.inputs[1:]\r\n\r\n epsilon = 1e-5 # Taken from non-fused BN of TF\r\n rsigma = tf.rsqrt(variance + epsilon)\r\n\r\n gamma = gamma * rsigma\r\n dumpInt(ff, gamma, scalingFac, 
sess)\r\n dumpInt(ff, beta - gamma * mu, scalingFac, sess)\r\n dumpInt(ff, tf.zeros(tf.shape(mu)), scalingFac, sess)\r\n dumpInt(ff, tf.fill(tf.shape(variance), 1-epsilon), scalingFac, sess)\r\n\r\ndef dumpImgAndWeightsData2(sess, imgData, filename, scalingFac):\r\n print(\"Starting to dump data...\")\r\n dumpImageDataInt(imgData, filename, scalingFac, 'w')\r\n dumpWeightsInt(filename, scalingFac, 'a', sess)\r\n" ]
[ [ "tensorflow.compat.v1.get_default_graph", "numpy.nditer", "tensorflow.compat.v1.rsqrt", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.disable_eager_execution" ] ]
rob-nn/BuildingMachineLearningSystemsWithPython
[ "cf02b13a569126a46cf75200e85be3ecb410fc64" ]
[ "ch02/threshold.py" ]
[ "# This code is supporting material for the book\n# Building Machine Learning Systems with Python\n# by Willi Richert and Luis Pedro Coelho\n# published by PACKT Publishing\n#\n# It is made available under the MIT License\n\nimport numpy as np\n\n\n# This function was called ``learn_model`` in the first edition\ndef fit_model(features, labels):\n '''Learn a simple threshold model'''\n best_acc = -1.0\n # Loop over all the features:\n for fi in range(features.shape[1]):\n thresh = features[:, fi].copy()\n # test all feature values in order:\n thresh.sort()\n for t in thresh:\n pred = (features[:, fi] > t)\n\n # Measure the accuracy of this \n acc = (pred == labels).mean()\n if acc > best_acc:\n best_acc = acc\n best_fi = fi\n best_t = t\n\n # A model is a threshold and an index\n return best_t, best_fi\n\n\n# This function was called ``apply_model`` in the first edition\ndef predict(features, model):\n '''Apply a learned model'''\n # A model is a pair as returned by fit_model\n t, fi = model\n return features[:, fi] > t\n\ndef accuracy(features, labels, model):\n '''Compute the accuracy of the model'''\n preds = predict(features, model)\n return np.mean(preds == labels)\n" ]
[ [ "numpy.mean" ] ]
JuliaChae/M3D-RPN-Waymo
[ "e73cba585563a094f67a2ba184a22330c134857c", "e73cba585563a094f67a2ba184a22330c134857c" ]
[ "eval/rotate_iou.py", "scripts/config/kitti_3d_multi_main.py" ]
[ "#####################\n# Based on https://github.com/hongzhenwang/RRPN-revise\n# Licensed under The MIT License\n# Author: yanyan, [email protected]\n#####################\nimport math\n\nimport numba\nimport numpy as np\nfrom numba import cuda\n\[email protected](nopython=True)\ndef div_up(m, n):\n return m // n + (m % n > 0)\n\[email protected]('(float32[:], float32[:], float32[:])', device=True, inline=True)\ndef trangle_area(a, b, c):\n return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *\n (b[0] - c[0])) / 2.0\n\n\[email protected]('(float32[:], int32)', device=True, inline=True)\ndef area(int_pts, num_of_inter):\n area_val = 0.0\n for i in range(num_of_inter - 2):\n area_val += abs(\n trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],\n int_pts[2 * i + 4:2 * i + 6]))\n return area_val\n\n\[email protected]('(float32[:], int32)', device=True, inline=True)\ndef sort_vertex_in_convex_polygon(int_pts, num_of_inter):\n if num_of_inter > 0:\n center = cuda.local.array((2, ), dtype=numba.float32)\n center[:] = 0.0\n for i in range(num_of_inter):\n center[0] += int_pts[2 * i]\n center[1] += int_pts[2 * i + 1]\n center[0] /= num_of_inter\n center[1] /= num_of_inter\n v = cuda.local.array((2, ), dtype=numba.float32)\n vs = cuda.local.array((16, ), dtype=numba.float32)\n for i in range(num_of_inter):\n v[0] = int_pts[2 * i] - center[0]\n v[1] = int_pts[2 * i + 1] - center[1]\n d = math.sqrt(v[0] * v[0] + v[1] * v[1])\n v[0] = v[0] / d\n v[1] = v[1] / d\n if v[1] < 0:\n v[0] = -2 - v[0]\n vs[i] = v[0]\n j = 0\n temp = 0\n for i in range(1, num_of_inter):\n if vs[i - 1] > vs[i]:\n temp = vs[i]\n tx = int_pts[2 * i]\n ty = int_pts[2 * i + 1]\n j = i\n while j > 0 and vs[j - 1] > temp:\n vs[j] = vs[j - 1]\n int_pts[j * 2] = int_pts[j * 2 - 2]\n int_pts[j * 2 + 1] = int_pts[j * 2 - 1]\n j -= 1\n\n vs[j] = temp\n int_pts[j * 2] = tx\n int_pts[j * 2 + 1] = ty\n\n\[email protected](\n '(float32[:], float32[:], int32, int32, float32[:])',\n device=True,\n inline=True)\ndef line_segment_intersection(pts1, pts2, i, j, temp_pts):\n A = cuda.local.array((2, ), dtype=numba.float32)\n B = cuda.local.array((2, ), dtype=numba.float32)\n C = cuda.local.array((2, ), dtype=numba.float32)\n D = cuda.local.array((2, ), dtype=numba.float32)\n\n A[0] = pts1[2 * i]\n A[1] = pts1[2 * i + 1]\n\n B[0] = pts1[2 * ((i + 1) % 4)]\n B[1] = pts1[2 * ((i + 1) % 4) + 1]\n\n C[0] = pts2[2 * j]\n C[1] = pts2[2 * j + 1]\n\n D[0] = pts2[2 * ((j + 1) % 4)]\n D[1] = pts2[2 * ((j + 1) % 4) + 1]\n BA0 = B[0] - A[0]\n BA1 = B[1] - A[1]\n DA0 = D[0] - A[0]\n CA0 = C[0] - A[0]\n DA1 = D[1] - A[1]\n CA1 = C[1] - A[1]\n acd = DA1 * CA0 > CA1 * DA0\n bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])\n if acd != bcd:\n abc = CA1 * BA0 > BA1 * CA0\n abd = DA1 * BA0 > BA1 * DA0\n if abc != abd:\n DC0 = D[0] - C[0]\n DC1 = D[1] - C[1]\n ABBA = A[0] * B[1] - B[0] * A[1]\n CDDC = C[0] * D[1] - D[0] * C[1]\n DH = BA1 * DC0 - BA0 * DC1\n Dx = ABBA * DC0 - BA0 * CDDC\n Dy = ABBA * DC1 - BA1 * CDDC\n temp_pts[0] = Dx / DH\n temp_pts[1] = Dy / DH\n return True\n return False\n\n\[email protected](\n '(float32[:], float32[:], int32, int32, float32[:])',\n device=True,\n inline=True)\ndef line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):\n a = cuda.local.array((2, ), dtype=numba.float32)\n b = cuda.local.array((2, ), dtype=numba.float32)\n c = cuda.local.array((2, ), dtype=numba.float32)\n d = cuda.local.array((2, ), dtype=numba.float32)\n\n a[0] = pts1[2 * i]\n a[1] = pts1[2 * i + 1]\n\n b[0] = pts1[2 * ((i + 1) % 
4)]\n b[1] = pts1[2 * ((i + 1) % 4) + 1]\n\n c[0] = pts2[2 * j]\n c[1] = pts2[2 * j + 1]\n\n d[0] = pts2[2 * ((j + 1) % 4)]\n d[1] = pts2[2 * ((j + 1) % 4) + 1]\n\n area_abc = trangle_area(a, b, c)\n area_abd = trangle_area(a, b, d)\n\n if area_abc * area_abd >= 0:\n return False\n\n area_cda = trangle_area(c, d, a)\n area_cdb = area_cda + area_abc - area_abd\n\n if area_cda * area_cdb >= 0:\n return False\n t = area_cda / (area_abd - area_abc)\n\n dx = t * (b[0] - a[0])\n dy = t * (b[1] - a[1])\n temp_pts[0] = a[0] + dx\n temp_pts[1] = a[1] + dy\n return True\n\n\[email protected]('(float32, float32, float32[:])', device=True, inline=True)\ndef point_in_quadrilateral(pt_x, pt_y, corners):\n ab0 = corners[2] - corners[0]\n ab1 = corners[3] - corners[1]\n\n ad0 = corners[6] - corners[0]\n ad1 = corners[7] - corners[1]\n\n ap0 = pt_x - corners[0]\n ap1 = pt_y - corners[1]\n\n abab = ab0 * ab0 + ab1 * ab1\n abap = ab0 * ap0 + ab1 * ap1\n adad = ad0 * ad0 + ad1 * ad1\n adap = ad0 * ap0 + ad1 * ap1\n \n eps=0.001\n return abab >= abap-eps and abap >= 0-eps and adad >= adap-eps and adap >= 0-eps\n\[email protected]('(float32[:], float32[:], float32[:])', device=True, inline=True)\ndef quadrilateral_intersection(pts1, pts2, int_pts):\n num_of_inter = 0\n for i in range(4):\n if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):\n int_pts[num_of_inter * 2] = pts1[2 * i]\n int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]\n num_of_inter += 1\n if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):\n int_pts[num_of_inter * 2] = pts2[2 * i]\n int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]\n num_of_inter += 1\n temp_pts = cuda.local.array((2, ), dtype=numba.float32)\n for i in range(4):\n for j in range(4):\n has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)\n if has_pts:\n int_pts[num_of_inter * 2] = temp_pts[0]\n int_pts[num_of_inter * 2 + 1] = temp_pts[1]\n num_of_inter += 1\n\n return num_of_inter\n\n\[email protected]('(float32[:], float32[:])', device=True, inline=True)\ndef rbbox_to_corners(corners, rbbox):\n # generate clockwise corners and rotate it clockwise\n angle = rbbox[4]\n a_cos = math.cos(angle)\n a_sin = math.sin(angle)\n center_x = rbbox[0]\n center_y = rbbox[1]\n x_d = rbbox[2]\n y_d = rbbox[3]\n corners_x = cuda.local.array((4, ), dtype=numba.float32)\n corners_y = cuda.local.array((4, ), dtype=numba.float32)\n corners_x[0] = -x_d / 2\n corners_x[1] = -x_d / 2\n corners_x[2] = x_d / 2\n corners_x[3] = x_d / 2\n corners_y[0] = -y_d / 2\n corners_y[1] = y_d / 2\n corners_y[2] = y_d / 2\n corners_y[3] = -y_d / 2\n for i in range(4):\n corners[2 *\n i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x\n corners[2 * i\n + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y\n\n\[email protected]('(float32[:], float32[:])', device=True, inline=True)\ndef inter(rbbox1, rbbox2):\n corners1 = cuda.local.array((8, ), dtype=numba.float32)\n corners2 = cuda.local.array((8, ), dtype=numba.float32)\n intersection_corners = cuda.local.array((16, ), dtype=numba.float32)\n\n rbbox_to_corners(corners1, rbbox1)\n rbbox_to_corners(corners2, rbbox2)\n\n num_intersection = quadrilateral_intersection(corners1, corners2,\n intersection_corners)\n sort_vertex_in_convex_polygon(intersection_corners, num_intersection)\n # print(intersection_corners.reshape([-1, 2])[:num_intersection])\n\n return area(intersection_corners, num_intersection)\n\n\[email protected]('(float32[:], float32[:], int32)', device=True, inline=True)\ndef devRotateIoUEval(rbox1, rbox2, 
criterion=-1):\n area1 = rbox1[2] * rbox1[3]\n area2 = rbox2[2] * rbox2[3]\n area_inter = inter(rbox1, rbox2)\n if criterion == -1:\n return area_inter / (area1 + area2 - area_inter)\n elif criterion == 0:\n return area_inter / area1\n elif criterion == 1:\n return area_inter / area2\n else:\n return area_inter\n\[email protected]('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)\ndef rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):\n threadsPerBlock = 8 * 8\n row_start = cuda.blockIdx.x\n col_start = cuda.blockIdx.y\n tx = cuda.threadIdx.x\n row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)\n col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)\n block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)\n block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)\n\n dev_query_box_idx = threadsPerBlock * col_start + tx\n dev_box_idx = threadsPerBlock * row_start + tx\n if (tx < col_size):\n block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]\n block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]\n block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]\n block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]\n block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]\n if (tx < row_size):\n block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]\n block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]\n block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]\n block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]\n block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]\n cuda.syncthreads()\n if tx < row_size:\n for i in range(col_size):\n offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i\n dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],\n block_boxes[tx * 5:tx * 5 + 5], criterion)\n\n\ndef rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):\n \"\"\"rotated box iou running in gpu. 500x faster than cpu version\n (take 5ms in one example with numba.cuda code).\n convert from [this project](\n https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).\n \n Args:\n boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, \n angles(clockwise when positive)\n query_boxes (float tensor: [K, 5]): [description]\n device_id (int, optional): Defaults to 0. 
[description]\n \n Returns:\n [type]: [description]\n \"\"\"\n box_dtype = boxes.dtype\n boxes = boxes.astype(np.float32)\n query_boxes = query_boxes.astype(np.float32)\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n iou = np.zeros((N, K), dtype=np.float32)\n if N == 0 or K == 0:\n return iou\n threadsPerBlock = 8 * 8\n cuda.select_device(device_id)\n blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))\n \n stream = cuda.stream()\n with stream.auto_synchronize():\n boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)\n query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)\n iou_dev = cuda.to_device(iou.reshape([-1]), stream)\n rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](\n N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)\n iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)\n return iou.astype(boxes.dtype)\n", "from easydict import EasyDict as edict\nimport numpy as np\n\ndef Config():\n\n conf = edict()\n \n # ----------------------------------------\n # general\n # ----------------------------------------\n\n conf.model = 'densenet121_3d_dilate_depth_aware'\n \n # solver settings\n conf.solver_type = 'sgd'\n conf.lr = 0.004\n conf.momentum = 0.9\n conf.weight_decay = 0.0005\n conf.max_iter = 50000\n conf.snapshot_iter = 10000\n conf.display = 250\n conf.do_test = True\n \n # sgd parameters\n conf.lr_policy = 'poly'\n conf.lr_steps = None\n conf.lr_target = conf.lr * 0.00001\n \n # random\n conf.rng_seed = 2\n conf.cuda_seed = 2\n \n # misc network\n conf.image_means = [0.485, 0.456, 0.406]\n conf.image_stds = [0.229, 0.224, 0.225]\n conf.feat_stride = 16\n \n conf.has_3d = True\n\n # ----------------------------------------\n # image sampling and datasets\n # ----------------------------------------\n\n # scale sampling \n conf.test_scale = 512\n conf.crop_size = [512, 1760]\n conf.mirror_prob = 0.50\n conf.distort_prob = -1\n \n # datasets\n conf.dataset_test = 'kitti_split1'\n conf.datasets_train = [{'name': 'kitti_split1', 'anno_fmt': 'kitti_det', 'im_ext': '.png', 'scale': 1}]\n conf.use_3d_for_2d = True\n conf.dataset_type = 'KittiDataset'\n \n # percent expected height ranges based on test_scale\n # used for anchor selection \n conf.percent_anc_h = [0.0625, 0.75]\n \n # labels settings\n conf.min_gt_h = conf.test_scale*conf.percent_anc_h[0]\n conf.max_gt_h = conf.test_scale*conf.percent_anc_h[1]\n conf.min_gt_vis = 0.65\n conf.ilbls = ['Van', 'ignore']\n conf.lbls = ['Car', 'Pedestrian', 'Cyclist']\n \n # ----------------------------------------\n # detection sampling\n # ----------------------------------------\n \n # detection sampling\n conf.batch_size = 1\n conf.fg_image_ratio = 1.0\n conf.box_samples = 0.20\n conf.fg_fraction = 0.20\n conf.bg_thresh_lo = 0\n conf.bg_thresh_hi = 0.5\n conf.fg_thresh = 0.5\n conf.ign_thresh = 0.5\n conf.best_thresh = 0.35\n\n # ----------------------------------------\n # inference and testing\n # ----------------------------------------\n\n # nms\n conf.nms_topN_pre = 3000\n conf.nms_topN_post = 40\n conf.nms_thres = 0.4\n conf.clip_boxes = False\n\n conf.test_protocol = 'kitti'\n conf.test_db = 'kitti'\n conf.test_min_h = 0\n conf.min_det_scales = [0, 0]\n\n # ----------------------------------------\n # anchor settings\n # ----------------------------------------\n \n # clustering settings\n conf.cluster_anchors = 0\n conf.even_anchors = 0\n conf.expand_anchors = 0\n \n conf.anchors = None\n\n conf.bbox_means = None\n conf.bbox_stds = None\n \n # initialize anchors\n base = 
(conf.max_gt_h / conf.min_gt_h) ** (1 / (12 - 1))\n conf.anchor_scales = np.array([conf.min_gt_h * (base ** i) for i in range(0, 12)])\n conf.anchor_ratios = np.array([0.5, 1.0, 1.5])\n \n # loss logic\n conf.hard_negatives = True\n conf.focal_loss = 0\n conf.cls_2d_lambda = 1\n conf.iou_2d_lambda = 1\n conf.bbox_2d_lambda = 0\n conf.bbox_3d_lambda = 1\n conf.bbox_3d_proj_lambda = 0.0\n \n conf.hill_climbing = True\n \n conf.bins = 32\n \n # visdom\n conf.visdom_port = 8100\n \n conf.pretrained = 'output/kitti_3d_multi_warmup/weights/model_50000_pkl'\n\n return conf\n\n" ]
[ [ "numpy.zeros" ], [ "numpy.array" ] ]
viz4biz/PyDataNYC2015
[ "066154ea9f1837c355e6108a28b85889f3020da3" ]
[ "vtklib/datasources.py" ]
[ "\"\"\"\nData sources\n\"\"\"\n\nimport math\nimport numpy\n\nft = lambda n: n / 10.\n\n\ndef f(x, y):\n \"\"\"\n sample func\n \"\"\"\n return x * x - y * y\n\n\ndef fx(x, y):\n \"\"\"\n sample func\n \"\"\"\n return 2 * x - 2 * y\n\n\ndef g(x, y):\n \"\"\"\n sample func\n \"\"\"\n return math.exp(-(x * x + y * y)) * math.sin(x) * math.cos(y)\n\n\ndef sin(x, y):\n \"\"\"\n sample func\n \"\"\"\n return numpy.sin(x * y) / (x * y)\n\n\ndef dsin():\n \"\"\"\n sample func\n \"\"\"\n xr = numpy.arange(-7., 7.05, 0.1)\n yr = numpy.arange(-5., 5.05, 0.05)\n data = [(x, [(y, sin(x, y)) for y in yr]) for x in xr]\n return data\n\n\ndef nonedatarandom():\n \"\"\"\n sample func\n \"\"\"\n xr = xrange(100)\n yr = xrange(150)\n ran = numpy.random.randint(1, 100 * 150, 500)\n\n data = []\n\n ind = 0\n\n for x in xr:\n datax = []\n for y in yr:\n if ind in ran:\n datax.append((y, numpy.nan))\n else:\n datax.append((y, f(x, y)))\n ind += 1\n data.append((x, datax))\n\n return data\n\n\ndef nonedata():\n \"\"\"\n sample func\n \"\"\"\n xr = xrange(100)\n yr = xrange(150)\n data = []\n\n for x in xr:\n datax = []\n for y in yr:\n if x % 5 == 0:\n datax.append((y, numpy.nan))\n else:\n datax.append((y, f(x, y)))\n data.append((x, datax))\n\n return data\n\n\ndef dv():\n data = [(x, [(y, f(x, y), fx(x, y)) for y in xrange(-5, 6)]) for x in xrange(-4, 5)]\n return data\n\n\ndef dt():\n data = [(x, [(y, f(x, y)) for y in xrange(-5, 6)]) for x in xrange(-4, 5)]\n return data\n\n\ndef dx():\n data = [(x, [(y, f(x, y)) for y in xrange(0, 10)]) for x in xrange(0, 20)]\n return data\n\n\ndef dtx():\n data = [(ft(x), [(ft(y), g(ft(x), ft(y))) for y in xrange(-50, 60)]) for x in xrange(-40, 50)]\n return data\n\n\ndef dtv():\n data = [(ft(x), [(ft(y), f(ft(x), ft(y)), fx(ft(x), ft(y))) for y in xrange(-50, 60)]) for x in xrange(-40, 50)]\n return data\n\n\n# Example of data with 2D numpy array\ndef zdata():\n data = numpy.array([[(lambda x, y: x * x - y * y)(x, y) for y in xrange(0, 10)] for x in xrange(0, 20)])\n return data\n\n\ndef zerodata():\n data = [(x, [(0, f(x, y)) for y in xrange(0, 10)]) for x in xrange(0, 8)]\n return data\n\n\ndef flatdata():\n data = [(x, [(y, 1) for y in xrange(0, 10)]) for x in xrange(0, 8)]\n return data\n\n\ndef contdata():\n data = [(x, [(y, 1, f(x, y)) for y in xrange(0, 10)]) for x in xrange(0, 8)]\n return data\n\n\ndef timeData():\n \"\"\" sample random time data \"\"\"\n import random\n import datetime\n import pytz\n\n t = datetime.datetime(2013, 5, 23, 12, 15, 15, 0, tzinfo=pytz.UTC)\n data = []\n\n for _ in xrange(100):\n t = t + datetime.timedelta(0, random.randint(10, 60), 0)\n d = random.randint(10, 100)\n data.append((t, d))\n\n return [data]\n\n\ndef timeTSData():\n \"\"\" sample random time data \"\"\"\n import random\n import datetime\n import pytz\n\n t = datetime.datetime(2013, 5, 23, 12, 15, 15, 0, tzinfo=pytz.UTC)\n data = []\n\n for _ in xrange(100):\n t = t + datetime.timedelta(0, random.randint(10, 60), 0)\n d = random.randint(10, 100)\n data.append((t, d))\n\n return [data]\n\n\ndef grid_data():\n \"\"\"\n sample grid data\n \"\"\"\n from numpy.random import uniform, seed\n from matplotlib.mlab import griddata\n import numpy as np\n\n seed(0)\n npts = 200\n x = uniform(-2, 2, npts)\n y = uniform(-2, 2, npts)\n z = x*np.exp(-x**2 - y**2)\n # define grid.\n xi = np.linspace(-2.1, 2.1, 100)\n yi = np.linspace(-2.1, 2.1, 200)\n # grid the data.\n zi = griddata(x, y, z, xi, yi, interp='linear')\n\n return xi, yi, zi\n\n" ]
[ [ "matplotlib.mlab.griddata", "numpy.linspace", "numpy.random.seed", "numpy.arange", "numpy.sin", "numpy.random.uniform", "numpy.exp", "numpy.random.randint" ] ]
weslowrie/pymor
[ "6e283d03b413db5bb58e6d67ef467456d3a2bbf2", "6e283d03b413db5bb58e6d67ef467456d3a2bbf2", "e8b18d2d4c4b5998f0bd84f6728e365e0693b753" ]
[ "src/pymor/algorithms/gram_schmidt.py", "src/pymor/algorithms/sylvester.py", "src/pymortests/sylvester.py" ]
[ "# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nimport numpy as np\n\nfrom pymor.core.defaults import defaults\nfrom pymor.core.exceptions import AccuracyError\nfrom pymor.core.logger import getLogger\n\n\n@defaults('atol', 'rtol', 'reiterate', 'reiteration_threshold', 'check', 'check_tol')\ndef gram_schmidt(A, product=None, return_R=False, atol=1e-13, rtol=1e-13, offset=0,\n reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3,\n copy=True):\n \"\"\"Orthonormalize a |VectorArray| using the modified Gram-Schmidt algorithm.\n\n Parameters\n ----------\n A\n The |VectorArray| which is to be orthonormalized.\n product\n The inner product |Operator| w.r.t. which to orthonormalize.\n If `None`, the Euclidean product is used.\n return_R\n If `True`, the R matrix from QR decomposition is returned.\n atol\n Vectors of norm smaller than `atol` are removed from the array.\n rtol\n Relative tolerance used to detect linear dependent vectors\n (which are then removed from the array).\n offset\n Assume that the first `offset` vectors are already orthonormal and start the\n algorithm at the `offset + 1`-th vector.\n reiterate\n If `True`, orthonormalize again if the norm of the orthogonalized vector is\n much smaller than the norm of the original vector.\n reiteration_threshold\n If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of\n the orthogonalized vector and the original vector is smaller than this value.\n check\n If `True`, check if the resulting |VectorArray| is really orthonormal.\n check_tol\n Tolerance for the check.\n copy\n If `True`, create a copy of `A` instead of modifying `A` in-place.\n\n Returns\n -------\n Q\n The orthonormalized |VectorArray|.\n R\n The upper-triangular/trapezoidal matrix (if `compute_R` is `True`).\n \"\"\"\n\n logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt')\n\n if copy:\n A = A.copy()\n\n # main loop\n R = np.eye(len(A))\n remove = [] # indices of to be removed vectors\n for i in range(offset, len(A)):\n # first calculate norm\n initial_norm = A[i].norm(product)[0]\n\n if initial_norm < atol:\n logger.info(f\"Removing vector {i} of norm {initial_norm}\")\n remove.append(i)\n continue\n\n if i == 0:\n A[0].scal(1 / initial_norm)\n R[i, i] = initial_norm\n else:\n norm = initial_norm\n # If reiterate is True, reiterate as long as the norm of the vector changes\n # strongly during orthogonalization (due to Andreas Buhr).\n while True:\n # orthogonalize to all vectors left\n for j in range(i):\n if j in remove:\n continue\n p = A[j].pairwise_inner(A[i], product)[0]\n A[i].axpy(-p, A[j])\n common_dtype = np.promote_types(R.dtype, type(p))\n R = R.astype(common_dtype, copy=False)\n R[j, i] += p\n\n # calculate new norm\n old_norm, norm = norm, A[i].norm(product)[0]\n\n # remove vector if it got too small\n if norm < rtol * initial_norm:\n logger.info(f\"Removing linearly dependent vector {i}\")\n remove.append(i)\n break\n\n # check if reorthogonalization should be done\n if reiterate and norm < reiteration_threshold * old_norm:\n logger.info(f\"Orthonormalizing vector {i} again\")\n else:\n A[i].scal(1 / norm)\n R[i, i] = norm\n break\n\n if remove:\n del A[remove]\n R = np.delete(R, remove, axis=0)\n\n if check:\n error_matrix = A[offset:len(A)].inner(A, product)\n error_matrix[:len(A) - offset, offset:len(A)] -= np.eye(len(A) - 
offset)\n if error_matrix.size > 0:\n err = np.max(np.abs(error_matrix))\n if err >= check_tol:\n raise AccuracyError(f\"result not orthogonal (max err={err})\")\n\n if return_R:\n return A, R\n else:\n return A\n\n\ndef gram_schmidt_biorth(V, W, product=None,\n reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3,\n copy=True):\n \"\"\"Biorthonormalize a pair of |VectorArrays| using the biorthonormal Gram-Schmidt process.\n\n See Algorithm 1 in [BKS11]_.\n\n Parameters\n ----------\n V, W\n The |VectorArrays| which are to be biorthonormalized.\n product\n The inner product |Operator| w.r.t. which to biorthonormalize.\n If `None`, the Euclidean product is used.\n reiterate\n If `True`, orthonormalize again if the norm of the orthogonalized vector is\n much smaller than the norm of the original vector.\n reiteration_threshold\n If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of\n the orthogonalized vector and the original vector is smaller than this value.\n check\n If `True`, check if the resulting |VectorArray| is really orthonormal.\n check_tol\n Tolerance for the check.\n copy\n If `True`, create a copy of `V` and `W` instead of modifying `V` and `W` in-place.\n\n\n Returns\n -------\n The biorthonormalized |VectorArrays|.\n \"\"\"\n assert V.space == W.space\n assert len(V) == len(W)\n\n logger = getLogger('pymor.algorithms.gram_schmidt.gram_schmidt_biorth')\n\n if copy:\n V = V.copy()\n W = W.copy()\n\n # main loop\n for i in range(len(V)):\n # calculate norm of V[i]\n initial_norm = V[i].norm(product)[0]\n\n # project V[i]\n if i == 0:\n V[0].scal(1 / initial_norm)\n else:\n norm = initial_norm\n # If reiterate is True, reiterate as long as the norm of the vector changes\n # strongly during projection.\n while True:\n for j in range(i):\n # project by (I - V[j] * W[j]^T * E)\n p = W[j].pairwise_inner(V[i], product)[0]\n V[i].axpy(-p, V[j])\n\n # calculate new norm\n old_norm, norm = norm, V[i].norm(product)[0]\n\n # check if reorthogonalization should be done\n if reiterate and norm < reiteration_threshold * old_norm:\n logger.info(f\"Projecting vector V[{i}] again\")\n else:\n V[i].scal(1 / norm)\n break\n\n # calculate norm of W[i]\n initial_norm = W[i].norm(product)[0]\n\n # project W[i]\n if i == 0:\n W[0].scal(1 / initial_norm)\n else:\n norm = initial_norm\n # If reiterate is True, reiterate as long as the norm of the vector changes\n # strongly during projection.\n while True:\n for j in range(i):\n # project by (I - W[j] * V[j]^T * E)\n p = V[j].pairwise_inner(W[i], product)[0]\n W[i].axpy(-p, W[j])\n\n # calculate new norm\n old_norm, norm = norm, W[i].norm(product)[0]\n\n # check if reorthogonalization should be done\n if reiterate and norm < reiteration_threshold * old_norm:\n logger.info(f\"Projecting vector W[{i}] again\")\n else:\n W[i].scal(1 / norm)\n break\n\n # rescale V[i]\n p = W[i].pairwise_inner(V[i], product)[0]\n V[i].scal(1 / p)\n\n if check:\n error_matrix = W.inner(V, product)\n error_matrix -= np.eye(len(V))\n if error_matrix.size > 0:\n err = np.max(np.abs(error_matrix))\n if err >= check_tol:\n raise AccuracyError(f\"result not biorthogonal (max err={err})\")\n\n return V, W\n", "# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright 2013-2020 pyMOR developers and contributors. 
All rights reserved.\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nimport scipy.linalg as spla\n\nfrom pymor.algorithms.to_matrix import to_matrix\nfrom pymor.operators.interface import Operator\nfrom pymor.operators.constructions import IdentityOperator\n\n\ndef solve_sylv_schur(A, Ar, E=None, Er=None, B=None, Br=None, C=None, Cr=None):\n r\"\"\"Solve Sylvester equation by Schur decomposition.\n\n Solves Sylvester equation\n\n .. math::\n A V E_r^T + E V A_r^T + B B_r^T = 0\n\n or\n\n .. math::\n A^T W E_r + E^T W A_r + C^T C_r = 0\n\n or both using (generalized) Schur decomposition (Algorithms 3 and 4\n in [BKS11]_), if the necessary parameters are given.\n\n Parameters\n ----------\n A\n Real |Operator|.\n Ar\n Real |Operator|.\n It is converted into a |NumPy array| using\n :func:`~pymor.algorithms.to_matrix.to_matrix`.\n E\n Real |Operator| or `None` (then assumed to be the identity).\n Er\n Real |Operator| or `None` (then assumed to be the identity).\n It is converted into a |NumPy array| using\n :func:`~pymor.algorithms.to_matrix.to_matrix`.\n B\n Real |Operator| or `None`.\n Br\n Real |Operator| or `None`.\n It is assumed that `Br.range.from_numpy` is implemented.\n C\n Real |Operator| or `None`.\n Cr\n Real |Operator| or `None`.\n It is assumed that `Cr.source.from_numpy` is implemented.\n\n Returns\n -------\n V\n Returned if `B` and `Br` are given, |VectorArray| from\n `A.source`.\n W\n Returned if `C` and `Cr` are given, |VectorArray| from\n `A.source`.\n\n Raises\n ------\n ValueError\n If `V` and `W` cannot be returned.\n \"\"\"\n # check types\n assert isinstance(A, Operator) and A.linear and A.source == A.range\n assert isinstance(Ar, Operator) and Ar.linear and Ar.source == Ar.range\n\n assert E is None or isinstance(E, Operator) and E.linear and E.source == E.range == A.source\n if E is None:\n E = IdentityOperator(A.source)\n assert Er is None or isinstance(Er, Operator) and Er.linear and Er.source == Er.range == Ar.source\n\n compute_V = B is not None and Br is not None\n compute_W = C is not None and Cr is not None\n\n if not compute_V and not compute_W:\n raise ValueError('Not enough parameters are given to solve a Sylvester equation.')\n\n if compute_V:\n assert isinstance(B, Operator) and B.linear and B.range == A.source\n assert isinstance(Br, Operator) and Br.linear and Br.range == Ar.source\n assert B.source == Br.source\n\n if compute_W:\n assert isinstance(C, Operator) and C.linear and C.source == A.source\n assert isinstance(Cr, Operator) and Cr.linear and Cr.source == Ar.source\n assert C.range == Cr.range\n\n # convert reduced operators\n Ar = to_matrix(Ar, format='dense')\n r = Ar.shape[0]\n if Er is not None:\n Er = to_matrix(Er, format='dense')\n\n # (Generalized) Schur decomposition\n if Er is None:\n TAr, Z = spla.schur(Ar, output='complex')\n Q = Z\n else:\n TAr, TEr, Q, Z = spla.qz(Ar, Er, output='complex')\n\n # solve for V, from the last column to the first\n if compute_V:\n V = A.source.empty(reserve=r)\n\n BrTQ = Br.apply_adjoint(Br.range.from_numpy(Q.T))\n BBrTQ = B.apply(BrTQ)\n for i in range(-1, -r - 1, -1):\n rhs = -BBrTQ[i].copy()\n if i < -1:\n if Er is not None:\n rhs -= A.apply(V.lincomb(TEr[i, :i:-1].conjugate()))\n rhs -= E.apply(V.lincomb(TAr[i, :i:-1].conjugate()))\n TErii = 1 if Er is None else TEr[i, i]\n eAaE = TErii.conjugate() * A + TAr[i, i].conjugate() * E\n V.append(eAaE.apply_inverse(rhs))\n\n V = V.lincomb(Z.conjugate()[:, ::-1])\n V = V.real\n\n # solve for W, from the first column to the 
last\n if compute_W:\n W = A.source.empty(reserve=r)\n\n CrZ = Cr.apply(Cr.source.from_numpy(Z.T))\n CTCrZ = C.apply_adjoint(CrZ)\n for i in range(r):\n rhs = -CTCrZ[i].copy()\n if i > 0:\n if Er is not None:\n rhs -= A.apply_adjoint(W.lincomb(TEr[:i, i]))\n rhs -= E.apply_adjoint(W.lincomb(TAr[:i, i]))\n TErii = 1 if Er is None else TEr[i, i]\n eAaE = TErii.conjugate() * A + TAr[i, i].conjugate() * E\n W.append(eAaE.apply_inverse_adjoint(rhs))\n\n W = W.lincomb(Q.conjugate())\n W = W.real\n\n if compute_V and compute_W:\n return V, W\n elif compute_V:\n return V\n else:\n return W\n", "# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nimport numpy as np\nimport scipy.linalg as spla\nimport scipy.sparse as sps\n\nfrom pymor.algorithms.sylvester import solve_sylv_schur\nfrom pymor.operators.numpy import NumpyMatrixOperator\n\nimport pytest\n\n\nn_list = [100, 1000]\nr_list = [1, 10, 20]\nm_list = [1, 2]\np_list = [1, 2]\n\n\ndef fro_norm(A):\n if not sps.issparse(A):\n return spla.norm(A)\n else:\n return sps.linalg.norm(A)\n\n\ndef diff_conv_1d_fd(n, a, b):\n diagonals = [-a * 2 * (n + 1) ** 2 * np.ones((n,)),\n (a * (n + 1) ** 2 + b * (n + 1) / 2) * np.ones((n - 1,)),\n (a * (n + 1) ** 2 - b * (n + 1) / 2) * np.ones((n - 1,))]\n A = sps.diags(diagonals, [0, -1, 1])\n return A\n\n\ndef diff_conv_1d_fem(n, a, b):\n diagonals = [-a * 2 * (n + 1) ** 2 * np.ones((n,)),\n (a * (n + 1) ** 2 + b * (n + 1) / 2) * np.ones((n - 1,)),\n (a * (n + 1) ** 2 - b * (n + 1) / 2) * np.ones((n - 1,))]\n A = sps.diags(diagonals, [0, -1, 1])\n diagonals = [2 / 3 * np.ones((n,)),\n 1 / 6 * np.ones((n - 1,)),\n 1 / 6 * np.ones((n - 1,))]\n E = sps.diags(diagonals, [0, -1, 1])\n return A, E\n\n\[email protected]('n', n_list)\[email protected]('r', r_list)\[email protected]('m', m_list)\ndef test_sylv_schur_V(n, r, m):\n np.random.seed(0)\n\n A = diff_conv_1d_fd(n, 1, 1)\n B = np.random.randn(n, m)\n\n Ar = np.random.randn(r, r) - r * np.eye(r)\n Br = np.random.randn(r, m)\n\n Aop = NumpyMatrixOperator(A)\n Bop = NumpyMatrixOperator(B)\n\n Arop = NumpyMatrixOperator(Ar)\n Brop = NumpyMatrixOperator(Br)\n\n Vva = solve_sylv_schur(Aop, Arop, B=Bop, Br=Brop)\n\n V = Vva.to_numpy().T\n\n AV = A.dot(V)\n VArT = V.dot(Ar.T)\n BBrT = B.dot(Br.T)\n assert fro_norm(AV + VArT + BBrT) / fro_norm(BBrT) < 1e-10\n\n\[email protected]('n', n_list)\[email protected]('r', r_list)\[email protected]('m', m_list)\ndef test_sylv_schur_V_E(n, r, m):\n np.random.seed(0)\n\n A, E = diff_conv_1d_fem(n, 1, 1)\n B = np.random.randn(n, m)\n\n Ar = np.random.randn(r, r) - r * np.eye(r)\n Er = np.random.randn(r, r)\n Er = (Er + Er.T) / 2\n Er += r * np.eye(r)\n Br = np.random.randn(r, m)\n\n Aop = NumpyMatrixOperator(A)\n Eop = NumpyMatrixOperator(E)\n Bop = NumpyMatrixOperator(B)\n\n Arop = NumpyMatrixOperator(Ar)\n Erop = NumpyMatrixOperator(Er)\n Brop = NumpyMatrixOperator(Br)\n\n Vva = solve_sylv_schur(Aop, Arop, E=Eop, Er=Erop, B=Bop, Br=Brop)\n\n V = Vva.to_numpy().T\n\n AVErT = A.dot(V.dot(Er.T))\n EVArT = E.dot(V.dot(Ar.T))\n BBrT = B.dot(Br.T)\n assert fro_norm(AVErT + EVArT + BBrT) / fro_norm(BBrT) < 1e-10\n\n\[email protected]('n', n_list)\[email protected]('r', r_list)\[email protected]('p', p_list)\ndef test_sylv_schur_W(n, r, p):\n np.random.seed(0)\n\n A = diff_conv_1d_fd(n, 1, 1)\n C = np.random.randn(p, n)\n\n Ar = np.random.randn(r, r) - r * np.eye(r)\n 
Cr = np.random.randn(p, r)\n\n Aop = NumpyMatrixOperator(A)\n Cop = NumpyMatrixOperator(C)\n\n Arop = NumpyMatrixOperator(Ar)\n Crop = NumpyMatrixOperator(Cr)\n\n Wva = solve_sylv_schur(Aop, Arop, C=Cop, Cr=Crop)\n\n W = Wva.to_numpy().T\n\n ATW = A.T.dot(W)\n WAr = W.dot(Ar)\n CTCr = C.T.dot(Cr)\n assert fro_norm(ATW + WAr + CTCr) / fro_norm(CTCr) < 1e-10\n\n\[email protected]('n', n_list)\[email protected]('r', r_list)\[email protected]('p', p_list)\ndef test_sylv_schur_W_E(n, r, p):\n np.random.seed(0)\n\n A, E = diff_conv_1d_fem(n, 1, 1)\n C = np.random.randn(p, n)\n\n Ar = np.random.randn(r, r) - r * np.eye(r)\n Er = np.random.randn(r, r)\n Er = (Er + Er.T) / 2\n Er += r * np.eye(r)\n Cr = np.random.randn(p, r)\n\n Aop = NumpyMatrixOperator(A)\n Eop = NumpyMatrixOperator(E)\n Cop = NumpyMatrixOperator(C)\n\n Arop = NumpyMatrixOperator(Ar)\n Erop = NumpyMatrixOperator(Er)\n Crop = NumpyMatrixOperator(Cr)\n\n Wva = solve_sylv_schur(Aop, Arop, E=Eop, Er=Erop, C=Cop, Cr=Crop)\n\n W = Wva.to_numpy().T\n\n ATWEr = A.T.dot(W.dot(Er))\n ETWAr = E.T.dot(W.dot(Ar))\n CTCr = C.T.dot(Cr)\n assert fro_norm(ATWEr + ETWAr + CTCr) / fro_norm(CTCr) < 1e-10\n" ]
[ [ "numpy.delete", "numpy.abs" ], [ "scipy.linalg.qz", "scipy.linalg.schur" ], [ "scipy.sparse.issparse", "numpy.random.seed", "numpy.eye", "scipy.sparse.diags", "numpy.ones", "scipy.sparse.linalg.norm", "numpy.random.randn", "scipy.linalg.norm" ] ]
TeamLasalle/RL-Project
[ "5a094c3fffa24a8e2f47952f0579339917a95636" ]
[ "python/RL/train.py" ]
[ "#-*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport os\nimport time\nimport sys\nimport copy\n\nsys.path.append(\"python\")\nfrom model import Seq2Seq_chatbot\n# from inference import Inference\nfrom data_reader import Data_Reader\nimport data_parser\nimport config\nimport re\n\nfrom gensim.models import KeyedVectors\nfrom rl_model import PolicyGradient_chatbot\nfrom scipy import spatial\nimport tensorflow as tf\nimport numpy as np\nimport math\n\n\n### Global Parameters ###\ncheckpoint = config.CHECKPOINT\nmodel_path = config.train_model_path\nmodel_name = config.train_model_name\nstart_epoch = config.start_epoch\nstart_batch = config.start_batch\n\n# reversed model\nreversed_model_path = config.reversed_model_path\nreversed_model_name = config.reversed_model_name\n\nword_count_threshold = config.WC_threshold\nr_word_count_threshold = config.reversed_WC_threshold\n\n# dialog simulation turns\nmax_turns = config.MAX_TURNS\n\ndull_set = [\"I don't know what you're talking about.\", \"I don't know.\", \"You don't know.\", \"You know what I mean.\", \"I know what you mean.\", \"You know what I'm saying.\", \"You don't know anything.\"]\n\n### Train Parameters ###\ntraining_type = config.training_type # 'normal' for seq2seq training, 'pg' for policy gradient\n\ndim_wordvec = 300\ndim_hidden = 1000\n\nn_encode_lstm_step = 22 + 22\nn_decode_lstm_step = 22\n\nr_n_encode_lstm_step = 22\nr_n_decode_lstm_step = 22\n\nlearning_rate = 0.0001\nepochs = 500\nbatch_size = config.batch_size\nreversed_batch_size = config.batch_size\n\ndef pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):\n if not hasattr(sequences, '__len__'):\n raise ValueError('`sequences` must be iterable.')\n lengths = []\n for x in sequences:\n if not hasattr(x, '__len__'):\n raise ValueError('`sequences` must be a list of iterables. 
'\n 'Found non-iterable: ' + str(x))\n lengths.append(len(x))\n\n num_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0:\n sample_shape = np.asarray(s).shape[1:]\n break\n\n x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if not len(s):\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x\n\n\"\"\" Extract only the vocabulary part of the data \"\"\"\ndef refine(data):\n words = re.findall(\"[a-zA-Z'-]+\", data)\n words = [\"\".join(word.split(\"'\")) for word in words]\n # words = [\"\".join(word.split(\"-\")) for word in words]\n data = ' '.join(words)\n return data\n\ndef make_batch_X(batch_X, n_encode_lstm_step, dim_wordvec, word_vector, noise=False):\n for i in range(len(batch_X)):\n batch_X[i] = [word_vector[w] if w in word_vector else np.zeros(dim_wordvec) for w in batch_X[i]]\n if noise:\n batch_X[i].insert(0, np.random.normal(size=(dim_wordvec,))) # insert random normal at the first step\n\n if len(batch_X[i]) > n_encode_lstm_step:\n batch_X[i] = batch_X[i][:n_encode_lstm_step]\n else:\n for _ in range(len(batch_X[i]), n_encode_lstm_step):\n batch_X[i].append(np.zeros(dim_wordvec))\n\n current_feats = np.array(batch_X)\n return current_feats\n\ndef make_batch_Y(batch_Y, wordtoix, n_decode_lstm_step):\n current_captions = batch_Y\n current_captions = map(lambda x: '<bos> ' + x, current_captions)\n current_captions = map(lambda x: x.replace('.', ''), current_captions)\n current_captions = map(lambda x: x.replace(',', ''), current_captions)\n current_captions = map(lambda x: x.replace('\"', ''), current_captions)\n current_captions = map(lambda x: x.replace('\\n', ''), current_captions)\n current_captions = map(lambda x: x.replace('?', ''), current_captions)\n current_captions = map(lambda x: x.replace('!', ''), current_captions)\n current_captions = map(lambda x: x.replace('\\\\', ''), current_captions)\n current_captions = map(lambda x: x.replace('/', ''), current_captions)\n\n for idx, each_cap in enumerate(current_captions):\n word = each_cap.lower().split(' ')\n if len(word) < n_decode_lstm_step:\n current_captions[idx] = current_captions[idx] + ' <eos>'\n else:\n new_word = ''\n for i in range(n_decode_lstm_step-1):\n new_word = new_word + word[i] + ' '\n current_captions[idx] = new_word + '<eos>'\n\n current_caption_ind = []\n for cap in current_captions:\n current_word_ind = []\n for word in cap.lower().split(' '):\n if word in wordtoix:\n current_word_ind.append(wordtoix[word])\n else:\n current_word_ind.append(wordtoix['<unk>'])\n current_caption_ind.append(current_word_ind)\n\n current_caption_matrix = pad_sequences(current_caption_ind, padding='post', maxlen=n_decode_lstm_step)\n 
current_caption_matrix = np.hstack([current_caption_matrix, np.zeros([len(current_caption_matrix), 1])]).astype(int)\n current_caption_masks = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))\n nonzeros = np.array(map(lambda x: (x != 0).sum() + 1, current_caption_matrix))\n\n for ind, row in enumerate(current_caption_masks):\n row[:nonzeros[ind]] = 1\n\n return current_caption_matrix, current_caption_masks\n\ndef index2sentence(generated_word_index, prob_logit, ixtoword):\n # remove <unk> to second high prob. word\n for i in range(len(generated_word_index)):\n if generated_word_index[i] == 3 or generated_word_index[i] <= 1:\n sort_prob_logit = sorted(prob_logit[i])\n # print('max val', sort_prob_logit[-1])\n # print('second max val', sort_prob_logit[-2])\n # maxindex = np.where(prob_logit[i] == sort_prob_logit[-1])[0][0]\n curindex = np.where(prob_logit[i] == sort_prob_logit[-2])[0][0]\n count = 1\n while curindex <= 3:\n curindex = np.where(prob_logit[i] == sort_prob_logit[(-2)-count])[0][0]\n count += 1\n\n # print('max ind', maxindex, ixtoword[maxindex])\n # print('second max ind', curindex, ixtoword[curindex])\n generated_word_index[i] = curindex\n\n generated_words = []\n for ind in generated_word_index:\n generated_words.append(ixtoword[ind])\n\n # generate sentence\n punctuation = np.argmax(np.array(generated_words) == '<eos>') + 1\n generated_words = generated_words[:punctuation]\n generated_sentence = ' '.join(generated_words)\n\n # modify the output sentence \n generated_sentence = generated_sentence.replace('<bos> ', '')\n generated_sentence = generated_sentence.replace('<eos>', '')\n generated_sentence = generated_sentence.replace(' <eos>', '')\n generated_sentence = generated_sentence.replace('--', '')\n generated_sentence = generated_sentence.split(' ')\n for i in range(len(generated_sentence)):\n generated_sentence[i] = generated_sentence[i].strip()\n if len(generated_sentence[i]) > 1:\n generated_sentence[i] = generated_sentence[i][0].upper() + generated_sentence[i][1:] + '.'\n else:\n generated_sentence[i] = generated_sentence[i].upper()\n generated_sentence = ' '.join(generated_sentence)\n generated_sentence = generated_sentence.replace(' i ', ' I ')\n generated_sentence = generated_sentence.replace(\"i'm\", \"I'm\")\n generated_sentence = generated_sentence.replace(\"i'd\", \"I'd\")\n\n return generated_sentence\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef count_rewards(dull_loss, forward_entropy, backward_entropy, forward_target, backward_target, reward_type='pg'):\n ''' args:\n generated_word_indexs: <type 'numpy.ndarray'> \n word indexs generated by pre-trained model\n shape: (batch_size, n_decode_lstm_step)\n inference_feats: <type 'dict'> \n some features generated during inference\n keys:\n 'probs': \n shape: (n_decode_lstm_step, batch_size, n_words)\n 'embeds': \n shape: (n_decode_lstm_step, batch_size, dim_hidden)\n current word embeddings at each decode stage\n 'states': \n shape: (n_encode_lstm_step, batch_size, dim_hidden)\n LSTM_1's hidden state at each encode stage\n '''\n\n # normal training, rewards all equal to 1\n if reward_type == 'normal':\n return np.ones([batch_size, n_decode_lstm_step])\n\n if reward_type == 'pg':\n forward_entropy = np.array(forward_entropy).reshape(batch_size, n_decode_lstm_step)\n backward_entropy = np.array(backward_entropy).reshape(batch_size, n_decode_lstm_step)\n total_loss = np.zeros([batch_size, n_decode_lstm_step])\n\n for i in range(batch_size):\n # ease of answering\n total_loss[i, 
:] += dull_loss[i]\n \n # information flow\n # cosine_sim = 1 - spatial.distance.cosine(embeds[0][-1], embeds[1][-1])\n # IF = cosine_sim * (-1)\n \n # semantic coherence\n forward_len = len(forward_target[i].split())\n backward_len = len(backward_target[i].split())\n if forward_len > 0:\n total_loss[i, :] += (np.sum(forward_entropy[i]) / forward_len)\n if backward_len > 0:\n total_loss[i, :] += (np.sum(backward_entropy[i]) / backward_len)\n\n # total_loss = total_loss - np.mean(total_loss)\n total_loss = sigmoid(total_loss) * 1.1\n # print('total_loss[:, 0]', total_loss[:, 0])\n\n return total_loss\n\ndef train():\n global dull_set\n\n wordtoix, ixtoword, bias_init_vector = data_parser.preProBuildWordVocab(word_count_threshold=word_count_threshold)\n word_vector = KeyedVectors.load_word2vec_format('./model/word_vector.bin', binary=True)\n\n if len(dull_set) > batch_size:\n dull_set = dull_set[:batch_size]\n else:\n for _ in range(len(dull_set), batch_size):\n dull_set.append('')\n dull_matrix, dull_mask = make_batch_Y(\n batch_Y=dull_set, \n wordtoix=wordtoix, \n n_decode_lstm_step=n_decode_lstm_step)\n\n ones_reward = np.ones([batch_size, n_decode_lstm_step])\n\n g1 = tf.Graph()\n g2 = tf.Graph()\n\n default_graph = tf.get_default_graph() \n\n with g1.as_default():\n model = PolicyGradient_chatbot(\n dim_wordvec=dim_wordvec,\n n_words=len(wordtoix),\n dim_hidden=dim_hidden,\n batch_size=batch_size,\n n_encode_lstm_step=n_encode_lstm_step,\n n_decode_lstm_step=n_decode_lstm_step,\n bias_init_vector=bias_init_vector,\n lr=learning_rate)\n train_op, loss, input_tensors, inter_value = model.build_model()\n tf_states, tf_actions, tf_feats = model.build_generator()\n sess = tf.InteractiveSession()\n saver = tf.train.Saver(max_to_keep=100)\n if checkpoint:\n print(\"Use Model {}.\".format(model_name))\n saver.restore(sess, os.path.join(model_path, model_name))\n print(\"Model {} restored.\".format(model_name))\n else:\n print(\"Restart training...\")\n tf.global_variables_initializer().run()\n\n r_wordtoix, r_ixtoword, r_bias_init_vector = data_parser.preProBuildWordVocab(word_count_threshold=r_word_count_threshold)\n with g2.as_default():\n reversed_model = Seq2Seq_chatbot(\n dim_wordvec=dim_wordvec,\n n_words=len(r_wordtoix),\n dim_hidden=dim_hidden,\n batch_size=reversed_batch_size,\n n_encode_lstm_step=r_n_encode_lstm_step,\n n_decode_lstm_step=r_n_decode_lstm_step,\n bias_init_vector=r_bias_init_vector,\n lr=learning_rate)\n _, _, word_vectors, caption, caption_mask, reverse_inter = reversed_model.build_model()\n sess2 = tf.InteractiveSession()\n saver2 = tf.train.Saver()\n saver2.restore(sess2, os.path.join(reversed_model_path, reversed_model_name))\n print(\"Reversed model {} restored.\".format(reversed_model_name))\n\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n # sess = tf.InteractiveSession()\n\n dr = Data_Reader(cur_train_index=config.cur_train_index, load_list=config.load_list)\n\n for epoch in range(start_epoch, epochs):\n n_batch = dr.get_batch_num(batch_size)\n sb = start_batch if epoch == start_epoch else 0\n for batch in range(sb, n_batch):\n start_time = time.time()\n\n batch_X, batch_Y, former = dr.generate_training_batch_with_former(batch_size)\n\n current_feats = make_batch_X(\n batch_X=copy.deepcopy(batch_X), \n n_encode_lstm_step=n_encode_lstm_step, \n dim_wordvec=dim_wordvec,\n word_vector=word_vector)\n\n current_caption_matrix, current_caption_masks = make_batch_Y(\n batch_Y=copy.deepcopy(batch_Y), \n wordtoix=wordtoix, \n 
n_decode_lstm_step=n_decode_lstm_step)\n\n if training_type == 'pg':\n # action: generate batch_size sents\n action_word_indexs, inference_feats = sess.run([tf_actions, tf_feats],\n feed_dict={\n tf_states: current_feats\n })\n action_word_indexs = np.array(action_word_indexs).reshape(batch_size, n_decode_lstm_step)\n action_probs = np.array(inference_feats['probs']).reshape(batch_size, n_decode_lstm_step, -1)\n\n actions = []\n actions_list = []\n for i in range(len(action_word_indexs)):\n action = index2sentence(\n generated_word_index=action_word_indexs[i], \n prob_logit=action_probs[i],\n ixtoword=ixtoword)\n actions.append(action)\n actions_list.append(action.split())\n\n action_feats = make_batch_X(\n batch_X=copy.deepcopy(actions_list), \n n_encode_lstm_step=n_encode_lstm_step, \n dim_wordvec=dim_wordvec,\n word_vector=word_vector)\n\n action_caption_matrix, action_caption_masks = make_batch_Y(\n batch_Y=copy.deepcopy(actions), \n wordtoix=wordtoix, \n n_decode_lstm_step=n_decode_lstm_step)\n\n # ease of answering\n dull_loss = []\n for vector in action_feats:\n action_batch_X = np.array([vector for _ in range(batch_size)])\n d_loss = sess.run(loss,\n feed_dict={\n input_tensors['word_vectors']: action_batch_X,\n input_tensors['caption']: dull_matrix,\n input_tensors['caption_mask']: dull_mask,\n input_tensors['reward']: ones_reward\n })\n d_loss = d_loss * -1. / len(dull_set)\n dull_loss.append(d_loss)\n\n # Information Flow\n pass\n\n # semantic coherence\n forward_inter = sess.run(inter_value,\n feed_dict={\n input_tensors['word_vectors']: current_feats,\n input_tensors['caption']: action_caption_matrix,\n input_tensors['caption_mask']: action_caption_masks,\n input_tensors['reward']: ones_reward\n })\n forward_entropies = forward_inter['entropies']\n former_caption_matrix, former_caption_masks = make_batch_Y(\n batch_Y=copy.deepcopy(former), \n wordtoix=wordtoix, \n n_decode_lstm_step=n_decode_lstm_step)\n action_feats = make_batch_X(\n batch_X=copy.deepcopy(actions_list), \n n_encode_lstm_step=r_n_encode_lstm_step, \n dim_wordvec=dim_wordvec,\n word_vector=word_vector)\n backward_inter = sess2.run(reverse_inter,\n feed_dict={\n word_vectors: action_feats,\n caption: former_caption_matrix,\n caption_mask: former_caption_masks\n })\n backward_entropies = backward_inter['entropies']\n\n # reward: count goodness of actions\n rewards = count_rewards(dull_loss, forward_entropies, backward_entropies, actions, former, reward_type='pg')\n \n # policy gradient: train batch with rewards\n if batch % 10 == 0:\n _, loss_val = sess.run(\n [train_op, loss],\n feed_dict={\n input_tensors['word_vectors']: current_feats,\n input_tensors['caption']: current_caption_matrix,\n input_tensors['caption_mask']: current_caption_masks,\n input_tensors['reward']: rewards\n })\n print(\"Epoch: {}, batch: {}, loss: {}, Elapsed time: {}\".format(epoch, batch, loss_val, time.time() - start_time))\n else:\n _ = sess.run(train_op,\n feed_dict={\n input_tensors['word_vectors']: current_feats,\n input_tensors['caption']: current_caption_matrix,\n input_tensors['caption_mask']: current_caption_masks,\n input_tensors['reward']: rewards\n })\n if batch % 1000 == 0 and batch != 0:\n print(\"Epoch {} batch {} is done. 
Saving the model ...\".format(epoch, batch))\n saver.save(sess, os.path.join(model_path, 'model-{}-{}'.format(epoch, batch)))\n if training_type == 'normal':\n if batch % 10 == 0:\n _, loss_val = sess.run(\n [train_op, loss],\n feed_dict={\n input_tensors['word_vectors']: current_feats,\n input_tensors['caption']: current_caption_matrix,\n input_tensors['caption_mask']: current_caption_masks,\n input_tensors['reward']: ones_reward\n })\n print(\"Epoch: {}, batch: {}, loss: {}, Elapsed time: {}\".format(epoch, batch, loss_val, time.time() - start_time))\n else:\n _ = sess.run(train_op,\n feed_dict={\n input_tensors['word_vectors']: current_feats,\n input_tensors['caption']: current_caption_matrix,\n input_tensors['caption_mask']: current_caption_masks,\n input_tensors['reward']: ones_reward\n })\n\n print(\"Epoch \", epoch, \" is done. Saving the model ...\")\n saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)\n\nif __name__ == \"__main__\":\n train()\n" ]
[ [ "tensorflow.Graph", "numpy.sum", "tensorflow.InteractiveSession", "numpy.asarray", "numpy.ones", "numpy.exp", "numpy.max", "numpy.random.normal", "tensorflow.global_variables_initializer", "tensorflow.train.Saver", "tensorflow.get_default_graph", "numpy.array", "numpy.zeros", "numpy.where" ] ]
gag/onnx-coreml
[ "f6d331d0dc381d9c469e5c21a006ee6f5c704742" ]
[ "onnx_coreml/_transformers.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom typing import Sequence, Text, Dict, List, Tuple\nimport numpy as np\n\nfrom onnx import TensorProto\n\nfrom ._graph import Graph, Node\n\n\nclass NodesFuser(object):\n '''\n An abstract helper for merging nodes\n '''\n def __init__(self,\n num_nodes, # type: int\n ):\n # type: (...) -> None\n assert num_nodes >= 2, \"Algorithm only works if fusing multiple nodes\"\n self.num_nodes = num_nodes\n\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes = graph.nodes\n merged_nodes = {}\n for node in nodes:\n nodes_window = [] # type: List[Node]\n n = node\n for _ in range(self.num_nodes - 1):\n if len(n.parents) != 1:\n # We're only fusing nodes with single parents\n break\n p = n.get_only_parent()\n if len(p.children) != 1:\n # We can only fuse a node if its parent's\n # value isn't used by any other node.\n break\n nodes_window.insert(0, n)\n n = p\n if len(nodes_window) > 0:\n # add parent of chained nodes\n first = nodes_window[0]\n p = first.get_only_parent()\n if len(p.children) == 1:\n nodes_window.insert(0, p)\n if len(nodes_window) != self.num_nodes:\n continue\n if not self.is_eligible(graph, nodes_window):\n continue\n merged = self.merge(graph, nodes_window)\n first, last = nodes_window[0], nodes_window[-1]\n for parent in first.parents:\n parent.children.remove(first)\n if merged[0] not in parent.children:\n parent.add_child(merged[0])\n for child in last.children:\n child.parents.remove(last)\n if merged[-1] not in child.parents:\n child.add_parent(merged[-1])\n for n in nodes_window:\n merged_nodes[n.name] = merged\n\n transformed_nodes = []\n added_merged = [] # type: List[Node]\n for node in nodes:\n if node.name in merged_nodes:\n merged = merged_nodes[node.name]\n if merged[0] not in added_merged:\n for n in merged:\n transformed_nodes.append(n)\n added_merged.append(merged[0])\n else:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\n def is_eligible(self, graph, nodes): # type: (Graph, Sequence[Node]) -> bool\n '''Returns true if this subset of nodes is eligible for fusion.'''\n raise NotImplementedError('Must be implemented by subclass.')\n\n def merge(self, graph, nodes): # type: (Graph, Sequence[Node]) -> Sequence[Node]\n '''Merge nodes'''\n nodes[0].outputs = nodes[-1].outputs\n return [nodes[0]]\n\nclass ConvAddFuser(NodesFuser):\n '''\n Fuses Add layer into parent convolution layer.\n '''\n def __init__(self): # type: () -> None\n super(ConvAddFuser, self).__init__(2)\n\n def is_eligible(self, graph, nodes): # type: (Graph, Sequence[Node]) -> bool\n parent, child = nodes[0], nodes[1]\n if parent.op_type != 'Conv':\n return False\n if child.op_type != 'Add':\n return False\n if 'broadcast' not in child.attrs:\n return False\n if 'axis' not in child.attrs:\n return False\n if parent.inputs[1] not in parent.input_tensors:\n return False\n if len(parent.inputs) > 2 and parent.inputs[2] not in parent.input_tensors:\n return False\n if child.inputs[1] not in child.input_tensors:\n return False\n\n broadcast = child.attrs['broadcast']\n if broadcast != 1:\n return False\n\n axis = child.attrs['axis']\n if axis != 1:\n return False\n\n return True\n\n def merge(self, graph, nodes): # type: (Graph, Sequence[Node]) -> Sequence[Node]\n parent, child = nodes[0], nodes[1]\n output_channels = parent.input_tensors[parent.inputs[1]].shape[0]\n if 
len(parent.inputs) > 2:\n bias_input_name = parent.inputs[2]\n bias = parent.input_tensors[bias_input_name]\n else:\n bias_input_name = \"{}_bias\".format(parent.name,)\n parent.inputs.append(bias_input_name)\n bias = np.zeros(\n (output_channels,), dtype=np.float32\n )\n parent.input_tensors[bias_input_name] = bias\n bias = bias + child.input_tensors[child.inputs[1]]\n parent.input_tensors[bias_input_name] = bias\n parent.outputs = child.outputs\n parent.children.remove(child)\n child.parents.remove(parent)\n return [parent]\n\nclass BNBroadcastedMulFuser(NodesFuser):\n '''\n Fuses Mul into BatchNorm\n '''\n def __init__(self): # type: () -> None\n super(BNBroadcastedMulFuser, self).__init__(2)\n\n def is_eligible(self, graph, nodes): # type: (Graph, Sequence[Node]) -> bool\n parent, child = nodes[0], nodes[1]\n if parent.op_type != 'BatchNormalization':\n return False\n if child.op_type != 'Mul':\n return False\n if len(child.inputs) != 2:\n return False\n if child.inputs[1] not in child.input_tensors:\n return False\n t = child.input_tensors[child.inputs[1]]\n if len(np.squeeze(t).shape) != 1:\n return False\n if parent.inputs[1] not in parent.input_tensors:\n return False\n if parent.inputs[2] not in parent.input_tensors:\n return False\n return True\n\n def merge(self, graph, nodes): # type: (Graph, Sequence[Node]) -> Sequence[Node]\n parent, child = nodes[0], nodes[1]\n weight = parent.input_tensors[parent.inputs[1]]\n bias = parent.input_tensors[parent.inputs[2]]\n W = np.squeeze(child.input_tensors[child.inputs[1]])\n parent.input_tensors[parent.inputs[1]] = np.multiply(weight, W)\n parent.input_tensors[parent.inputs[2]] = np.multiply(bias, W)\n parent.outputs = child.outputs\n parent.children.remove(child)\n child.parents.remove(parent)\n return [parent]\n\nclass BNBroadcastedAddFuser(NodesFuser):\n '''\n Fuses Add into BatchNorm\n '''\n def __init__(self): # type: () -> None\n super(BNBroadcastedAddFuser, self).__init__(2)\n\n def is_eligible(self, graph, nodes): # type: (Graph, Sequence[Node]) -> bool\n parent, child = nodes[0], nodes[1]\n if parent.op_type != 'BatchNormalization':\n return False\n if child.op_type != 'Add':\n return False\n if len(child.inputs) != 2:\n return False\n if child.inputs[1] not in child.input_tensors:\n return False\n t = child.input_tensors[child.inputs[1]]\n if len(np.squeeze(t).shape) != 1:\n return False\n if parent.inputs[1] not in parent.input_tensors:\n return False\n if parent.inputs[2] not in parent.input_tensors:\n return False\n return True\n\n def merge(self, graph, nodes): # type: (Graph, Sequence[Node]) -> Sequence[Node]\n parent, child = nodes[0], nodes[1]\n bias = parent.input_tensors[parent.inputs[2]]\n b = np.squeeze(child.input_tensors[child.inputs[1]])\n parent.input_tensors[parent.inputs[2]] = bias + b\n parent.outputs = child.outputs\n parent.children.remove(child)\n child.parents.remove(parent)\n return [parent]\n\nclass DropoutRemover(NodesFuser):\n '''\n Removes Dropout layer\n '''\n def __init__(self): # type: () -> None\n super(DropoutRemover, self).__init__(2)\n\n def is_eligible(self, graph, nodes): # type: (Graph, Sequence[Node]) -> bool\n child = nodes[1]\n return child.op_type == \"Dropout\"\n\n def merge(self, graph, nodes): # type: (Graph, Sequence[Node]) -> Sequence[Node]\n parent, child = nodes[0], nodes[1]\n parent.children.remove(child)\n child.parents.remove(parent)\n parent.outputs = [child.outputs[0]]\n return [parent]\n\nclass ReshapeInitTensorFuser(object):\n '''\n Fuses Reshape operator if it is used only 
to reshape blob in\n graph initializer. We can reshape here instead of runtime.\n '''\n\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes = graph.nodes\n removed = []\n for node in nodes:\n if node.op_type != 'Reshape':\n continue\n if not (len(node.input_tensors) == 2 or len(node.input_tensors) == 1):\n continue\n tensor_name = node.inputs[0]\n if tensor_name not in node.input_tensors:\n continue\n if len(node.inputs) > 1:\n shape_name = node.inputs[1]\n if shape_name not in node.input_tensors:\n continue\n is_non_constant_parent = False\n if len(node.parents) > 0:\n for parent in node.parents:\n if parent.op_type != 'Constant':\n is_non_constant_parent = True\n break\n if is_non_constant_parent:\n continue\n\n removed.append(node)\n output_name = node.outputs[0]\n\n tensor = node.input_tensors[tensor_name]\n if 'shape' in node.attrs:\n shape = tuple(node.attrs[\"shape\"])\n else:\n shape = node.input_tensors[shape_name] # type: ignore\n\n # ONNX spec supports setting dimension to '0', in which case\n # it should be taken from old dimension.\n # This isn't supported in numpy, so don't transform.\n # TODO Should we support this case?\n if any([s == 0 for s in shape]):\n continue\n\n reshaped_tensor = tensor.reshape(shape)\n\n for child in node.children:\n child.parents.remove(node)\n child.input_tensors[output_name] = reshaped_tensor\n\n transformed_nodes = [node for node in nodes if node not in removed]\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass OutputRenamer(object):\n '''\n Rename outputs according to mapping\n '''\n def __init__(self,\n mapping, # type: Dict[Text, Text]\n ):\n # type: (...) -> None\n self.mapping = mapping\n\n def __call__(self, graph): # type: (Graph) -> Graph\n mapping = self.mapping.copy()\n nodes = graph.nodes\n for node in nodes:\n for i in range(len(node.outputs)):\n output = node.outputs[i]\n if output not in mapping:\n continue\n node.outputs[i] = mapping[output]\n for child in node.children:\n for j in range(len(child.inputs)):\n input_ = child.inputs[j]\n if input_ != output:\n continue\n child.inputs[j] = mapping[output]\n del mapping[output]\n if len(mapping) == 0:\n break\n return graph\n\nclass PixelShuffleFuser(NodesFuser):\n '''\n Fuses 3 operators reshape->transpose->reshape which is equivalent to\n pytorch's pixel_shuffle layer\n '''\n def __init__(self): # type: () -> None\n super(PixelShuffleFuser, self).__init__(3)\n self.num_added = 0\n\n def is_eligible(self, graph, nodes): # type: (Graph, Sequence[Node]) -> bool\n if nodes[0].op_type != 'Reshape':\n return False\n if nodes[1].op_type != 'Transpose':\n return False\n if nodes[2].op_type != 'Reshape':\n return False\n if len(nodes[0].inputs) == 1:\n return False # it's an old version of onnx Reshape op that had shape as an attribute\n if nodes[0].inputs[1] not in nodes[0].input_tensors:\n return False\n if nodes[2].inputs[1] not in nodes[2].input_tensors:\n return False\n\n shape = nodes[0].input_tensors[nodes[0].inputs[1]]\n if len(shape) != 6:\n return False\n if shape[0] != 1 or shape[2] != shape[3]:\n return False\n\n input_channels = shape[1]\n scale_factor = shape[2]\n input_height = shape[4]\n input_width = shape[5]\n\n if nodes[1].attrs.get('perm', []) != [0, 1, 4, 2, 5, 3]:\n return False\n\n shape = nodes[2].input_tensors[nodes[2].inputs[1]]\n if len(shape) != 4:\n return False\n\n output_channels = shape[1]\n output_height = shape[2]\n output_width = shape[3]\n if input_channels != output_channels:\n return False\n if 
(input_height * scale_factor) != output_height:\n return False\n if (input_width * scale_factor) != output_width:\n return False\n\n return True\n\n def get_unique_edge_name(self, graph, name): # type: (Graph, Text) -> Text\n self.num_added += 1\n return graph.get_unique_edge_name(name + '_' + str(self.num_added))\n\n def merge(self, graph, nodes): # type: (Graph, Sequence[Node]) -> Sequence[Node]\n '''\n Pixel shuffle is implemented using 3 operators:\n - Reshape(1, channels, scale, scale, height, width)\n - Transpose(0, 1, 4, 2, 5, 3)\n - Reshape(1, channels, height * scale, width * scale)\n CoreML Reshape and Transpose layers don't support tensors with more\n than 4 dimensions. Thus we change above sequence of operators to the\n following equivalent sequence:\n - Reshape(channels, scale * scale, height, width)\n - Transpose(0, 2, 1, 3)\n - Reshape(channels * height, scale, scale, width)\n - Transpose(0, 1, 3, 2)\n - Reshape(1, channels, height * scale, width * scale)\n '''\n reshape_1 = nodes[0]\n transpose_1 = nodes[1]\n transpose_1.children = []\n\n shape = reshape_1.input_tensors[reshape_1.inputs[1]]\n\n channels = shape[1]\n scale = shape[2]\n height = shape[4]\n width = shape[5]\n\n reshape_1.input_tensors[reshape_1.inputs[1]] = np.asarray([channels, scale * scale, height, width])\n transpose_1.attrs['perm'] = [0, 2, 1, 3]\n\n reshape_output_name = 'pixel_shuffle_reshape'\n transpose_output_name = 'pixel_shuffle_transpose'\n\n transpose_1.outputs = [\n self.get_unique_edge_name(graph, transpose_output_name)\n ]\n\n shape_name_second_reshape = self.get_unique_edge_name(graph, reshape_output_name)\n output_name_second_reshape = self.get_unique_edge_name(graph, reshape_output_name)\n reshape_2 = Node(\n reshape_output_name,\n 'Reshape',\n {},\n [transpose_1.outputs[0], shape_name_second_reshape],\n [output_name_second_reshape]\n )\n reshape_2.input_tensors[shape_name_second_reshape] = np.asarray([channels * height, scale, scale, width])\n transpose_1.add_child(reshape_2)\n\n transpose_2 = Node(\n transpose_output_name,\n 'Transpose',\n {'perm': [0, 1, 3, 2]},\n reshape_2.outputs,\n [self.get_unique_edge_name(graph, transpose_output_name)]\n )\n reshape_2.add_child(transpose_2)\n\n final_reshape = nodes[2]\n final_reshape.inputs = [transpose_2.outputs[0], nodes[2].inputs[1]]\n final_reshape.parents = []\n transpose_2.add_child(final_reshape)\n return [reshape_1, transpose_1, reshape_2, transpose_2, final_reshape]\n\nclass AddModelInputsOutputs(object):\n '''\n Expose hidden states of recurrent layers as model inputs and outputs\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n input_names = [str(input_[0]) for input_ in graph.inputs]\n output_names = [str(output_[0]) for output_ in graph.outputs]\n for node in graph.nodes:\n if str(node.op_type) == 'LSTM':\n input_h = node.inputs[5] if len(node.inputs) > 5 else node.inputs[0] + '_h_input'\n input_c = node.inputs[6] if len(node.inputs) > 6 else node.inputs[0] + '_c_input'\n output_h = node.outputs[1] if len(node.outputs) > 1 else node.outputs[0] + '_h_output'\n output_c = node.outputs[2] if len(node.outputs) > 2 else node.outputs[0] + '_c_output'\n h = node.attrs[\"hidden_size\"]\n for input_ in [str(input_h), str(input_c)]:\n if input_ not in input_names:\n graph.inputs.append(tuple((input_, TensorProto.FLOAT, (h,)))) #type: ignore\n if input_ not in graph.blob_to_op_type:\n graph.blob_to_op_type[input_] = ['LSTM']\n for output_ in [str(output_h), str(output_c)]:\n if output_ not in output_names:\n 
graph.outputs.append(tuple((output_, TensorProto.FLOAT, (h,)))) #type: ignore\n graph.blob_from_op_type[output_] = 'LSTM'\n return graph\n\nclass ConstantsToInitializers(object):\n '''\n Takes onnx Constant nodes and puts the tensor into graph initializers instead.\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n output_names = [str(output_[0]) for output_ in graph.outputs]\n nodes_to_be_removed = []\n for node in graph.nodes:\n if node.op_type == 'Constant' and (node.name not in output_names):\n nodes_to_be_removed.append(node)\n x = node.attrs[\"value\"]\n for child in node.children:\n child.input_tensors[node.outputs[0]] = x\n child.parents.remove(node)\n graph.shape_dict[node.outputs[0]] = x.shape\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass ConstantFillToInitializers(object):\n '''\n Takes onnx ConstantFill nodes and puts the tensor into graph initializers instead, for simple cases only.\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n output_names = [str(output_[0]) for output_ in graph.outputs]\n nodes_to_be_removed = []\n for node in graph.nodes:\n if node.op_type == 'ConstantFill' and (node.name not in output_names) and \\\n node.attrs.get('input_as_shape', 0) and node.inputs[0] in node.input_tensors \\\n and node.attrs.get('extra_shape', None) is None:\n\n s = node.input_tensors[node.inputs[0]]\n x = np.ones(tuple(s.astype(int))) * node.attrs.get('value', 0.0)\n nodes_to_be_removed.append(node)\n for child in node.children:\n child.input_tensors[node.outputs[0]] = x\n child.parents.remove(node)\n graph.shape_dict[node.outputs[0]] = x.shape\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass ShapeOpRemover(object):\n '''\n remove shape op, if the input shape is fully known\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n output_names = [str(output_[0]) for output_ in graph.outputs]\n for node in graph.nodes:\n if node.op_type == 'Shape' and (node.name not in output_names) and node.inputs[0] in graph.shape_dict:\n x_tuple = graph.shape_dict[node.inputs[0]] # type: Tuple[int, ...]\n is_well_defined = True\n for i in x_tuple:\n if not (isinstance(i, int) and i > 0):\n is_well_defined = False\n break\n if is_well_defined:\n x = np.asarray(x_tuple, dtype=np.float32)\n nodes_to_be_removed.append(node)\n for child in node.children:\n child.input_tensors[node.outputs[0]] = x\n child.parents.remove(node)\n for parent in node.parents:\n parent.children.remove(node)\n graph.shape_dict[node.outputs[0]] = x.shape\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass ImageScalerRemover(object):\n '''\n Removes ImageScaler layer if connected to a model input and single parent child nodes\n '''\n\n def __call__(self, graph): # type: (Graph) -> Graph\n input_names = [str(input_[0]) for input_ in graph.inputs]\n nodes_to_be_removed = []\n for node in graph.nodes:\n if (node.op_type != 'ImageScaler') or (len(node.parents) != 0) or (node.inputs[0] not in input_names):\n continue\n nodes_to_be_removed.append(node.name)\n for child in node.children:\n 
for i, child_input in enumerate(child.inputs):\n if child_input == node.outputs[0]:\n child.inputs[i] = node.inputs[0]\n child.parents.remove(node)\n break\n\n transformed_nodes = []\n for node in graph.nodes:\n if node.name not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass UnsqueezeConstantRemover(object):\n '''\n Removes Unsqueeze or Squeeze op, if its input is constant\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n for node in graph.nodes:\n if (node.op_type == 'Unsqueeze' or node.op_type == 'Squeeze') and \\\n len(node.parents) == 0 and node.inputs[0] in node.input_tensors:\n nodes_to_be_removed.append(node)\n x = node.input_tensors[node.inputs[0]]\n if node.op_type == 'Unsqueeze':\n axes = node.attrs['axes']\n axes.sort()\n for axis in axes:\n x = np.expand_dims(x, axis=axis) # type: ignore\n else:\n axes = node.attrs.get('axes', None)\n # np.squeeze accepts axis=None; guard against tuple(None) raising\n x = np.squeeze(x, axis=tuple(axes) if axes is not None else None)\n graph.shape_dict[node.outputs[0]] = x.shape\n for child_node in node.children:\n child_node.parents.remove(node)\n child_node.input_tensors[node.outputs[0]] = x\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass ConcatConstantRemover(object):\n '''\n Removes Concat op, if its input is constant\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n for node in graph.nodes:\n are_all_inputs_constant = True\n for input_ in node.inputs:\n if input_ not in node.input_tensors:\n are_all_inputs_constant = False\n break\n if node.op_type == 'Concat' and len(node.parents) == 0 and are_all_inputs_constant:\n nodes_to_be_removed.append(node)\n x_arr = []\n for input_ in node.inputs:\n x_arr.append(node.input_tensors[input_])\n axis = node.attrs.get('axis', 0)\n x = np.concatenate(x_arr, axis=axis) # type: ignore\n graph.shape_dict[node.outputs[0]] = x.shape\n for child_node in node.children:\n child_node.parents.remove(node)\n child_node.input_tensors[node.outputs[0]] = x\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass TransposeConstantRemover(object):\n '''\n Removes Transpose op, if its input is constant\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n for node in graph.nodes:\n if node.op_type == 'Transpose' and len(node.parents) == 0 and node.inputs[0] in node.input_tensors:\n nodes_to_be_removed.append(node)\n x = node.input_tensors[node.inputs[0]]\n perm = node.attrs.get('perm', None)\n x = np.transpose(x, axes = perm) # type: ignore\n graph.shape_dict[node.outputs[0]] = x.shape\n for child_node in node.children:\n child_node.parents.remove(node)\n child_node.input_tensors[node.outputs[0]] = x\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass SliceConstantRemover(object):\n '''\n Removes Slice op, if its input is constant\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n for node in graph.nodes:\n if node.op_type == 'Slice' and len(node.parents) == 0 and node.inputs[0] in 
node.input_tensors:\n nodes_to_be_removed.append(node)\n x = node.input_tensors[node.inputs[0]]\n ends = node.attrs['ends']\n starts = node.attrs['starts']\n axes = node.attrs.get('axes', range(len(starts)))\n for i, a in enumerate(axes):\n s = starts[i]\n e = ends[i]\n n = x.shape[a]\n if s < 0: s += n\n if e < 0: e += n\n x = np.take(x, range(s, e), axis=a) # type: ignore\n graph.shape_dict[node.outputs[0]] = x.shape\n for child_node in node.children:\n child_node.parents.remove(node)\n child_node.input_tensors[node.outputs[0]] = x\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass GatherConstantRemover(object):\n '''\n Removes Gather op, if its input is constant\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n for node in graph.nodes:\n if node.op_type == 'Gather' and len(node.parents) == 0 and \\\n node.inputs[0] in node.input_tensors and node.inputs[1] in node.input_tensors:\n\n nodes_to_be_removed.append(node)\n data = node.input_tensors[node.inputs[0]]\n idx = node.input_tensors[node.inputs[1]]\n axis = node.attrs.get('axis', 0)\n x = np.take(data, idx, axis=axis)\n graph.shape_dict[node.outputs[0]] = x.shape\n for child_node in node.children:\n child_node.parents.remove(node)\n child_node.input_tensors[node.outputs[0]] = x\n\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)\n\nclass DivMulConstantRemover(object):\n '''\n Removes Div or Mul op, if its inputs are constant\n '''\n def __call__(self, graph): # type: (Graph) -> Graph\n nodes_to_be_removed = []\n for node in graph.nodes:\n if node.op_type == 'Div' or node.op_type == 'Mul':\n if len(node.parents) == 0 and node.inputs[0] in node.input_tensors and node.inputs[1] in node.input_tensors:\n nodes_to_be_removed.append(node)\n x = node.input_tensors[node.inputs[0]]\n y = node.input_tensors[node.inputs[1]]\n graph.shape_dict[node.outputs[0]] = x.shape\n for child_node in node.children:\n child_node.parents.remove(node)\n if node.op_type == 'Div':\n child_node.input_tensors[node.outputs[0]] = x / y\n else:\n child_node.input_tensors[node.outputs[0]] = x * y\n transformed_nodes = []\n for node in graph.nodes:\n if node not in nodes_to_be_removed:\n transformed_nodes.append(node)\n return Graph(transformed_nodes, graph.inputs, graph.outputs, graph.shape_dict)" ]
[ [ "numpy.expand_dims", "numpy.take", "numpy.multiply", "numpy.asarray", "numpy.squeeze", "numpy.concatenate", "numpy.transpose", "numpy.zeros" ] ]
ralzakark/keras-YOLOv3-model-set
[ "711fbc5298a94e3c0394fdfaecb777a3ef0aeb50" ]
[ "yolo3/loss.py" ]
[ "# -*- coding=utf-8 -*-\n#!/usr/bin/python3\n\nimport math\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom yolo3.postprocess import yolo3_decode\n\ndef softmax_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):\n \"\"\"\n Compute softmax focal loss.\n Reference Paper:\n \"Focal Loss for Dense Object Detection\"\n https://arxiv.org/abs/1708.02002\n\n # Arguments\n y_true: Ground truth targets,\n tensor of shape (?, num_boxes, num_classes).\n y_pred: Predicted logits,\n tensor of shape (?, num_boxes, num_classes).\n gamma: exponent of the modulating factor (1 - p_t) ^ gamma.\n alpha: optional alpha weighting factor to balance positives vs negatives.\n\n # Returns\n softmax_focal_loss: Softmax focal loss, tensor of shape (?, num_boxes).\n \"\"\"\n\n # Scale predictions so that the class probas of each sample sum to 1\n #y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n\n # Clip the prediction value to prevent NaN's and Inf's\n #epsilon = K.epsilon()\n #y_pred = K.clip(y_pred, epsilon, 1. - epsilon)\n y_pred = tf.nn.softmax(y_pred)\n y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)\n\n # Calculate Cross Entropy\n cross_entropy = -y_true * tf.math.log(y_pred)\n\n # Calculate Focal Loss\n softmax_focal_loss = alpha * tf.pow(1 - y_pred, gamma) * cross_entropy\n\n return softmax_focal_loss\n\n\ndef sigmoid_focal_loss(y_true, y_pred, gamma=2.0, alpha=0.25):\n \"\"\"\n Compute sigmoid focal loss.\n Reference Paper:\n \"Focal Loss for Dense Object Detection\"\n https://arxiv.org/abs/1708.02002\n\n # Arguments\n y_true: Ground truth targets,\n tensor of shape (?, num_boxes, num_classes).\n y_pred: Predicted logits,\n tensor of shape (?, num_boxes, num_classes).\n gamma: exponent of the modulating factor (1 - p_t) ^ gamma.\n alpha: optional alpha weighting factor to balance positives vs negatives.\n\n # Returns\n sigmoid_focal_loss: Sigmoid focal loss, tensor of shape (?, num_boxes).\n \"\"\"\n sigmoid_loss = K.binary_crossentropy(y_true, y_pred, from_logits=True)\n\n pred_prob = tf.sigmoid(y_pred)\n p_t = ((y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob)))\n modulating_factor = tf.pow(1.0 - p_t, gamma)\n alpha_weight_factor = (y_true * alpha + (1 - y_true) * (1 - alpha))\n\n sigmoid_focal_loss = modulating_factor * alpha_weight_factor * sigmoid_loss\n #sigmoid_focal_loss = tf.reduce_sum(sigmoid_focal_loss, axis=-1)\n\n return sigmoid_focal_loss\n\n\ndef box_iou(b1, b2):\n \"\"\"\n Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n \"\"\"\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef box_giou(b_true, b_pred):\n \"\"\"\n Calculate GIoU loss on anchor boxes\n Reference Paper:\n \"Generalized 
Intersection over Union: A Metric and A Loss for Bounding Box Regression\"\n https://arxiv.org/abs/1902.09630\n\n Parameters\n ----------\n b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n\n Returns\n -------\n giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n \"\"\"\n b_true_xy = b_true[..., :2]\n b_true_wh = b_true[..., 2:4]\n b_true_wh_half = b_true_wh/2.\n b_true_mins = b_true_xy - b_true_wh_half\n b_true_maxes = b_true_xy + b_true_wh_half\n\n b_pred_xy = b_pred[..., :2]\n b_pred_wh = b_pred[..., 2:4]\n b_pred_wh_half = b_pred_wh/2.\n b_pred_mins = b_pred_xy - b_pred_wh_half\n b_pred_maxes = b_pred_xy + b_pred_wh_half\n\n intersect_mins = K.maximum(b_true_mins, b_pred_mins)\n intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]\n b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]\n union_area = b_true_area + b_pred_area - intersect_area\n # calculate IoU, add epsilon in denominator to avoid dividing by 0\n iou = intersect_area / (union_area + K.epsilon())\n\n # get enclosed area\n enclose_mins = K.minimum(b_true_mins, b_pred_mins)\n enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)\n enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n # calculate GIoU, add epsilon in denominator to avoid dividing by 0\n giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())\n giou = K.expand_dims(giou, -1)\n\n return giou\n\n\ndef box_diou(b_true, b_pred, use_ciou=False):\n \"\"\"\n Calculate DIoU/CIoU loss on anchor boxes\n Reference Paper:\n \"Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression\"\n https://arxiv.org/abs/1911.08287\n\n Parameters\n ----------\n b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n use_ciou: bool flag to indicate whether to use CIoU loss type\n\n Returns\n -------\n diou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n \"\"\"\n b_true_xy = b_true[..., :2]\n b_true_wh = b_true[..., 2:4]\n b_true_wh_half = b_true_wh/2.\n b_true_mins = b_true_xy - b_true_wh_half\n b_true_maxes = b_true_xy + b_true_wh_half\n\n b_pred_xy = b_pred[..., :2]\n b_pred_wh = b_pred[..., 2:4]\n b_pred_wh_half = b_pred_wh/2.\n b_pred_mins = b_pred_xy - b_pred_wh_half\n b_pred_maxes = b_pred_xy + b_pred_wh_half\n\n intersect_mins = K.maximum(b_true_mins, b_pred_mins)\n intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]\n b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]\n union_area = b_true_area + b_pred_area - intersect_area\n # calculate IoU, add epsilon in denominator to avoid dividing by 0\n iou = intersect_area / (union_area + K.epsilon())\n\n # box center distance\n center_distance = K.sum(K.square(b_true_xy - b_pred_xy), axis=-1)\n # get enclosed area\n enclose_mins = K.minimum(b_true_mins, b_pred_mins)\n enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)\n enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)\n # get enclosed diagonal distance\n enclose_diagonal = 
K.sum(K.square(enclose_wh), axis=-1)\n # calculate DIoU, add epsilon in denominator to avoid dividing by 0\n diou = iou - 1.0 * (center_distance) / (enclose_diagonal + K.epsilon())\n\n if use_ciou:\n # calculate param v and alpha to extend to CIoU\n v = 4*K.square(tf.math.atan2(b_true_wh[..., 0], b_true_wh[..., 1]) - tf.math.atan2(b_pred_wh[..., 0], b_pred_wh[..., 1])) / (math.pi * math.pi)\n\n # a trick: here we add a non-gradient coefficient w^2+h^2 to v to customize its back-propagation,\n # to match related description for equation (12) in original paper\n #\n #\n # v'/w' = (8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (h/(w^2+h^2)) (12)\n # v'/h' = -(8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (w/(w^2+h^2))\n #\n # The denominator w^2+h^2 is usually a small value for the cases\n # h and w ranging in [0; 1], which is likely to yield gradient\n # explosion. And thus in our implementation, the denominator\n # w^2+h^2 is simply removed for stable convergence, by which\n # the step size 1/(w^2+h^2) is replaced by 1 and the gradient direction\n # is still consistent with Eqn. (12).\n v = v * tf.stop_gradient(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])\n\n alpha = v / (1.0 - iou + v)\n diou = diou - alpha*v\n\n diou = K.expand_dims(diou, -1)\n return diou\n\n\ndef _smooth_labels(y_true, label_smoothing):\n label_smoothing = K.constant(label_smoothing, dtype=K.floatx())\n return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing\n\n\ndef yolo3_loss(args, anchors, num_classes, ignore_thresh=.5, label_smoothing=0, elim_grid_sense=False, use_focal_loss=False, use_focal_obj_loss=False, use_softmax_loss=False, use_giou_loss=False, use_diou_loss=True):\n '''\n YOLOv3 loss function.\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n num_layers = len(anchors)//3 # default setting\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n\n if num_layers == 3:\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]\n scale_x_y = [1.05, 1.1, 1.2] if elim_grid_sense else [None, None, None]\n else:\n anchor_mask = [[3,4,5], [0,1,2]]\n scale_x_y = [1.05, 1.05] if elim_grid_sense else [None, None]\n\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]\n loss = 0\n total_location_loss = 0\n total_confidence_loss = 0\n total_class_loss = 0\n batch_size = K.shape(yolo_outputs[0])[0] # batch size, tensor\n batch_size_f = K.cast(batch_size, K.dtype(yolo_outputs[0]))\n\n for l in range(num_layers):\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n if label_smoothing:\n true_class_probs = _smooth_labels(true_class_probs, label_smoothing)\n true_objectness_probs = _smooth_labels(object_mask, label_smoothing)\n else:\n true_objectness_probs = object_mask\n\n grid, raw_pred, pred_xy, pred_wh = yolo3_decode(yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, scale_x_y=scale_x_y[l], calc_loss=True)\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid\n raw_true_wh = K.log(y_true[l][..., 2:4] / 
anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = tf.while_loop(lambda b,*args: b<batch_size, loop_body, [0, ignore_mask])\n ignore_mask = ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n if use_focal_obj_loss:\n # Focal loss for objectness confidence\n confidence_loss = sigmoid_focal_loss(true_objectness_probs, raw_pred[...,4:5])\n else:\n confidence_loss = object_mask * K.binary_crossentropy(true_objectness_probs, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n\n if use_focal_loss:\n # Focal loss for classification score\n if use_softmax_loss:\n class_loss = softmax_focal_loss(true_class_probs, raw_pred[...,5:])\n else:\n class_loss = sigmoid_focal_loss(true_class_probs, raw_pred[...,5:])\n else:\n if use_softmax_loss:\n # use softmax style classification output\n class_loss = object_mask * K.expand_dims(K.categorical_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True), axis=-1)\n else:\n # use sigmoid style classification output\n class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n\n if use_giou_loss:\n # Calculate GIoU loss as location loss\n raw_true_box = y_true[l][...,0:4]\n giou = box_giou(raw_true_box, pred_box)\n giou_loss = object_mask * box_loss_scale * (1 - giou)\n giou_loss = K.sum(giou_loss) / batch_size_f\n location_loss = giou_loss\n elif use_diou_loss:\n # Calculate DIoU loss as location loss\n raw_true_box = y_true[l][...,0:4]\n diou = box_diou(raw_true_box, pred_box)\n diou_loss = object_mask * box_loss_scale * (1 - diou)\n diou_loss = K.sum(diou_loss) / batch_size_f\n location_loss = diou_loss\n else:\n # Standard YOLOv3 location loss\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n xy_loss = K.sum(xy_loss) / batch_size_f\n wh_loss = K.sum(wh_loss) / batch_size_f\n location_loss = xy_loss + wh_loss\n\n confidence_loss = K.sum(confidence_loss) / batch_size_f\n class_loss = K.sum(class_loss) / batch_size_f\n loss += location_loss + confidence_loss + class_loss\n total_location_loss += location_loss\n total_confidence_loss += confidence_loss\n total_class_loss += class_loss\n\n # Fit for tf 2.0.0 loss shape\n loss = K.expand_dims(loss, axis=-1)\n\n return loss, total_location_loss, total_confidence_loss, total_class_loss\n\n" ]
[ [ "tensorflow.keras.backend.binary_crossentropy", "tensorflow.keras.backend.floatx", "tensorflow.minimum", "tensorflow.keras.backend.log", "tensorflow.keras.backend.dtype", "tensorflow.keras.backend.maximum", "tensorflow.boolean_mask", "tensorflow.while_loop", "tensorflow.keras.backend.max", "tensorflow.keras.backend.concatenate", "tensorflow.stop_gradient", "tensorflow.keras.backend.square", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.backend.minimum", "tensorflow.pow", "tensorflow.keras.backend.sum", "tensorflow.math.atan2", "tensorflow.keras.backend.zeros_like", "tensorflow.nn.softmax", "tensorflow.keras.backend.categorical_crossentropy", "tensorflow.sigmoid", "tensorflow.keras.backend.cast", "tensorflow.math.log", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.epsilon" ] ]
klydd/kalyumbasic-generator
[ "d676c7b110879e9470de0550325f137d0ac8f30b" ]
[ "generate_model.py" ]
[ "import markovify\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport re\n\n\ndef generate_source_text():\n data = pd.read_csv('data.csv')\n data.dropna(inplace=True)\n\n split_lines = [line.splitlines() for line in data['text'].values.tolist()]\n lines = np.concatenate(split_lines)\n bad_chars = r'\\s|-|—|_'\n lines = [re.sub(r'\\[.+\\]', '', line) for line in lines]\n lines = [re.sub(f'^({bad_chars})+|({bad_chars})+$', '', line) for line in lines]\n lines = [line[0].lower() + line[1:] for line in lines if line.strip()]\n\n text = '\\n'.join(lines)\n return text\n\n\ndef generate_model():\n text = generate_source_text()\n\n # Build the model\n text_model = markovify.NewlineText(text, well_formed=False)\n with open('model.data', 'wb') as f:\n pickle.dump(text_model, f)\n\n with open('model.data', 'rb') as f:\n model = pickle.load(f)\n\n for _ in range(1, 20):\n print(model.make_sentence())\n\n\nif __name__ == \"__main__\":\n generate_model()\n" ]
[ [ "numpy.concatenate", "pandas.read_csv" ] ]
sakost/cyberlifeNEAT
[ "90c3a11e015e63233bfd8f5992b493e8696c6138" ]
[ "game.py" ]
[ "from abc import ABC\nfrom collections import namedtuple\nfrom enum import IntEnum, auto\n\nimport numpy as np\nimport pygame as pg\n\npg.init()\nfont_style = pg.font.SysFont(\"bahnschrift\", 13)\n\n\nGameAction = namedtuple('GameAction', ['cell_id', 'dx', 'dy'])\n\n\ndef find_cell(game, xr, yr):\n for cell in game.cells:\n if cell.x == xr and cell.y == yr:\n return cell\n\n\nclass AbstractGameObject(ABC):\n def tick(self):\n raise NotImplementedError()\n\n\nclass Game(AbstractGameObject):\n WIDTH, HEIGHT = 1600, 900\n\n def __init__(self, pop_size):\n assert pop_size < Cell.H * Cell.W, \"Population size too big\"\n self.pop_size = pop_size\n self.RES = self.WIDTH, self.HEIGHT = Game.WIDTH, Game.HEIGHT\n self.H_WIDTH, self.H_HEIGHT = self.WIDTH // 2, self.HEIGHT // 2\n self.FPS = 10\n self.screen = pg.display.set_mode(self.RES)\n self.clock = pg.time.Clock()\n\n self.cells_food = np.array([[CellFood(x, y) for x in range(Cell.W)] for y in range(Cell.H)])\n\n self.cells = []\n self.generate()\n\n def draw(self):\n self.screen.fill(pg.Color('black'))\n self.draw_grid()\n self.draw_food()\n self.draw_cells()\n\n def draw_grid(self):\n for x in range(0, self.WIDTH, Cell.TILE):\n pg.draw.line(self.screen, pg.Color('dimgray'), (x, 0), (x, self.HEIGHT))\n for y in range(0, self.HEIGHT, Cell.TILE):\n pg.draw.line(self.screen, pg.Color('dimgray'), (0, y), (self.WIDTH, y))\n\n def _draw_tile(self, color, x, y):\n pg.draw.rect(self.screen, pg.Color(color),\n (x * Cell.TILE + 2, y * Cell.TILE + 2, Cell.TILE - 2, Cell.TILE - 2))\n\n def draw_food(self):\n for y in range(Cell.H):\n for x in range(Cell.W):\n if self.cells_food[y, x].magic:\n pass\n else:\n self._draw_tile('forestgreen', x, y)\n render_hp = font_style.render(f'{self.cells_food[y][x].count}', True, pg.Color('yellow'))\n self.screen.blit(render_hp,\n (x * Cell.TILE + Cell.TILE // 2 - render_hp.get_width() // 2 + 2,\n y * Cell.TILE + Cell.TILE // 2 - render_hp.get_height() // 2 + 2))\n\n def draw_cells(self):\n for cell in self.cells:\n if not cell.died:\n self._draw_tile('red', cell.x, cell.y)\n render_hp = font_style.render(f'{cell.hp}', True, pg.Color('yellow'))\n self.screen.blit(render_hp,\n (cell.x * Cell.TILE + Cell.TILE // 2 - render_hp.get_width()//2 + 2,\n cell.y * Cell.TILE + Cell.TILE // 2 - render_hp.get_height()//2 + 2))\n\n def run(self):\n while True:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n exit()\n self.tick()\n\n def generate(self):\n self.cells = [Cell(0, 0, cell_id) for cell_id in range(self.pop_size)]\n coords = [(x, y) for x in range(Cell.W) for y in range(Cell.H)]\n np.random.shuffle(coords)\n for i in range(self.pop_size):\n self.cells[i].x = coords[i][0]\n self.cells[i].y = coords[i][1]\n\n for y in range(Cell.H):\n for x in range(Cell.W):\n self.cells_food[y, x].count = np.random.randint(0, 100)\n self.cells_food[y, x].magic = False\n x = coords[self.pop_size][0]\n y = coords[self.pop_size][1]\n self.cells_food[y, x].magic = False\n\n def restart(self):\n self.cells.clear()\n self.generate()\n\n def update(self, game_action: GameAction):\n cell = self.cells[game_action.cell_id]\n for other_cell in self.cells:\n if cell.x + game_action.dx == other_cell.x and \\\n cell.y + game_action.dy == other_cell.y \\\n and other_cell.cell_id != cell.cell_id:\n return False\n cell.x += game_action.dx\n cell.y += game_action.dy\n cell.heal(self.cells_food[cell.y, cell.x].hit() // Cell.FOOD_DIV)\n return True\n\n def tick(self):\n for cell in self.cells:\n cell.tick()\n for cell in 
self.cells_food.reshape(-1):\n cell.tick()\n self.display_tick()\n\n def display_tick(self):\n pg.display.set_caption(f\"{self.clock.get_fps()}\")\n pg.display.flip()\n self.clock.tick(self.FPS)\n\n\nclass CellType(IntEnum):\n PEACEFUL = auto()\n\n\nclass Cell(AbstractGameObject):\n TILE = 50\n H = Game.HEIGHT // TILE\n W = Game.WIDTH // TILE\n\n HP_PER_TICK = -13\n\n FOOD_PER_TICK = 40\n FOOD_DIV = 2\n\n MAX_HP = 100\n MIN_HP = 0\n\n def __init__(self, x, y, cell_id=None):\n self.x, self.y = x, y\n\n self.hp = np.random.randint(Cell.MIN_HP + Cell.MAX_HP // 2, Cell.MAX_HP)\n self.type = CellType.PEACEFUL\n\n self.cell_id = cell_id\n\n self.hp_delta = 0\n\n def tick(self):\n delta = self.HP_PER_TICK\n self.hp = max(self.hp + delta, Cell.MIN_HP)\n\n def heal(self, count=None):\n old_hp = self.hp\n if count is None:\n count = Cell.FOOD_PER_TICK // Cell.FOOD_DIV\n self.hp = self.hp + count\n self.hp_delta = self.hp - old_hp\n return self.hp_delta\n\n @property\n def died(self):\n return self.hp <= Cell.MIN_HP\n\n @property\n def alive(self):\n return not self.died\n\n\nclass CellFood(AbstractGameObject):\n MAX_COUNT = 40\n MIN_COUNT = 0\n TILE = Cell.TILE\n HP_DAMAGE = Cell.FOOD_PER_TICK\n FOOD_PER_TICK = 6\n\n def __init__(self, x, y, count=None, magic=False):\n self.x, self.y = x, y\n if count is None:\n count = np.random.randint(CellFood.MIN_COUNT, CellFood.MAX_COUNT)\n self.count = count\n self.min = np.random.randint(CellFood.MIN_COUNT, CellFood.MAX_COUNT // 3)\n self.max = np.random.randint(self.min, CellFood.MAX_COUNT)\n self.per_tick = np.random.randint(0, self.FOOD_PER_TICK)\n self.magic = magic\n\n def hit(self):\n old_count = self.count\n self.count = max(self.count - CellFood.HP_DAMAGE, self.min)\n count = old_count - self.count\n return count\n\n def tick(self):\n self.count = min(self.per_tick + self.count, self.max)\n\n\nif __name__ == '__main__':\n Game(50).run()\n" ]
[ [ "numpy.random.shuffle", "numpy.random.randint" ] ]
ukaukaaaa/YoloAll
[ "7f98a6c32fe90c4cf54f65e52731ae880a6c047d" ]
[ "model_zoo/ScaledYolov4/models/common.py" ]
[ "# This file contains modules common to various models\nimport math\n\nimport torch\nimport torch.nn as nn\n\n# from mc.build.lib.mish_mish import MishCuda as Mish\nfrom utils.activations import Mish\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\n\ndef DWConv(c1, c2, k=1, s=1, act=True):\n # Depthwise convolution\n return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Conv, self).__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = Mish() if act else nn.Identity()\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))\n\n\nclass Bottleneck(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super(Bottleneck, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(BottleneckCSP, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = Mish()\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass BottleneckCSP2(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(BottleneckCSP2, self).__init__()\n c_ = int(c2) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv3 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) \n self.act = Mish()\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n\n def forward(self, x):\n x1 = self.cv1(x)\n y1 = self.m(x1)\n y2 = self.cv2(x1)\n return self.cv3(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass VoVCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(VoVCSP, self).__init__()\n c_ = int(c2) # hidden channels\n self.cv1 = Conv(c1//2, c_//2, 3, 1)\n self.cv2 = Conv(c_//2, c_//2, 3, 1)\n self.cv3 = Conv(c_, c2, 1, 1)\n\n def forward(self, x):\n _, x1 = x.chunk(2, dim=1)\n x1 = self.cv1(x1)\n x2 = self.cv2(x1)\n return self.cv3(torch.cat((x1,x2), dim=1))\n\n\nclass SPP(nn.Module):\n # Spatial pyramid pooling layer used in YOLOv3-SPP\n def __init__(self, c1, c2, k=(5, 9, 
13)):\n super(SPP, self).__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n def forward(self, x):\n x = self.cv1(x)\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass SPPCSP(nn.Module):\n # CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):\n super(SPPCSP, self).__init__()\n c_ = int(2 * c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n self.cv3 = Conv(c_, c_, 3, 1)\n self.cv4 = Conv(c_, c_, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n self.cv5 = Conv(4 * c_, c_, 1, 1)\n self.cv6 = Conv(c_, c_, 3, 1)\n self.bn = nn.BatchNorm2d(2 * c_) \n self.act = Mish()\n self.cv7 = Conv(2 * c_, c2, 1, 1)\n\n def forward(self, x):\n x1 = self.cv4(self.cv3(self.cv1(x)))\n y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))\n y2 = self.cv2(x)\n return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass MP(nn.Module):\n # Simple max pooling layer (downsamples spatial dims by k)\n def __init__(self, k=2):\n super(MP, self).__init__()\n self.m = nn.MaxPool2d(kernel_size=k, stride=k)\n\n def forward(self, x):\n return self.m(x)\n\n\nclass Focus(nn.Module):\n # Focus wh information into c-space\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Focus, self).__init__()\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n\n def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))\n\n\nclass Concat(nn.Module):\n # Concatenate a list of tensors along dimension\n def __init__(self, dimension=1):\n super(Concat, self).__init__()\n self.d = dimension\n\n def forward(self, x):\n return torch.cat(x, self.d)\n\n\nclass Flatten(nn.Module):\n # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions\n @staticmethod\n def forward(x):\n return x.view(x.size(0), -1)\n\n\nclass Classify(nn.Module):\n # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2)\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups\n super(Classify, self).__init__()\n self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) # to x(b,c2,1,1)\n self.flat = Flatten()\n\n def forward(self, x):\n z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list\n return self.flat(self.conv(z)) # flatten to x(b,c2)\n\n \nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport collections\n\n\nclass CombConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel=1, stride=1, dropout=0.1, bias=False):\n super().__init__()\n self.add_module('layer1',ConvLayer(in_channels, out_channels, kernel))\n self.add_module('layer2',DWConvLayer(out_channels, out_channels, stride=stride))\n \n def forward(self, x):\n return super().forward(x)\n\nclass DWConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, stride=1, bias=False):\n super().__init__()\n out_ch = out_channels\n \n groups = in_channels\n kernel = 3\n #print(kernel, 'x', kernel, 'x', out_channels, 'x', out_channels, 'DepthWise')\n \n self.add_module('dwconv', nn.Conv2d(groups, groups, kernel_size=3,\n stride=stride, padding=1, groups=groups, bias=bias))\n self.add_module('norm', nn.BatchNorm2d(groups))\n def forward(self, x):\n return super().forward(x) \n\nclass ConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel=3, stride=1, dropout=0.1, bias=False):\n super().__init__()\n out_ch = out_channels\n groups = 1\n #print(kernel, 'x', kernel, 'x', in_channels, 'x', out_channels)\n self.add_module('conv', nn.Conv2d(in_channels, out_ch, kernel_size=kernel, \n stride=stride, padding=kernel//2, groups=groups, bias=bias))\n self.add_module('norm', nn.BatchNorm2d(out_ch))\n self.add_module('relu', nn.ReLU6(True)) \n def forward(self, x):\n return super().forward(x)\n\n\nclass HarDBlock(nn.Module):\n def get_link(self, layer, base_ch, growth_rate, grmul):\n if layer == 0:\n return base_ch, 0, []\n out_channels = growth_rate\n link = []\n for i in range(10):\n dv = 2 ** i\n if layer % dv == 0:\n k = layer - dv\n link.append(k)\n if i > 0:\n out_channels *= grmul\n out_channels = int(int(out_channels + 1) / 2) * 2\n in_channels = 0\n for i in link:\n ch,_,_ = self.get_link(i, base_ch, growth_rate, grmul)\n in_channels += ch\n return out_channels, in_channels, link\n\n def get_out_ch(self):\n return self.out_channels\n\n def __init__(self, in_channels, growth_rate, grmul, n_layers, keepBase=False, residual_out=False, dwconv=False):\n super().__init__()\n self.keepBase = keepBase\n self.links = []\n layers_ = []\n self.out_channels = 0 # if upsample else in_channels\n for i in range(n_layers):\n outch, inch, link = self.get_link(i+1, in_channels, growth_rate, grmul)\n self.links.append(link)\n use_relu = residual_out\n if dwconv:\n layers_.append(CombConvLayer(inch, outch))\n else:\n layers_.append(Conv(inch, outch, k=3))\n\n if (i % 2 == 0) or (i == n_layers - 1):\n self.out_channels += outch\n #print(\"Blk out =\",self.out_channels)\n self.layers = nn.ModuleList(layers_)\n \n def forward(self, x):\n layers_ = [x]\n \n for layer in range(len(self.layers)):\n link = self.links[layer]\n tin = []\n for i in link:\n tin.append(layers_[i])\n if len(tin) > 1: \n x = torch.cat(tin, 1)\n else:\n x = tin[0]\n out = self.layers[layer](x)\n layers_.append(out)\n \n t = len(layers_)\n 
out_ = []\n for i in range(t):\n if (i == 0 and self.keepBase) or (i == t-1) or (i%2 == 1):\n out_.append(layers_[i])\n out = torch.cat(out_, 1)\n return out \n \n\nclass BRLayer(nn.Sequential):\n def __init__(self, in_channels):\n super().__init__()\n \n self.add_module('norm', nn.BatchNorm2d(in_channels))\n self.add_module('relu', nn.ReLU(True))\n def forward(self, x):\n return super().forward(x)\n\n\nclass HarDBlock2(nn.Module):\n def get_link(self, layer, base_ch, growth_rate, grmul):\n if layer == 0:\n return base_ch, 0, []\n out_channels = growth_rate\n link = []\n for i in range(10):\n dv = 2 ** i\n if layer % dv == 0:\n k = layer - dv\n link.insert(0, k)\n if i > 0:\n out_channels *= grmul\n out_channels = int(int(out_channels + 1) / 2) * 2\n in_channels = 0\n for i in link:\n ch,_,_ = self.get_link(i, base_ch, growth_rate, grmul)\n in_channels += ch\n return out_channels, in_channels, link\n\n def get_out_ch(self):\n return self.out_channels\n\n def __init__(self, in_channels, growth_rate, grmul, n_layers, dwconv=False):\n super().__init__()\n self.links = []\n conv_layers_ = []\n bnrelu_layers_ = []\n self.layer_bias = []\n self.out_channels = 0\n self.out_partition = collections.defaultdict(list)\n\n for i in range(n_layers):\n outch, inch, link = self.get_link(i+1, in_channels, growth_rate, grmul)\n self.links.append(link)\n for j in link:\n self.out_partition[j].append(outch)\n\n cur_ch = in_channels\n for i in range(n_layers):\n accum_out_ch = sum( self.out_partition[i] )\n real_out_ch = self.out_partition[i][0]\n #print( self.links[i], self.out_partition[i], accum_out_ch)\n conv_layers_.append( nn.Conv2d(cur_ch, accum_out_ch, kernel_size=3, stride=1, padding=1, bias=True) )\n bnrelu_layers_.append( BRLayer(real_out_ch) )\n cur_ch = real_out_ch\n if (i % 2 == 0) or (i == n_layers - 1):\n self.out_channels += real_out_ch\n #print(\"Blk out =\",self.out_channels)\n\n self.conv_layers = nn.ModuleList(conv_layers_)\n self.bnrelu_layers = nn.ModuleList(bnrelu_layers_)\n \n def transform(self, blk, trt=False):\n # Transform weight matrix from a pretrained HarDBlock v1\n in_ch = blk.layers[0][0].weight.shape[1]\n for i in range(len(self.conv_layers)):\n link = self.links[i].copy()\n link_ch = [blk.layers[k-1][0].weight.shape[0] if k > 0 else \n blk.layers[0 ][0].weight.shape[1] for k in link]\n part = self.out_partition[i]\n w_src = blk.layers[i][0].weight\n b_src = blk.layers[i][0].bias\n \n \n self.conv_layers[i].weight[0:part[0], :, :,:] = w_src[:, 0:in_ch, :,:]\n self.layer_bias.append(b_src)\n \n if b_src is not None:\n if trt:\n self.conv_layers[i].bias[1:part[0]] = b_src[1:]\n self.conv_layers[i].bias[0] = b_src[0]\n self.conv_layers[i].bias[part[0]:] = 0\n self.layer_bias[i] = None\n else:\n #for pytorch, add bias with standalone tensor is more efficient than within conv.bias\n #this is because the amount of non-zero bias is small, \n #but if we use conv.bias, the number of bias will be much larger\n self.conv_layers[i].bias = None\n else:\n self.conv_layers[i].bias = None \n\n in_ch = part[0]\n link_ch.reverse()\n link.reverse()\n if len(link) > 1:\n for j in range(1, len(link) ):\n ly = link[j]\n part_id = self.out_partition[ly].index(part[0])\n chos = sum( self.out_partition[ly][0:part_id] )\n choe = chos + part[0]\n chis = sum( link_ch[0:j] )\n chie = chis + link_ch[j]\n self.conv_layers[ly].weight[chos:choe, :,:,:] = w_src[:, chis:chie,:,:]\n \n #update BatchNorm or remove it if there is no BatchNorm in the v1 block\n self.bnrelu_layers[i] = None\n if 
isinstance(blk.layers[i][1], nn.BatchNorm2d):\n self.bnrelu_layers[i] = nn.Sequential(\n blk.layers[i][1],\n blk.layers[i][2])\n else:\n self.bnrelu_layers[i] = blk.layers[i][1]\n \n\n def forward(self, x):\n layers_ = []\n outs_ = []\n xin = x\n for i in range(len(self.conv_layers)):\n link = self.links[i]\n part = self.out_partition[i]\n\n xout = self.conv_layers[i](xin)\n layers_.append(xout)\n\n xin = xout[:,0:part[0],:,:] if len(part) > 1 else xout\n #print(i)\n #if self.layer_bias[i] is not None:\n # xin += self.layer_bias[i].view(1,-1,1,1)\n\n if len(link) > 1:\n for j in range( len(link) - 1 ):\n ly = link[j]\n part_id = self.out_partition[ly].index(part[0])\n chs = sum( self.out_partition[ly][0:part_id] )\n che = chs + part[0] \n \n xin += layers_[ly][:,chs:che,:,:]\n \n xin = self.bnrelu_layers[i](xin)\n\n if i%2 == 0 or i == len(self.conv_layers)-1:\n outs_.append(xin)\n\n out = torch.cat(outs_, 1)\n return out" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU6", "torch.cat", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
iuming/AI_Computer_Systems
[ "b47c4914a23acfdc469e1a80735bf68191b2acba" ]
[ "AICSE-demo-student/demo/yolov3/yolov3-release/evaluate.py" ]
[ "#! /usr/bin/env python\n# coding=utf-8\n\nimport cv2\nimport os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nimport time\nimport argparse\nfrom core.config import cfg\nfrom core.yolov3 import YOLOV3\n\nclass YoloTest(object):\n def __init__(self):\n self.input_size = cfg.TEST.INPUT_SIZE\n self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE\n self.classes = utils.read_class_names(cfg.YOLO.CLASSES)\n self.num_classes = len(self.classes)\n self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))\n self.score_threshold = cfg.TEST.SCORE_THRESHOLD\n self.iou_threshold = cfg.TEST.IOU_THRESHOLD\n self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY\n self.annotation_path = cfg.TEST.ANNOT_PATH\n self.number = cfg.TEST.NUMBER\n self.weight_file = cfg.TEST.WEIGHT_FILE\n self.model_file = cfg.TEST.MODEL_FILE\n self.write_image = cfg.TEST.WRITE_IMAGE\n self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH\n self.show_label = cfg.TEST.SHOW_LABEL\n self.batch_size = cfg.TEST.BATCH_SIZE\n\n self.core_version = cfg.RUNTIME.CORE_VERSION\n self.precision = cfg.RUNTIME.PRECISION\n self.data_parallelism = cfg.RUNTIME.DATA_PARALLELISM\n self.model_parallelism = cfg.RUNTIME.MODEL_PARALLELISM\n self.core_num = cfg.RUNTIME.CORE_NUM\n\n if os.path.exists(self.model_file):\n print (\"model is exit\")\n else :\n print (\"please check out model_file\")\n graph = load_graph(self.model_file)\n self.input_data = graph.get_tensor_by_name(\"import/input/input_data:0\" )\n self.pred_sbbox = graph.get_tensor_by_name(\"import/pred_sbbox/concat_2:0\" )\n self.pred_mbbox = graph.get_tensor_by_name(\"import/pred_mbbox/concat_2:0\" )\n self.pred_lbbox = graph.get_tensor_by_name(\"import/pred_lbbox/concat_2:0\" )\n config = tf.ConfigProto(allow_soft_placement=True,\n \t\t inter_op_parallelism_threads=1,\n intra_op_parallelism_threads=1)\n config.mlu_options.data_parallelism = self.data_parallelism\n config.mlu_options.model_parallelism = self.model_parallelism\n config.mlu_options.core_num = self.core_num\n config.mlu_options.core_version = self.core_version\n config.mlu_options.precision = self.precision\n\n self.sess = tf.Session(config = config, graph = graph)\n\n def predict(self, images):\n\n org_h = [0 for i in range(self.batch_size)]\n org_w = [0 for i in range(self.batch_size)]\n for i in range(self.batch_size):\n org_h[i], org_w[i], _ = images[i].shape\n\n image_data = utils.images_preporcess(images, [self.input_size, self.input_size])\n\n start = time.time()\n pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(\n [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],\n feed_dict={\n self.input_data: image_data,\n }\n )\n end = time.time()\n print(\"inference time exclude postprocess is: \", (end-start) * 1000)\n batch_bboxes = []\n for idx in range(self.batch_size):\n pred_bbox = np.concatenate([np.reshape(pred_sbbox[idx], (-1, 5 + self.num_classes)),\n np.reshape(pred_mbbox[idx], (-1, 5 + self.num_classes)),\n np.reshape(pred_lbbox[idx], (-1, 5 + self.num_classes))], axis=0)\n bboxes = utils.postprocess_boxes(pred_bbox, (org_h[idx], org_w[idx]), self.input_size, self.score_threshold)\n batch_bboxes.append(utils.nms(bboxes, self.iou_threshold))\n end_ = time.time()\n print(\"inference time include postprocess is: \", (end_-start) * 1000)\n return batch_bboxes, (end - start)\n\n def evaluate(self):\n predicted_dir_path = self.write_image_path + '/mAP/predicted'\n ground_truth_dir_path = self.write_image_path + '/mAP/ground-truth'\n if os.path.exists(predicted_dir_path): 
shutil.rmtree(predicted_dir_path)\n if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)\n if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)\n os.makedirs(predicted_dir_path)\n os.makedirs(ground_truth_dir_path)\n\n batch_idx = 0\n alltime_sess = 0\n start = []\n end = []\n start_end2end = 0.0\n start_post = 0.0\n end_post = 0.0\n alltime_end2end = 0.0\n alltime_prepare = 0.0\n alltime_post = 0.0\n alltime_sess_run = 0.0\n\n batch_count = 0\n batch_image = []\n batch_image_name = []\n with open(self.annotation_path, 'r') as annotation_file:\n for num, line in enumerate(annotation_file):\n if batch_idx == 0:\n start_end2end = time.time()\n annotation = line.strip().split()\n image_path = annotation[0]\n image_name = image_path.split('/')[-1]\n batch_image_name.append(image_name)\n image = cv2.imread(image_path)\n batch_image.append(image)\n bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])\n\n if len(bbox_data_gt) == 0:\n bboxes_gt=[]\n classes_gt=[]\n else:\n bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]\n ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')\n\n num_bbox_gt = len(bboxes_gt)\n with open(ground_truth_path, 'w') as f:\n for i in range(num_bbox_gt):\n class_name = self.classes[classes_gt[i]]\n xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))\n bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\\n'\n f.write(bbox_mess)\n if batch_idx < self.batch_size - 1:\n batch_idx += 1\n continue\n\n print(\"=> Predicting %d th batch images.\" % (batch_count + 1))\n start.append(time.time())\n bboxes_pr, sess_run_time = self.predict(batch_image)\n end.append(time.time())\n if batch_count > 0:\n alltime_sess_run += sess_run_time\n duration_time = (end[batch_count] - start[batch_count])\n alltime_sess += duration_time\n alltime_prepare = alltime_prepare + (start[batch_count] - start_end2end)\n if self.write_image:\n for idx in range(self.batch_size):\n image = utils.draw_bbox(batch_image[idx], bboxes_pr[idx], show_label=self.show_label)\n cv2.imwrite(self.write_image_path+\"/\"+batch_image_name[idx], image)\n\n for idx in range(self.batch_size):\n predict_result_path = os.path.join(predicted_dir_path,\n str(batch_count * self.batch_size + idx) + '.txt')\n with open(predict_result_path, 'w') as f:\n for bbox in bboxes_pr[idx]:\n coor = np.array(bbox[:4], dtype=np.int32)\n score = bbox[4]\n class_ind = int(bbox[5])\n class_name = self.classes[class_ind + 1]\n score = '%.4f' % score\n xmin, ymin, xmax, ymax = list(map(str, coor))\n bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\\n'\n f.write(bbox_mess)\n if batch_count > 0:\n temp = time.time()\n alltime_end2end = alltime_end2end + (temp - start_end2end)\n alltime_post = alltime_post + temp - end[batch_count]\n batch_count += 1\n if self.number < (batch_count + 1) * self.batch_size:\n print(\"we have evaluated %d batch images\"%(batch_count))\n break\n batch_idx = 0\n batch_image = []\n batch_image_name = []\n if(self.number > 1):\n print('latency: %f (ms)' % (alltime_sess_run * 1000 / (batch_count - 1)))\n print('throughput: %f' % (((batch_count - 1) * self.batch_size) / alltime_sess_run))\n\n def voc_2012_test(self, voc2012_test_path):\n\n img_inds_file = os.path.join(voc2012_test_path, 'ImageSets', 'Main', 'test.txt')\n with open(img_inds_file, 'r') as f:\n txt = f.readlines()\n image_inds = [line.strip() for line in txt]\n\n results_path = 'results/VOC2012/Main'\n if 
os.path.exists(results_path):\n shutil.rmtree(results_path)\n os.makedirs(results_path)\n\n for image_ind in image_inds:\n image_path = os.path.join(voc2012_test_path, 'JPEGImages', image_ind + '.jpg')\n image = cv2.imread(image_path)\n\n print('predict result of %s:' % image_ind)\n bboxes_pr = self.predict(image)\n for bbox in bboxes_pr:\n coor = np.array(bbox[:4], dtype=np.int32)\n score = bbox[4]\n class_ind = int(bbox[5])\n class_name = self.classes[class_ind]\n score = '%.4f' % score\n xmin, ymin, xmax, ymax = list(map(str, coor))\n bbox_mess = ' '.join([image_ind, score, xmin, ymin, xmax, ymax]) + '\\n'\n with open(os.path.join(results_path, 'comp4_det_test_' + class_name + '.txt'), 'a') as f:\n f.write(bbox_mess)\n print('\\t' + str(bbox_mess).strip())\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--result_path\", help=\"result path to write\")\n parser.add_argument(\"--records\", help=\"records to be processed\")\n parser.add_argument(\"--number\", type=int, help=\"number of records to be processed\")\n parser.add_argument(\"--core_version\", type=str, help=\"MLU100/MLU270\", default=\"MLU100\")\n parser.add_argument(\"--precision\", type=str, help=\"float/int8\", default=\"float\")\n parser.add_argument(\"--data_parallelism\", type=int, help=\"data_parallelism\")\n parser.add_argument(\"--model_parallelism\", type=int, help=\"model_parallelism\")\n parser.add_argument(\"--core_num\", type=int, help=\"core_num\")\n parser.add_argument(\"--input_size\", type=int, help=\"choose the input size\", default=416)\n parser.add_argument(\"--batch_size\", type=int, help=\"batch size\")\n args = parser.parse_args()\n if args.graph:\n cfg.TEST.MODEL_FILE = args.graph\n if args.result_path:\n cfg.TEST.WRITE_IMAGE_PATH = args.result_path\n if args.records:\n cfg.TEST.ANNOT_PATH = args.records\n if args.number:\n cfg.TEST.NUMBER = args.number\n if args.core_version:\n cfg.RUNTIME.CORE_VERSION = args.core_version\n if args.precision:\n cfg.RUNTIME.PRECISION = args.precision\n if args.data_parallelism:\n cfg.RUNTIME.DATA_PARALLELISM = args.data_parallelism\n if args.model_parallelism:\n cfg.RUNTIME.MODEL_PARALLELISM = args.model_parallelism\n if args.core_num:\n cfg.RUNTIME.CORE_NUM = args.core_num\n if args.input_size:\n cfg.TEST.INPUT_SIZE = args.input_size\n if args.batch_size:\n cfg.TEST.BATCH_SIZE = args.batch_size\n\n YoloTest().evaluate()\n\n\n\n" ]
[ [ "tensorflow.Graph", "tensorflow.import_graph_def", "numpy.reshape", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.GraphDef", "numpy.array" ] ]
IgoshinLab/pix2pixHD-HE
[ "f4d61cc101ce8af3f236d578feef3a7a048bb41d" ]
[ "dataloaders/utils/general/helperFunctions.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch import autograd\nfrom torch.autograd import Variable\n\n\ndef calc_gradient_penalty(model, real_data, fake_data, conditional_data, use_cuda=False):\n '''\n Copied from https://github.com/caogang/wgan-gp\n '''\n LAMBDA = 10\n BATCH_SIZE = real_data.size()[0]\n\n alpha = torch.rand(BATCH_SIZE, 1)\n alpha = alpha.expand(BATCH_SIZE, int(real_data.nelement() / BATCH_SIZE)).contiguous().view_as(real_data)\n alpha = alpha.cuda() if use_cuda else alpha\n alpha = Variable(alpha, requires_grad=True)\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n disc_interpolates = model(conditional_data, interpolates)\n\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size()).cuda() if use_cuda else torch.ones(\n disc_interpolates.size()),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n # Derivatives of the gradient close to 0 can cause problems because of\n # the square root, so manually calculate norm and add epsilon\n grad_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n gradient_penalty = ((grad_norm - 1) ** 2).mean() * LAMBDA\n\n return gradient_penalty, grad_norm\n\n\n# Register forward hook\nclass Hook:\n def __init__(self):\n self.feature = []\n\n def clear(self):\n self.feature = []\n\n def hook(self, module, fea_in, fea_out):\n self.feature.append(fea_out)\n return None\n\n\n" ]
[ [ "torch.sum", "torch.rand", "torch.autograd.Variable" ] ]
laurelkeys/intimo
[ "f5c8200e52e4aeb9c04b4988a61dbc66c04f8255" ]
[ "misc/hide.py" ]
[ "import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport soundfile as sf\nimport sounddevice as sd\n\nfrom sys import argv\nfrom codec import *\nfrom converter import *\nfrom scipy.io.wavfile import write\n\ndef print_stats(arr, arr_name=''):\n print(f'{arr_name}:', arr.dtype, arr.shape, arr.min(), arr.max())\n\ndef show(bgr_img):\n plt.axis('off')\n rgb_img = cv2.cvtColor(bgr_img.astype('uint8'), cv2.COLOR_BGR2RGB)\n plt.imshow(rgb_img, vmin=0, vmax=255)\n plt.show()\n\nSHOW_IMAGE = False\nPLAY_AUDIO = False\nOUTPUT_FOLDER = '.'\n\naudio_fname = 'output-mono.wav'\nmono_audio, sr = sf.read(audio_fname, dtype='int16')\nif PLAY_AUDIO: sd.play(mono_audio, sr); sd.wait() # wait until file is done playing\n\nimg_fname = 'kojima.png'\nbgr_img = cv2.imread(img_fname, cv2.IMREAD_COLOR)\nif SHOW_IMAGE: show(bgr_img)\n\n# the hidden message is the audio 'converted' from int16 to uint8\nmessage = int8_to_uint8(int16_to_int8(mono_audio))\n\nprint_stats(mono_audio, 'mono_audio')\nprint_stats(bgr_img, 'bgr_img')\nprint_stats(message, 'message')\nprint((int8_to_int16(uint8_to_int8(message)) == mono_audio).all())\n\nBIT_PLANE = 7\n\n__bgr_img = encode(bgr_img, BIT_PLANE, message)\nprint_stats(__bgr_img, '__bgr_img')\nif SHOW_IMAGE: show(__bgr_img)\ncv2.imwrite('kojima-mono-7.png', __bgr_img)\n\n__message = decode(__bgr_img, BIT_PLANE)\n__message = __message[:message.size] # FIXME add an 'end of message' marker on `encode`\nprint_stats(__message, '__message')\nprint((__message == message).all())\n\n__mono_audio = int8_to_int16(uint8_to_int8(__message))\nprint_stats(__mono_audio, '__mono_audio')\nprint((__mono_audio == mono_audio).all())\n\n# play the message that was decoded from the image and 'converted' from uint8 to int16\nif PLAY_AUDIO: sd.play(__mono_audio, 8000); sd.wait()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
kenakai16/gpointconv
[ "dcdb079ff00ce1c13b426e7604278fa4326d8ba7" ]
[ "eval_cls_gconv.py" ]
[ "import argparse\nimport os\nimport sys\nimport numpy as np \nimport torch\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.nn.functional as F\nfrom model.group_pointconv import GroupPointConvDensityClsSsg as GroupPointConvDensityClsSsg\nfrom data_utils.ModelNetDataLoader import ModelNetDataLoader\nimport datetime\nimport logging\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport math\nfrom random import randint\nimport random\n\n\ndef parse_args():\n '''PARAMETERS'''\n parser = argparse.ArgumentParser('GPointConv')\n parser.add_argument('--batchsize', type=int, default=32, help='batch size')\n parser.add_argument('--gpu', type=str, default='cpu', help='specify gpu device')\n parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint')\n parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\n parser.add_argument('--num_workers', type=int, default=16, help='Worker Number [default: 16]')\n parser.add_argument('--model_name', default='gpointconv', help='model name')\n parser.add_argument('--normal', action='store_true', default=True, help='Whether to use normal information [default: False]')\n return parser.parse_args()\n\n\ndef main(args):\n '''HYPER PARAMETER'''\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n device = args.gpu\n\n '''CREATE DIR'''\n experiment_dir = Path('./roration_eval_experiment/')\n experiment_dir.mkdir(exist_ok=True)\n file_dir = Path(str(experiment_dir) + '/%s_ModelNet40-'%args.model_name + str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))\n file_dir.mkdir(exist_ok=True)\n checkpoints_dir = file_dir.joinpath('checkpoints_rotation/')\n checkpoints_dir.mkdir(exist_ok=True)\n os.system('cp %s %s' % (args.checkpoint, checkpoints_dir))\n log_dir = file_dir.joinpath('logs/')\n log_dir.mkdir(exist_ok=True)\n\n '''LOG'''\n args = parse_args()\n logger = logging.getLogger(args.model_name)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler(str(log_dir) + 'eval_%s_cls.txt'%args.model_name)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.info('---------------------------------------------------EVAL---------------------------------------------------')\n logger.info('PARAMETER ...')\n logger.info(args)\n\n '''DATA LOADING'''\n logger.info('Load dataset ...')\n DATA_PATH = 'C:\\\\Users\\\\nghiahoang\\\\Desktop\\\\OneDrive_1_2-11-2021\\\\dataset\\\\modelnet40_normal_resampled'\n\n\n TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)\n testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False, num_workers=args.num_workers)\n\n logger.info(\"The number of test data is: %d\", len(TEST_DATASET))\n\n seed = 3\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n '''MODEL LOADING'''\n num_class = 40\n classifier = GroupPointConvDensityClsSsg(num_classes=num_class).to(device)\n\n if args.checkpoint is not None:\n print('Load CheckPoint from {}'.format(args.checkpoint))\n logger.info('Load CheckPoint')\n # Load\n checkpoint = torch.load(args.checkpoint, map_location=device)\n classifier.load_state_dict(checkpoint['model_state_dict'])\n\n else:\n print('Please load Checkpoint to eval...')\n sys.exit(0)\n\n '''EVAL'''\n logger.info('Start evaluating...')\n print('Start 
evaluating...')\n accuracy = test_original(classifier, testDataLoader)\n\n logger.info('Total Accuracy: %f'%accuracy)\n logger.info('End of evaluation...')\n\n\n#--------------------------------------------------------#\ndef test_original(model, loader):\n device = torch.device('cpu')\n total_correct = 0.0\n total_seen = 0.0\n for j, data in enumerate(loader, 0):\n points, target = data\n target = target[:, 0]\n points = points.transpose(2, 1)\n points, target = points.to(device), target.to(device)\n classifier = model.eval()\n with torch.no_grad():\n pred = classifier(points[:, :3, :], points[:, 3:, :])\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.long().data).cpu().sum()\n total_correct += correct.item()\n total_seen += float(points.size()[0])\n\n accuracy = total_correct / total_seen\n return accuracy\n\ndef test_rotation_group(model, loader, split_group = None ,name=None):\n device = torch.device('cpu')\n\n G24 = torch.from_numpy(np.array([\n [[1, 0, 0], [0, 1, 0], [0, 0, 1]],\n [[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n [[1, 0, 0], [0, -1, 0], [0, 0, -1]],\n [[1, 0, 0], [0, 0, 1], [0, -1, 0]],\n\n [[0, -1, 0], [1, 0, 0], [0, 0, 1]],\n [[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n [[0, 1, 0], [1, 0, 0], [0, 0, -1]],\n [[0, 0, -1], [1, 0, 0], [0, -1, 0]],\n\n [[-1, 0, 0], [0, -1, 0], [0, 0, 1]],\n [[-1, 0, 0], [0, 0, -1], [0, -1, 0]],\n [[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n [[-1, 0, 0], [0, 0, 1], [0, 1, 0]],\n\n [[0, 1, 0], [-1, 0, 0], [0, 0, 1]],\n [[0, 0, 1], [-1, 0, 0], [0, -1, 0]],\n [[0, -1, 0], [-1, 0, 0], [0, 0, -1]],\n [[0, 0, -1], [-1, 0, 0], [0, 1, 0]],\n\n [[0, 0, -1], [0, 1, 0], [1, 0, 0]],\n [[0, 1, 0], [0, 0, 1], [1, 0, 0]],\n [[0, 0, 1], [0, -1, 0], [1, 0, 0]],\n [[0, -1, 0], [0, 0, -1], [1, 0, 0]],\n\n [[0, 0, -1], [0, -1, 0], [-1, 0, 0]],\n [[0, -1, 0], [0, 0, 1], [-1, 0, 0]],\n [[0, 0, 1], [0, 1, 0], [-1, 0, 0]],\n [[0, 1, 0], [0, 0, -1], [-1, 0, 0]]\n ])).float()\n\n if split_group != None:\n r_group = G24[split_group]\n\n total_correct = 0.0\n total_seen = 0.0\n for j, data in enumerate(loader, 0):\n points, target = data\n target = target[:, 0]\n points[:,:,:3] = torch.matmul(points[:,:,:3], r_group) #rotate-sample\n points = points.transpose(2, 1)\n points, target = points.to(device), target.to(device)\n classifier = model.eval()\n with torch.no_grad():\n pred = classifier(points[:, :3, :], points[:, 3:, :])\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.long().data).cpu().sum()\n total_correct += correct.item()\n total_seen += float(points.size()[0])\n\n accuracy = total_correct / total_seen\n print(\"test_rotation_group:\", accuracy)\n return accuracy\n\ndef test_random_angel(model, loader, coordinates = \"Rx\" , phase =\"custom\"):\n device = torch.device('cpu')\n total_correct = 0.0\n total_seen = 0.0\n for j, data in enumerate(loader, 0):\n points, target = data\n target = target[:, 0]\n\n random.seed(j)\n\n if phase == \"custom\":\n r = random.Random(j)\n alpha = r.choice([randint(0, 30),randint(60, 120),randint(150, 180)])\n rotation_angle = alpha*np.pi / 180.\n\n elif phase == \"random\":\n alpha = randint(0, 180)\n rotation_angle = alpha*np.pi / 180.\n\n points[:,:,:3] = rotate_point_cloud_by_angle(points[:,:,:3], coordinates, rotation_angle) #rotate-sample\n\n points = points.transpose(2, 1)\n points, target = points.to(device), target.to(device)\n classifier = model.eval()\n with torch.no_grad():\n pred = classifier(points[:, :3, :], points[:, 3:, :])\n pred_choice = pred.data.max(1)[1]\n correct = 
pred_choice.eq(target.long().data).cpu().sum()\n total_correct += correct.item()\n total_seen += float(points.size()[0])\n\n accuracy = total_correct / total_seen\n print(\"random angel acc:\", accuracy)\n return accuracy\n\ndef rotate_point_cloud_by_angle(batch_data, coordinates = \"Rx\" , rotation_angle=np.pi/2):\n \"\"\" Rotate the point cloud along up direction with certain angle.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n\n batch_data = batch_data.cpu().detach().numpy()\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n\n\n Rx = np.array([[1, 0, 0],\n [0, cosval, -sinval],\n [0, sinval, cosval]])\n\n Ry = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n\n Rz = np.array([[cosval, -sinval, 0],\n [sinval, cosval, 0],\n [0, 0, 1]])\n\n R = np.dot(Rz, np.dot(Ry, Rx))\n\n if coordinates==\"Rx\":\n rotated_matrix = Rx\n elif coordinates==\"Ry\":\n rotated_matrix = Ry\n elif coordinates==\"Rz\":\n rotated_matrix = Rz\n else:\n rotated_matrix = R\n\n shape_pc = batch_data[k,:,0:3]\n rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotated_matrix)\n rotated_data= torch.from_numpy(rotated_data)\n return rotated_data\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n" ]
[ [ "numpy.dot", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.cos", "numpy.sin", "torch.matmul", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device", "numpy.array", "numpy.zeros" ] ]
hwfan/STRAPS-3DHumanShapePose
[ "ed12f1b52de43ca77a7ad8566314e9bbd1a364af" ]
[ "models/resnet.py" ]
[ "\"\"\"\nCopied from pytorch source code (I've just removed the final FC layer).\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',\n 'wide_resnet50_2', 'wide_resnet101_2']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out 
= self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, in_channels, num_classes=1000, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n # self.fc = nn.Linear(512 * block.expansion, num_classes) - don't need final FC layer\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = 
torch.flatten(x, 1)\n\n return x\n\n\ndef _resnet(arch, block, layers, in_channels, pretrained, progress, **kwargs):\n model = ResNet(block, layers, in_channels, **kwargs)\n if pretrained:\n state_dict = model_zoo.load_url(model_urls[arch],\n progress=progress)\n model.load_state_dict(state_dict, strict=False) # not using final FC layer\n return model\n\n\ndef resnet18(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], in_channels, pretrained, progress,\n **kwargs)\n\n\ndef resnet34(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], in_channels, pretrained, progress,\n **kwargs)\n\n\ndef resnet50(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], in_channels, pretrained, progress,\n **kwargs)\n\n\ndef resnet101(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], in_channels, pretrained, progress,\n **kwargs)\n\n\ndef resnet152(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], in_channels, pretrained, progress,\n **kwargs)\n\n\ndef resnext50_32x4d(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], in_channels,\n pretrained, progress, **kwargs)\n\n\ndef resnext101_32x8d(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n 
progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], in_channels,\n pretrained, progress, **kwargs)\n\n\ndef wide_resnet50_2(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], in_channels,\n pretrained, progress, **kwargs)\n\n\ndef wide_resnet101_2(in_channels, pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], in_channels,\n pretrained, progress, **kwargs)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.flatten", "torch.nn.ReLU", "torch.utils.model_zoo.load_url", "torch.nn.init.kaiming_normal_" ] ]
boangri/uai-thesis-notebooks
[ "f287cef36d1533d2526d0d71da0c55e8b633e8c2" ]
[ "Pong/pong_karpathy.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5\n\n# In[ ]:\n\n\n\"\"\" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. \"\"\"\nimport numpy as np\n# import cPickle as pickle\nimport pickle\nimport gym\n\n# hyperparameters\nH = 200 # number of hidden layer neurons\nbatch_size = 10 # every how many episodes to do a param update?\nlearning_rate = 1e-4\ngamma = 0.99 # discount factor for reward\ndecay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2\nresume = True # resume from previous checkpoint?\nrender = False\n\n# model initialization\nD = 80 * 80 # input dimensionality: 80x80 grid\nif resume:\n model = pickle.load(open('save.p', 'rb'))\nelse:\n model = {}\n model['W1'] = np.random.randn(H,D) / np.sqrt(D) # \"Xavier\" initialization\n model['W2'] = np.random.randn(H) / np.sqrt(H)\n \ngrad_buffer = { k : np.zeros_like(v) for k,v in model.items() } # update buffers that add up gradients over a batch\nrmsprop_cache = { k : np.zeros_like(v) for k,v in model.items() } # rmsprop memory\n\ndef sigmoid(x): \n return 1.0 / (1.0 + np.exp(-x)) # sigmoid \"squashing\" function to interval [0,1]\n\ndef prepro(I):\n \"\"\" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector \"\"\"\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()\n\ndef discount_rewards(r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\ndef policy_forward(x):\n h = np.dot(model['W1'], x)\n h[h<0] = 0 # ReLU nonlinearity\n logp = np.dot(model['W2'], h)\n p = sigmoid(logp)\n return p, h # return probability of taking action 2, and hidden state\n\ndef policy_backward(eph, epdlogp):\n \"\"\" backward pass. 
(eph is array of intermediate hidden states) \"\"\"\n dW2 = np.dot(eph.T, epdlogp).ravel()\n dh = np.outer(epdlogp, model['W2'])\n dh[eph <= 0] = 0 # backpro prelu\n dW1 = np.dot(dh.T, epx)\n return {'W1':dW1, 'W2':dW2}\n\nenv = gym.make(\"Pong-v0\")\nobservation = env.reset()\nprev_x = None # used in computing the difference frame\nxs,hs,dlogps,drs = [],[],[],[]\nrunning_reward = None\nreward_sum = 0\nepisode_number = 0\nwhile True:\n if render: env.render()\n\n # preprocess the observation, set input to network to be difference image\n cur_x = prepro(observation)\n x = cur_x - prev_x if prev_x is not None else np.zeros(D)\n prev_x = cur_x\n\n # forward the policy network and sample an action from the returned probability\n aprob, h = policy_forward(x)\n action = 2 if np.random.uniform() < aprob else 3 # roll the dice!\n\n # record various intermediates (needed later for backprop)\n xs.append(x) # observation\n hs.append(h) # hidden state\n y = 1 if action == 2 else 0 # a \"fake label\"\n dlogps.append(y - aprob) # grad that encourages the action that was taken to be taken (see http://cs231n.github.io/neural-networks-2/#losses if confused)\n\n # step the environment and get new measurements\n observation, reward, done, info = env.step(action)\n reward_sum += reward\n\n drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)\n\n if done: # an episode finished\n episode_number += 1\n\n # stack together all inputs, hidden states, action gradients, and rewards for this episode\n epx = np.vstack(xs)\n eph = np.vstack(hs)\n epdlogp = np.vstack(dlogps)\n epr = np.vstack(drs)\n xs,hs,dlogps,drs = [],[],[],[] # reset array memory\n\n # compute the discounted reward backwards through time\n discounted_epr = discount_rewards(epr)\n # standardize the rewards to be unit normal (helps control the gradient estimator variance)\n discounted_epr -= np.mean(discounted_epr)\n discounted_epr /= np.std(discounted_epr)\n\n epdlogp *= discounted_epr # modulate the gradient with advantage (PG magic happens right here.)\n grad = policy_backward(eph, epdlogp)\n for k in model: grad_buffer[k] += grad[k] # accumulate grad over batch\n\n # perform rmsprop parameter update every batch_size episodes\n if episode_number % batch_size == 0:\n for k,v in model.items():\n g = grad_buffer[k] # gradient\n rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2\n model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)\n grad_buffer[k] = np.zeros_like(v) # reset batch gradient buffer\n\n # boring book-keeping\n running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01\n# print('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))\n print('ep %d: total %f. running mean: %f' % (episode_number,reward_sum, running_reward))\n if episode_number % 100 == 0: pickle.dump(model, open('save.p', 'wb'))\n reward_sum = 0\n observation = env.reset() # reset env\n prev_x = None\n\n# if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.\n# print (('ep %d: game finished, reward: %f' % (episode_number, reward)) + ('' if reward == -1 else ' !!!!!!!!'))\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.random.uniform", "numpy.std", "numpy.zeros_like", "numpy.random.randn", "numpy.mean", "numpy.outer", "numpy.exp", "numpy.zeros", "numpy.vstack" ] ]
xieldy/DeGAN
[ "9b480ae2dfa5c0303ca433404c015604e3926274" ]
[ "Other_networks_used/lenet_half.py" ]
[ "import torch \nimport torch.nn as nn\n\nclass LeNet_half(nn.Module):\n def __init__(self, num_classes=10):\n super(LeNet_half, self).__init__()\n\n self.conv1 = nn.Conv2d(1, 3, 5, stride=1, padding=0)\n self.conv1.bias.data.normal_(0, 0.1)\n self.conv1.bias.data.fill_(0) \n \n self.relu = nn.ReLU() \n \n self.pad = nn.MaxPool2d(2, stride=2)\n \n self.conv2 = nn.Conv2d(3, 8, 5, stride=1, padding=0)\n self.conv2.bias.data.normal_(0, 0.1)\n self.conv2.bias.data.fill_(0) \n \n self.fc1 = nn.Linear(200,120)\n self.fc1.bias.data.normal_(0, 0.1)\n self.fc1.bias.data.fill_(0) \n \n self.fc2 = nn.Linear(120,84)\n self.fc2.bias.data.normal_(0, 0.1)\n self.fc2.bias.data.fill_(0) \n \n self.fc3 = nn.Linear(84,num_classes)\n self.fc3.bias.data.normal_(0, 0.1)\n self.fc3.bias.data.fill_(0) \n \n self.soft = nn.Softmax()\n \n def forward(self, x):\n layer1 = self.pad(self.relu(self.conv1(x)))\n layer2 = self.pad(self.relu(self.conv2(layer1)))\n\n flatten = layer2.view(-1, 8*5*5)\n fully1 = self.relu(self.fc1(flatten))\n \n fully2 = self.relu(self.fc2(fully1))\n \n logits = self.fc3(fully2)\n #softmax_val = self.soft(logits)\n\n return logits\n\nmodel = LeNet_half(num_classes=10)\n\n" ]
[ [ "torch.nn.Softmax", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.ReLU" ] ]
entangle2giraffe/scikitlearn_demo
[ "506cb424cc52d6706cc0694dec0f2f533f755954" ]
[ "poly/app.py" ]
[ "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nimport sys\r\n\r\n# Import the local module\r\n# The module import pandas and slicing dataset\r\n# for specific country of the input of variable 'coa'\r\nfrom util import country, des, instance\r\n\r\n# Read the table\r\ncoa = input(\"Country:\")\r\n\r\n# Store the variable in cdf\r\ncdf = country(coa)\r\n# Separate 80% for train dataset and rest as test dataset\r\nmsk = np.random.rand(len(cdf)) < 0.8\r\ntrain = cdf[msk]\r\ntest = cdf[~msk]\r\n\r\n# Import Linear Regression model\r\nregr = LinearRegression()\r\n\r\n# Prompt the user for degree\r\ndegree = input(\"Degree(only int):\")\r\ndeg_int = int(degree)\r\n\r\n# Train dataset\r\n# Convert List -> Array\r\ntrain_x = np.asanyarray(train[[\"Year\"]])\r\ntrain_y = np.asanyarray(train[[\"Value\"]])\r\n\r\n# Test dataset\r\ntest_x = np.asanyarray(test[[\"Year\"]])\r\ntest_y = np.asanyarray(test[[\"Value\"]])\r\n\r\n# Transform x\r\n# Polynomial\r\npoly = PolynomialFeatures(deg_int)\r\ntrain_x_poly = poly.fit_transform(train_x)\r\n# Learning\r\ntrain_y_ = regr.fit(train_x_poly, train_y)\r\ndes(coa, deg_int)\r\nprint(f\"Coefficient: \", regr.coef_)\r\nprint(f\"Intercept: \", regr.intercept_)\r\n\r\n# Initialize x dimension\r\nXX = np.arange(1950, 2011, 60/7)\r\n\r\n\r\ndef f(deg, arr):\r\n const = regr.intercept_[0]\r\n\r\n if deg <= 0:\r\n return 0\r\n else:\r\n yy = regr.coef_[0][deg] * np.power(arr, deg) + f(deg - 1, arr)\r\n return yy + const\r\n\r\n\r\nfunc = f(deg_int, XX)\r\nplt.plot(XX, func, '-r')\r\nplt.xlabel(\"Year\")\r\nplt.ylabel(\"Value\")\r\nplt.show()\r\n\r\ntest_x_poly = poly.fit_transform(test_x)\r\ntest_y_ = regr.predict(test_x_poly)\r\n\r\n# Plot\r\nplt.show()\r\n\r\n# Accuracy\r\nprint(\"MAE: \", mean_absolute_error(test_y_, test_y))\r\nprint(\"MSE: \", mean_squared_error(test_y_, test_y))\r\nprint(\"R2: \", r2_score(test_y, test_y))\r\n" ]
[ [ "sklearn.metrics.r2_score", "numpy.power", "numpy.arange", "sklearn.metrics.mean_absolute_error", "sklearn.preprocessing.PolynomialFeatures", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.plot", "numpy.asanyarray", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
jha-vikas/pyTorch-implementations
[ "b43ade6f457c661a6bd43209e7affd43a1381e7e" ]
[ "dataset/MNIST/process.py" ]
[ "import os\nimport torch\nfrom torchvision.datasets.mnist import read_image_file, read_label_file\n\nraw_folder = \"raw\"\nprocessed_folder = \"processed\"\ntraining_file = 'training.pt'\ntest_file = 'test.pt'\n\n### Code from https://github.com/pytorch/vision/blob/7d4154735f421b254c408c16e0980b1ca0dd9b8e/torchvision/datasets/mnist.py#L134\n# process and save as torch files\nprint('Processing...')\n\ntraining_set = (\n read_image_file(os.path.join(raw_folder, 'train-images.idx3-ubyte')),\n read_label_file(os.path.join(raw_folder, 'train-labels.idx1-ubyte'))\n)\ntest_set = (\n read_image_file(os.path.join(raw_folder, 't10k-images.idx3-ubyte')),\n read_label_file(os.path.join(raw_folder, 't10k-labels.idx1-ubyte'))\n)\nwith open(os.path.join(processed_folder, training_file), 'wb') as f:\n torch.save(training_set, f)\nwith open(os.path.join(processed_folder, test_file), 'wb') as f:\n torch.save(test_set, f)\n\nprint('Done!')" ]
[ [ "torch.save" ] ]
penguinwang96825/nlp4ml
[ "0a2ffcc53fad493584c649f9dc87ab344c3e814b" ]
[ "nlp4ml/main.py" ]
[ "import swifter\nimport pandas as pd\nimport numpy as np\nfrom pprint import pprint\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import ExtraTreeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.datasets import make_classification\nfrom gensim.models import KeyedVectors\n\nfrom preprocessing import clean_tweet, augment_text, KFold\nfrom ensembler import nested_cross_validation, fetch_models\nfrom statistics import under_sampling\nfrom vectoriser import SifEmbeddingVectorizer\nfrom utils import seed_everything\nseed_everything(seed=914)\n\n\ndef main():\n df = pd.read_csv(\"./data/train.csv\")\n print(df)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
LuzieH/pytpt
[ "7c4c3542bffcc4f94b9c74a423cb5b2cd97a366b" ]
[ "src/pytpt/finite.py" ]
[ "import numpy as np\nfrom inspect import isfunction\n#from scipy.linalg import solve\n\n\nclass tpt:\n '''Calculates committor probabilities and A->B transition statistics of\n time-homogeneous or time-inhomogeneous Markov chain models over a\n finite time interval {0,...,N-1} of size N.\n\n based on:\n Helfmann, L., Ribera Borrell, E., Schütte, C., & Koltai, P. (2020).\n Extending Transition Path Theory: Periodically-Driven and Finite-Time\n Dynamics. arXiv preprint arXiv:2002.07474.\n '''\n\n def __init__(self, P, N, ind_A, ind_B, ind_C, init_dens):\n '''Initialize an instance by defining the transition matrix and\n the sets A and B between which the transition statistics should be\n computed.\n\n Args:\n P: array\n - if the dynamics are time-independent:\n irreducible and row-stochastic (rows sum to 1)\n transition matrix of size S x S, S is the size\n of the state space St={1,2,...,S}\n - if the dynamics are time-dependent:\n function P(n) is a transition matrix defined for\n n=0,...,N-2\n N: int\n size of the time interval {0,1,...,N-1}\n ind_A: array\n set of indices of the state space that belong to the set A\n ind_B: array\n set of indices of the state space that belong to the set B\n ind_C: array\n set of indices of the state space that belong to the\n transition region C, i.e. the set C = St-(A u B)\n init_dens: array\n initial density at time 0\n '''\n assert (isfunction(P) or isfunction(P.func)), \"The transition \\\n matrices need to be inputted as a function mapping time to \\\n the corresponding transition matrix.\"\n\n assert (isinstance(P(0), np.ndarray) and not isinstance(P(0),np.matrix)), \\\n \"The inputted transition matrix function should map time to \\\n an np.ndarray and not an np.matrix\"\n\n assert (isinstance(ind_A, np.ndarray) and\n isinstance(ind_B, np.ndarray) and\n isinstance(ind_C, np.ndarray)), \\\n \"The index sets have to be given as np.ndarrays.\"\n\n A = set(ind_A)\n B = set(ind_B)\n C = set(ind_C)\n intersection_AB = A.intersection(B)\n complement_AB = (C.difference(A)).difference(B)\n\n assert (len(A) > 0 and\n len(B) > 0 and\n len(C) > 0 and\n len(intersection_AB) == 0 and\n complement_AB==C), \\\n \"A and B have to be non-empty and disjoint sets \\\n such that also their complement C is non-empty.\"\n\n self.init_dens = init_dens\n self.ind_A = ind_A\n self.ind_B = ind_B\n self.ind_C = ind_C\n self.N = N\n self.P = P\n self.S = np.shape(P(0))[0] # size of the state space\n\n self.dens = None # density\n self.P_back = None # backward transition matrix\n self.q_b = None # backward committor\n self.q_f = None # forward committor\n self.reac_dens = None # reactive density\n self.reac_norm_fact = None # normalization factor\n self.norm_reac_dens = None # normalized reactive density\n self.current = None # reactive current\n self.eff_current = None # effective reactive current\n self.rate = None # rate of transitions from A to B\n self.av_length = None # mean transition length from A to B\n # time-averaged rate of transitions from A to B\n self.time_av_rate = None\n self.current_dens = None # density of the effective current\n\n\n def density(self):\n '''Function that computes and returns an array containing the\n probability to be at time n in node i, the first index of the\n returned array is time n, the second is space/the node i.\n '''\n dens = np.zeros((self.N, self.S))\n\n # initial density\n dens[0, :] = self.init_dens\n\n # compute density at time n+1 by applying the transition matrix\n # to the density at time n\n for n in np.arange(self.N - 
1):\n dens[n + 1, :] = dens[n, :].dot(self.P(n))\n\n self.dens = dens\n return dens\n\n\n def backward_transitions(self):\n '''Computes the transition matrix backwards in time. Returns a\n function that for each time n=1,...,N-1 assigns the backward transition\n matrix at time n. When the stationary density in j is zero, the\n corresponding transition matrix entries (row j) are set to 0.\n '''\n P_back_n = np.zeros((self.N, self.S, self.S))\n\n # compute backward transition matrix at each time n\n for n in range(1, self.N):\n idx = np.where(self.dens[n, :] != 0)[0]\n P_back_n[n, idx, :] = self.P(n - 1).T[idx, :] \\\n * self.dens[n - 1, :] \\\n / self.dens[n, idx].reshape(np.size(idx), 1)\n\n # store backward matrix in a function that assigns each time point\n # to the corresponding transition matrix\n def P_back(n):\n return P_back_n[n, :, :]\n\n self.P_back = P_back\n\n return P_back\n\n\n def forward_committor(self):\n '''Function that computes the forward committor q_f (probability\n that the process at time n will next go to B rather than A) for\n all time n in {0,..., N-1}\n '''\n q_f = np.zeros((self.N, self.S))\n\n # forward committor at time n=N is 1 on B and 0 on B^c\n q_f[self.N - 1, self.ind_B] = 1\n\n # iterate backward in time\n for n in np.flip(np.arange(0, self.N - 1)):\n # define the restricted transition matrices at time n\n P_CC = self.P(n)[np.ix_(self.ind_C, self.ind_C)]\n P_CB = self.P(n)[np.ix_(self.ind_C, self.ind_B)]\n\n # compute forward committor in C\n q_f[n, self.ind_C] = P_CC.dot(q_f[n + 1, self.ind_C]) \\\n + np.sum(P_CB, axis=1)\n\n # forward committor is 1 on B and 0 on A\n q_f[n, self.ind_B] = 1\n\n self.q_f = q_f\n return self.q_f\n\n\n def backward_committor(self):\n '''Function that computes the backward committor q_b (probability\n that the process at time n last came from A rather than B) for\n all time n in {0,..., N-1}\n '''\n q_b = np.zeros((self.N, self.S))\n\n # backward committor at time n=0 is 1 on A and 0 on A^c\n q_b[0, self.ind_A] = 1\n\n # iterate forward in time\n for n in range(1, self.N):\n\n # define restricted backward transition matrices at time n-1\n P_back_CC = self.P_back(n)[np.ix_(self.ind_C, self.ind_C)]\n P_back_CA = self.P_back(n)[np.ix_(self.ind_C, self.ind_A)]\n\n # compute backward committor at C\n q_b[n, self.ind_C] = P_back_CC.dot(q_b[n - 1, self.ind_C]) \\\n + np.sum(P_back_CA, axis=1)\n\n # backward committor is 1 on A, 0 on B\n q_b[n, self.ind_A] = 1\n\n self.q_b = q_b\n return self.q_b\n\n\n def reac_density(self):\n '''Given the forward and backward committor and the density,\n we can compute the density of reactive trajectories,\n i.e. the probability to be in a state in S at time n=0,...,N-1\n while being reactive.\n The function returns an array of the reactive\n density for each time (with time as the first index of the\n array).\n '''\n\n assert self.q_f is not None, \"The committor functions need \\\n first to be computed by using the method committor\"\n\n reac_dens = np.zeros((self.N, self.S))\n\n for n in range(0, self.N):\n reac_dens[n, :] = np.multiply(\n self.q_b[n, :],\n np.multiply(self.dens[n], self.q_f[n, :]),\n )\n\n self.reac_dens = reac_dens\n return self.reac_dens\n\n\n def reac_norm_factor(self):\n '''\n This function returns the normalization factor of the reactive\n density, i.e. for each time n it returns the sum over S of\n the reactive density at that time. 
This is nothing but the\n probability to be reactive/on a transition at time m.\n Note that at times n=0 and N-1, the normalization factor is 0,\n since there are no reactive trajectories yet.\n '''\n if self.reac_dens is None:\n self.reac_dens = self.reac_density()\n\n self.reac_norm_fact = np.sum(self.reac_dens, axis=1)\n return self.reac_norm_fact\n\n\n def norm_reac_density(self):\n '''Given the reactive density and its normalization factor,\n this function returns the normalized reactive density, i.e.\n the probability to be at x in S at time n, given the chain\n is reactive.\n The function returns an array of the reactive\n density for each time (with time as the first index of the\n array).\n At times n=0 and n=N-1 the method returns None because\n the normalized density is 0 for these times, and the\n normalized reactive density thus can't be computed.\n '''\n\n if self.reac_dens is None:\n self.reac_dens = self.reac_density()\n\n if self.reac_norm_fact is None:\n self.reac_norm_fact = self.reac_norm_factor()\n\n norm_reac_dens = np.zeros((self.N, self.S))\n\n # at the time where reac_norm_fact is not null\n idx = np.where(self.reac_norm_fact != 0)[0]\n norm_reac_dens[idx, :] = self.reac_dens[idx, :] \\\n / self.reac_norm_fact[idx].reshape(np.size(idx), 1)\n\n # otherwise\n idx = np.where(self.reac_norm_fact == 0)[0]\n norm_reac_dens[idx, :] = np.nan\n\n # obs: at time 0 and N-1, the reactive density is zero, the event \"to\n # be reactive\" is not possible\n\n self.norm_reac_dens = norm_reac_dens\n return self.norm_reac_dens\n\n\n def reac_current(self):\n '''Computes the reactive current current[i,j] between nodes i at\n time n and j at time n+1, as the flow of reactive trajectories\n from i to j during one time step. Only defined for n=0,..,N-2\n '''\n\n assert self.q_f is not None, \"The committor functions need \\\n first to be computed by using the method committor\"\n\n S = self.S\n\n current = np.zeros((self.N, S, S))\n eff_current = np.zeros((self.N, S, S))\n\n for n in range(self.N - 1):\n # compute reactive current\n current[n, :, :] = self.dens[n].reshape(S, 1) \\\n * self.q_b[n, :].reshape(S, 1) \\\n * self.P(n) \\\n * self.q_f[n + 1, :]\n\n # compute effective current\n eff_current[n, :, :] = np.maximum(\n np.zeros((S, S)),\n current[n, :, :] - current[n, :, :].T,\n )\n\n # reactive and effective current not defined at time N-1\n current[self.N - 1] = np.nan\n eff_current[self.N - 1] = np.nan\n\n self.current = current\n self.eff_current = eff_current\n\n return self.current, self.eff_current\n\n\n def transition_rate(self):\n '''The transition rate is the average flow of reactive\n trajectories out of A (first row), or into B (second row).\n The time-averaged transition rate is the averaged transition rate over\n {0, ..., N-1}. 
This method returns a tuple with the transition\n rate array and the time averaged transition rate array\n '''\n\n assert self.current is not None, \"The reactive current first needs \\\n to be computed by using the method reac_current\"\n\n rate = np.zeros((self.N, 2))\n\n rate[:self.N - 1, 0] = np.sum(\n self.current[:self.N - 1, self.ind_A, :], axis=(1, 2)\n )\n rate[self.N - 1, 0] = np.nan\n\n rate[1:, 1] = np.sum(\n self.current[:self.N - 1, :, self.ind_B], axis=(1, 2)\n )\n rate[0, 1] = np.nan\n\n # averaged rate over the time interval\n time_av_rate = np.zeros(2)\n time_av_rate[0] = sum(rate[:self.N - 1, 0]) / (self.N)\n time_av_rate[1] = sum(rate[1:, 1]) / (self.N)\n\n self.rate = rate\n self.time_av_rate = time_av_rate\n return self.rate, self.time_av_rate\n\n def mean_transition_length(self):\n '''The mean transition length can be computed as the ration of\n the reac_norm_fact and the transition rate.\n '''\n\n assert self.reac_norm_fact is not None, \"The normalization \\\n factor first needs to be computed by using the method \\\n reac_norm_factor\"\n\n assert self.rate is not None, \"The transition rate first needs \\\n to be computed by using the method transition_rate\"\n\n self.av_length = np.nansum(self.reac_norm_fact) \\\n / np.nansum(self.rate[:, 0])\n\n return self.av_length\n\n\n def current_density(self):\n '''The current density in a node is the sum of effective\n currents over all neighbours of the node.\n '''\n\n assert self.current is not None, \"The reactive current first needs \\\n to be computed by using the method reac_current\"\n\n current_dens = np.zeros((self.N, self.S))\n for n in range(self.N):\n if np.isnan(self.eff_current[n]).any():\n current_dens[n] = np.nan\n else:\n for i in self.ind_C:\n current_dens[n, i] = np.sum(self.eff_current[n, i, :])\n\n self.current_dens = current_dens\n return self.current_dens\n\n def compute_statistics(self):\n '''\n Function that runs all methods to compute transition statistics.\n '''\n self.density()\n self.backward_transitions()\n self.forward_committor()\n self.backward_committor()\n self.norm_reac_density()\n self.reac_current()\n self.transition_rate()\n self.mean_transition_length()\n self.current_density()\n\n def save_statistics(self, npz_path):\n '''\n Method that saves all the computed transition statistics,\n the not computed statistics are saved as None.\n\n Args:\n\n '''\n np.savez(\n npz_path,\n dens=self.dens,\n q_f=self.q_f,\n q_b=self.q_b,\n reac_norm_fact=self.reac_norm_fact,\n norm_reac_dens=self.norm_reac_dens,\n eff_current=self.eff_current,\n rate=self.rate,\n time_av_rate=self.time_av_rate,\n av_length=self.av_length,\n current_dens = self.current_dens\n )\n" ]
[ [ "numpy.ix_", "numpy.savez", "numpy.multiply", "numpy.isnan", "numpy.arange", "numpy.size", "numpy.nansum", "numpy.where", "numpy.zeros", "numpy.sum" ] ]
nik-hil/imaterialist-fashion-2019-FGVC6
[ "77e63f2cfb18ea0d85a74b31d882300148c7496f" ]
[ "util.py" ]
[ "\nimport numpy as np\nimport pandas as pd\n\nfrom pathlib import Path\nfrom fastai.vision import *\nfrom fastai.callbacks.hooks import *\nfrom fastai.utils.mem import *\n\nfrom itertools import groupby\nfrom progressbar import ProgressBar\nimport cv2\nimport os\nimport json\nimport torchvision\nfrom datetime import datetime\n\ncategory_num = 46 + 1\n\nclass ImageMask():\n masks = {}\n \n def make_mask_img(self, segment_df):\n seg_width = segment_df.iloc[0].Width\n seg_height = segment_df.iloc[0].Height\n \n seg_img = np.copy(self.masks.get((seg_width, seg_height)))\n try:\n if not seg_img:\n seg_img = np.full(seg_width*seg_height, category_num-1, dtype=np.int32)\n self.masks[(seg_width, seg_height)] = np.copy(seg_img)\n except:\n # seg_img exists\n pass\n for encoded_pixels, class_id in zip(segment_df[\"EncodedPixels\"].values, segment_df[\"ClassId\"].values):\n pixel_list = list(map(int, encoded_pixels.split(\" \")))\n for i in range(0, len(pixel_list), 2):\n start_index = pixel_list[i] - 1\n index_len = pixel_list[i+1] - 1\n if int(class_id.split(\"_\")[0]) < category_num - 1:\n seg_img[start_index:start_index+index_len] = int(class_id.split(\"_\")[0])\n seg_img = seg_img.reshape((seg_height, seg_width), order='F')\n return seg_img\n \n\ndef create_label(df, images, path_lbl):\n \"\"\"\n img_name = \"53d0ee82b3b7200b3cec8c3c1becead9.jpg\"\n img_df = df[df.ImageId == img_name]\n img_mask = ImageMask()\n mask = img_mask.make_mask_img(img_df)\n \"\"\"\n img_mask = ImageMask()\n\n print(\"Start creating label\")\n for i,img in enumerate(images):\n fname = path_lbl.joinpath(os.path.splitext(img)[0] + '_P.png').as_posix()\n if os.path.isfile(fname): # skip\n continue\n img_df = df[df.ImageId == img]\n mask = img_mask.make_mask_img(img_df)\n img_mask_3_chn = np.dstack((mask, mask, mask))\n cv2.imwrite(fname, img_mask_3_chn)\n if i % 40 ==0 : print(i, end=\" \")\n print(\"Finish creating label\")\n \n \ndef get_predictions(path_test, learn, size):\n # predicts = get_predictions(path_test, learn)\n learn.model.cuda()\n files = list(path_test.glob(\"**/*.jpg\")) #<---------- HERE\n test_count = len(files)\n results = {}\n for i, img in enumerate(files):\n results[img.stem] = learn.predict(open_image(img).resize(size))[1].data.numpy().flatten()\n \n if i%20==0:\n print(\"\\r{}/{}\".format(i, test_count), end=\"\")\n return results \n \n\n# https://www.kaggle.com/go1dfish/u-net-baseline-by-pytorch-in-fgvc6-resize\ndef encode(input_string):\n return [(len(list(g)), k) for k,g in groupby(input_string)]\n\ndef run_length(label_vec):\n encode_list = encode(label_vec)\n index = 1\n class_dict = {}\n for i in encode_list:\n if i[1] != category_num-1:\n if i[1] not in class_dict.keys():\n class_dict[i[1]] = []\n class_dict[i[1]] = class_dict[i[1]] + [index, i[0]]\n index += i[0]\n return class_dict\n\n \ndef get_submission_df(predicts):\n sub_list = []\n for img_name, mask_prob in predicts.items():\n class_dict = run_length(mask_prob)\n if len(class_dict) == 0:\n sub_list.append([\"{0}.jpg\".format(img_name), \"1 1\", 1])\n else:\n for key, val in class_dict.items():\n sub_list.append([\"{0}.jpg\".format(img_name), \" \".join(map(str, val)), key])\n # sub_list\n jdf = pd.DataFrame(sub_list, columns=['ImageId','EncodedPixels', 'ClassId'])\n return jdf\n \n \ndef test_mask_to_img(segment_df):\n \"\"\"\n plt.imshow(test_mask_to_img(jdf))\n \"\"\"\n seg_img = np.full(size*size, category_num-1, dtype=np.int32)\n for encoded_pixels, class_id in zip(segment_df[\"EncodedPixels\"].values, 
segment_df[\"ClassId\"].values):\n encoded_pixels= str(encoded_pixels)\n class_id = str(class_id)\n \n pixel_list = list(map(int, encoded_pixels.split(\" \")))\n for i in range(0, len(pixel_list), 2):\n start_index = pixel_list[i] - 1\n index_len = pixel_list[i+1] - 1\n if int(class_id.split(\"_\")[0]) < category_num - 1:\n seg_img[start_index:start_index+index_len] = int(class_id.split(\"_\")[0])\n seg_img = seg_img.reshape((size, size), order='F')\n return seg_img\n \n\n\n\n# Not Used \ndef rle_decode(pixels):\n# https://gist.github.com/nvictus/66627b580c13068589957d6ab0919e66\n# https://stackoverflow.com/a/32681075/618018\n pixels = pixels.flatten()\n \"\"\" run length encoding. Partial credit to R rle function. \n Multi datatype arrays catered for including non Numpy\n returns: tuple (runlengths, startpositions, values) \"\"\"\n ia = np.asarray(pixels) # force numpy\n n = len(ia)\n if n == 0: \n return (None, None, None)\n else:\n y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)\n i = np.append(np.where(y), n - 1) # must include last element posi\n z = np.diff(np.append(-1, i)) # run lengths\n p = np.cumsum(np.append(0, z))[:-1] # positions\n return(z, p, ia[i])\n\n# Not Used\ndef weave(count, start):\n return np.ravel((count, start), order='F')\n# count, start, klass = rle_decode(mask)\n# weave(count, start)\n# (weave(count, start)[::2]).shape, value.shape\n" ]
[ [ "numpy.asarray", "numpy.dstack", "pandas.DataFrame", "numpy.full", "numpy.append", "numpy.copy", "numpy.ravel", "numpy.array", "numpy.where" ] ]
Hyper-Devil/opencv-python
[ "c1bdf36295bc01184165d172bd0bae5d61abdda0" ]
[ "6.wrapPerspective.py" ]
[ "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimg2 = cv2.imread(r'D:\\master\\opencv-python\\image\\sudoku.jpg')\nimg = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB) # matplotlib的图像为RGB格式\nrows, cols, ch = img.shape\n\npts1 = np.float32([[223, 490], [563, 455], [186, 847], [577, 848]])\npts2 = np.float32([[0, 0], [300, 0], [0, 300], \n [300, 300]])\n\nM = cv2.getPerspectiveTransform(pts1, pts2)\n\ndst = cv2.warpPerspective(img, M, (300, 300))\n\nplt.subplot(121), plt.imshow(img), plt.title('Input')\nplt.subplot(122), plt.imshow(dst), plt.title('Output')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "numpy.float32", "matplotlib.pyplot.show" ] ]
dnolivieri/MResVgene
[ "08d5cc2368b051ab64ac593b8ebb1b725265f30a" ]
[ "mresvgene/PDT3method.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n dnolivieri: updated ...4 dec 2015\n * used to generate the \"positive\" multiresolution signals.\n - based upon loci.\n\n\"\"\"\nimport numpy as np\nimport sys\nimport itertools\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom scipy import *\nimport struct\nimport re\nfrom propy import PyPro\nfrom propy.GetProteinFromUniprot import GetProteinSequence\nimport json\nimport cPickle as pickle\nimport errno\nimport collections\n\nrno = {'A':0,'R':1,'N':2,'D':3,'C':4,'Q':5,'E':6,'G':7,'H':8,'I':9,'L':10,'K':11,'M':12,'F':13,'P':14,'S':15,'T':16,'W':17,'Y':18,'V':19}\n\ndef make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\ndef save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n\n\nclass PDT3:\n def __init__(self):\n self.normI=self.normalized_AAindx()\n\n def normalized_AAindx(self):\n fp = open('./aaRefs/aaindex.txt','r')\n D=[]\n for lk in fp:\n q=[ float(i) for i in lk.split() ]\n D.append(q)\n\n Dvec=[]\n normI = []\n for j in D:\n q= np.sum(np.array( j ))/20.\n denom=0.0\n for kp in j:\n denom+= (kp - q)*(kp - q)\n\n denom = np.sqrt(denom/20.)\n abar=[]\n for kp in j:\n abar.append( (kp - q)/denom )\n\n normI.append(abar)\n\n save_object(normI, r'./aaRefs/normalizedAA_Matrix.pkl')\n return normI\n\n def get_seq2vector(self, seq):\n Dvec={}\n cnt=0\n for q in self.normI:\n sumDseq=0.0\n for i in range(len(seq)-3):\n sumDseq+= (q[rno[seq[i]]] - q[rno[seq[i+3]]])*(q[rno[seq[i]]] - q[rno[seq[i+3]]])\n\n sumDseq = sumDseq/np.float(len(seq)-3)\n Dvec.update( {str(cnt): sumDseq} )\n cnt+=1\n return Dvec\n\n def get_lambda_seq2vector(self, seq, qLambda):\n Dvec={}\n cnt=0\n for q in self.normI:\n sumDseq=0.0\n for i in range(len(seq)- qLambda):\n pi1 = rno[seq[i]]\n pi2 = rno[seq[i+qLambda]]\n sumDseq+= (q[ pi1 ] - q[ pi2 ])*(q[ pi1 ] - q[ pi2 ])\n sumDseq = sumDseq/np.float(len(seq)-1)\n Dvec.update( {str(cnt): sumDseq} )\n cnt+=1\n return Dvec\n\n\n def count_sequential_doubles(self, s):\n dbar = [ s[i-1]+s[i] for i in range(len(s)) if i>0 ]\n res=collections.Counter(dbar)\n return res\n\n def get_freq_seq2vector(self, s):\n def merge_two_dicts(x, y):\n '''Given two dicts, merge them into a new dict as a shallow copy.'''\n z = x.copy()\n z.update(y)\n return z\n aa ='ARNDCQEGHILKMFPSTWYV'\n ## single\n dbar = [ x for x in aa] \n S={}\n for i in dbar:\n S.update({i:0})\n T=collections.Counter(s)\n y = merge_two_dicts(S, T) \n ybar = np.array([ y[j] for j in dbar])\n\n ## double.\n dbar = [ x[0]+x[1] for x in list(itertools.product(aa, repeat=2))] \n D={}\n for i in dbar:\n D.update({i:0})\n B= self.count_sequential_doubles(s)\n z = merge_two_dicts(D, B)\n zbar = np.array([ z[j] for j in dbar ])\n \n rho = np.hstack( (ybar,zbar))\n \n Dvec={}\n for k in range(len(rho)):\n Dvec.update( {str(k): rho[k]} )\n return Dvec\n\n\n## ---------------MAIN ----------------------------------\nif __name__ == '__main__':\n\n\n \n P = PDT3()\n" ]
[ [ "numpy.hstack", "numpy.array", "numpy.sqrt" ] ]
mahmoud-al-najar/DSPEB
[ "9eecb7d9bdef4ba2c3ec72a25fe6a159ecc75105" ]
[ "create_dataset.py" ]
[ "import os\nimport time\nimport gdal\nimport shutil\nimport random\nimport numpy as np\nimport pandas as pd\nimport datagen_config as cfg\n\nfrom utilities.data_io import make_tarfile\nfrom utilities.common import get_blue_ratio, make_datagen_log_file\nfrom utilities.data_io import parse_sentinel2_tiles_metadata_from_datalake, datagen_get_bathy_xyz\nfrom utilities.preprocessing import apply_fft, apply_per_band_min_max_normalization, apply_normxcorr2\n\n# get list of Sentinel-2 Tile objects\nsentinel2tile_list = parse_sentinel2_tiles_metadata_from_datalake()\nprint(sentinel2tile_list)\nprint(f'len(tiles) = {len(sentinel2tile_list)}')\n\n# get bathymetry xyz\nbathy_xyz = datagen_get_bathy_xyz(sentinel2tile_list)\nx, y, z = bathy_xyz\nprint(len(x), len(y), len(z))\nprint(x)\n\nif not os.path.exists(cfg.out_path_dataset):\n os.mkdir(cfg.out_path_dataset)\n\nif not os.path.exists(cfg.out_path_tmpdir):\n os.mkdir(cfg.out_path_tmpdir)\n\ntmp_dirname = os.path.join(cfg.out_path_tmpdir, f'tmp_{cfg.region}')\nif not os.path.exists(tmp_dirname):\n os.mkdir(tmp_dirname)\n\nnb_tiles = len(cfg.tiles)\n\nprint(f'####################################### Bathy Loading ################################################')\n\nx, y, z = bathy_xyz\n\nfor i in range(nb_tiles):\n print(f'Tile {cfg.tiles[i]}: {len(x[i])} measurement points')\n\ngood = 0\nbad1 = 0 # too close to the tile border\nbad2 = 0 # nan or inf\nbad3 = 0 # ValueError\nbad4 = 0 # Clouds\ndataframe = pd.DataFrame([], columns=['z', 'x', 'y', 'epsg', 'max_energy'])\n\nfor i in range(len(sentinel2tile_list)):\n tile = sentinel2tile_list[i]\n print(f'Tile : {tile.id}')\n for safe in tile.safes:\n nb1 = 0\n t = time.time()\n a = os.listdir(os.path.join(safe.s2_path, 'GRANULE'))\n path = os.path.join(safe.s2_path, 'GRANULE', a[0], 'IMG_DATA', f'T{tile.id}_{safe.date}T{safe.time}_')\n\n for k in range(len(x[i])):\n z_tid = z[i][k] + safe.tidal_elevation\n if cfg.depth_lim_min <= z_tid <= cfg.depth_lim_max:\n cx = int((x[i][k] - tile.corner['x']) / 10 - cfg.w_sub_tile / 2)\n cy = int((tile.corner['y'] - y[i][k]) / 10 + cfg.w_sub_tile / 2)\n if cx + cfg.w_sub_tile < cfg.w_sentinel / 10 - 1 and \\\n cx > 0 and \\\n cy + cfg.w_sub_tile < cfg.w_sentinel / 10 - 1 and \\\n cy > 0:\n nb1 += 1\n\n rotations = np.arange(0, 360, 15)\n angle = random.choice(rotations)\n Bands, north, south, east, west = safe.get_subtile_around_center(x[i][k], y[i][k],\n rotation_angle=angle)\n print(north, south, east, west)\n\n if Bands is None or np.isnan(np.min(Bands)) or np.isinf(np.min(Bands)):\n bad2 += 1\n else:\n try:\n ratio_blue = get_blue_ratio(Bands)\n if ratio_blue < 0.8:\n bad4 += 1\n else:\n\n if apply_fft in cfg.preprocessing_funcs:\n B_fft, flag, max_energy = apply_fft(Bands,\n energy_min_thresh=cfg.min_energy,\n energy_max_thresh=cfg.max_energy)\n if apply_normxcorr2 in cfg.preprocessing_funcs:\n B_fnxc = apply_normxcorr2(B_fft)\n if apply_per_band_min_max_normalization in cfg.preprocessing_funcs:\n B_fnxc = apply_per_band_min_max_normalization(B_fnxc)\n\n good += 1\n num = '{0:05}'.format(good)\n\n tmp_name = f'{tmp_dirname}/{num}_{tile.id}_{safe.date}'\n np.save(tmp_name, B_fnxc)\n\n tmp_name_raw = f'{tmp_name}_RAW'\n np.save(tmp_name_raw, Bands)\n\n df = pd.DataFrame(\n [[z_tid, x[i][k], y[i][k], z[i][k], tile.epsgs[0], max_energy]],\n index=[tmp_name],\n columns=['z', 'x', 'y', 'z_no_tide', 'epsg', 'max_energy'])\n dataframe = dataframe.append(df)\n if len(dataframe.index) % 1000 == 0:\n dataframe.to_csv(\n cfg.out_path_csv + cfg.region + '_' + 
str(len(dataframe.index)) + '_.csv')\n tmp_tarname = cfg.out_path_tmpdir + '/' + cfg.region + '_' + str(\n len(dataframe.index)) + '_TEMP.tar.gz'\n make_tarfile(tmp_tarname, tmp_dirname)\n shutil.copy(tmp_tarname, cfg.out_path_dataset)\n\n except ValueError:\n bad3 += 1\n\n print(f' {nb1} subtiles computed in {tile.id} {safe.date}')\n print(f' Computational time : {time.time() - t}')\n\ndataframe.to_csv(os.path.join(cfg.out_path_csv, f'{cfg.region}_FULL.csv'))\ntmp_tarname = os.path.join(cfg.out_path_tmpdir, f'{cfg.region}_{str(len(dataframe.index))}_FULL.tar.gz')\nmake_tarfile(tmp_tarname, tmp_dirname)\nshutil.copy(tmp_tarname, cfg.out_path_dataset)\nmake_datagen_log_file()\n\nprint('###################################################################################################')\nprint(f'{good + bad1 + bad2 + bad3 + bad4} Input samples')\nprint(f'{good} Good samples')\nprint(f'{bad1 + bad2 + bad3 + bad4} Rejected samples, ({bad1}, {bad2}, {bad3}, {bad4})')\nprint(f'##################################################################################################')\n" ]
[ [ "numpy.arange", "numpy.save", "pandas.DataFrame", "numpy.min" ] ]