Columns:
repo_name: string (lengths 6 to 130)
hexsha: list
file_path: list
code: list
apis: list
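Each record below pairs a repository (pinned to a commit hexsha) with the file paths sampled from it, the full source of each file, and the library APIs those files call. As a minimal sketch of how such records could be iterated, assuming they are stored as JSON Lines in a hypothetical records.jsonl file (the dump does not state its actual storage format):

```python
import json

# Hypothetical file name: the dump does not say how the records are stored,
# so JSON Lines (one record per line) is assumed purely for illustration.
with open("records.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        print(record["repo_name"], record["hexsha"][0])
        # file_path, code, and apis are parallel lists, one entry per file.
        for path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
            print("  %s: %d chars, APIs: %s" % (path, len(code), ", ".join(apis)))
```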
cxy1997/Transferable-Active-Grasping
[ "a826889bcdc466a59696e7d65f024a6c8237f6ed" ]
[ "viewpoint_optim/RL_pointnet/evaluate.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\nimport argparse\nimport os\nimport sys\nsys.path.append('..')\nimport time\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.cpp_extension import load\n\nfrom environment import ActiveAgent\nfrom pointnet import PointNetActorCritic\nfrom utils import setup_logger\n\n\n# Training settings\nparser = argparse.ArgumentParser(description='A2C')\nparser.add_argument('--lr', type=float, default=0.0001,\n help='learning rate (default: 0.0001)')\nparser.add_argument('--hidden-size', type=int, default=1024,\n help='Hidden size for LSTM')\nparser.add_argument('--gamma', type=float, default=0.99,\n help='discount factor for rewards (default: 0.99)')\nparser.add_argument('--tau', type=float, default=1.00,\n help='parameter for GAE (default: 1.00)')\nparser.add_argument('--entropy-coef', type=float, default=0.01,\n help='entropy term coefficient (default: 0.01)')\nparser.add_argument('--value-loss-coef', type=float, default=0.5,\n help='value loss coefficient (default: 0.5)')\nparser.add_argument('--max-grad-norm', type=float, default=20,\n help='value loss coefficient (default: 50)')\nparser.add_argument('--seed', type=int, default=456,\n help='random seed (default: 1)')\nparser.add_argument('--num-steps', type=int, default=20,\n help='number of forward steps in A2C (default: 20)')\nparser.add_argument('--max-episode-length', type=int, default=50,\n help='maximum length of an episode (default: 1000000)')\nparser.add_argument('--env-name', default='PointNetActorCritic',\n help='environment to train on')\nparser.add_argument('--no-shared', default=False,\n help='use an optimizer without shared momentum.')\nparser.add_argument('--n-points', type=int, default=3000,\n help='the number of points feed to pointnet')\nparser.add_argument('--log-dir', type=str, default='logs',\n help='Folder to save logs')\nparser.add_argument('--model-dir', type=str, default='trained_models',\n help='Folder to save models')\nparser.add_argument('--data-dir', type=str, default='data',\n help='Folder to IORD')\nparser.add_argument('--resume', default=True,\n help='resume latest model or not')\nparser.add_argument('--num-actions', type=int, default=5,\n help='discrete action space')\nparser.add_argument('--num-test', type=int, default=50,\n help='test time')\nparser.add_argument('--min', type=bool, default=True,\n help='use min-vis or not')\nparser.add_argument('--mode', type=str, default='semantic',\n help='vision mode')\n\n# segmentation settings\nparser.add_argument(\"--depth-fusion\", type=str, default='no-depth',\n choices=['no-depth', 'pixel-concat', 'feature-concat'])\nparser.add_argument(\"--vote-mode\", metavar=\"NAME\",\n type=str, choices=[\"plain\", \"mean\", \"voting\", \"max\",\n \"mean+flip\", \"voting+flip\", \"max+flip\"], default=\"mean\")\nparser.add_argument(\"--vote-scales\", type=list, default=[0.7, 1.2])\nparser.add_argument(\"--output-mode\", metavar=\"NAME\", type=str, choices=[\"palette\", \"raw\", \"prob\"],\n default=\"class\",\n help=\"How the output files are formatted.\"\n \" -- palette: color coded predictions\"\n \" -- raw: gray-scale predictions\"\n \" -- prob: gray-scale predictions plus probabilities\")\nparser.add_argument(\"--snapshot\", metavar=\"SNAPSHOT_FILE\", type=str, default='wide_resnet38_deeplab_vistas.pth.tar', help=\"Snapshot file to load\")\nparser.add_argument(\"--seg-model-dir\", type=str, default=\"path of segmentation 
model\")\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n if not os.path.isdir(args.log_dir):\n os.makedirs(args.log_dir)\n\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n\n model = PointNetActorCritic(num_points=args.n_points, num_actions=args.num_actions)\n model = model.cuda()\n env = ActiveAgent(idx=0, n_points=args.n_points, \n seg_args=args, mode='semantic', root_path=args.data_dir)\n env.seed(args.seed)\n\n # resume latest model\n if args.resume:\n model_path = os.path.join(args.model_dir, 'latest.pth')\n if not os.path.isdir(args.model_dir):\n os.makedirs(args.model_dir)\n elif os.path.exists(model_path):\n print('Loading model from %s...' % model_path)\n model.load_state_dict(torch.load(model_path))\n\n itr = 0\n epoch = 0\n training_time = 50\n # train_logger = setup_logger('trainer', os.path.join(args.log_dir, 'trainer_log.txt'))\n # test_logger = setup_logger('test', os.path.join(args.log_dir, 'test_log.txt'))\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n # test parameters\n all_success_time = 0\n all_time = 0\n ep_success_time = 0\n success_phase = 0.1\n check_flag = False\n\n for _ in range(5):\n epoch += 1\n ################### testing phase ###################\n model = model.eval()\n\n state, _ = env.reset(min_vis=args.min)\n state = Variable(torch.from_numpy(state).unsqueeze(0))\n if torch.cuda.is_available():\n state = state.cuda()\n reward_sum = 0\n done = True\n\n episode_length = 0\n testing = True\n while testing:\n episode_length += 1\n # Sync with the shared model\n if done:\n with torch.no_grad():\n cx = torch.zeros(1, args.hidden_size)\n hx = torch.zeros(1, args.hidden_size)\n else:\n with torch.no_grad():\n cx = cx.data\n hx = hx.data\n if torch.cuda.is_available():\n hx = hx.cuda()\n cx = cx.cuda()\n\n with torch.no_grad():\n value, logit, (hx, cx) = model((state, (hx, cx)))\n prob = F.softmax(logit, dim=1)\n action = prob.max(1, keepdim=True)[1].data.cpu().numpy()\n\n # path_info = '%s %s %s %d' % (env.target_group, env.scene_idx, env.coord, action[0, 0])\n # test_logger.info(path_info)\n\n state, reward, done = env.step(action[0, 0])\n reward_sum += reward\n\n if done:\n # print('testing: ', all_time)\n success = env.end_flag\n all_success_time += success\n ep_success_time += success\n all_time += 1\n if all_time % args.num_test == 0:\n check_flag = True\n\n state, _ = env.reset(min_vis=args.min)\n time.sleep(0.1)\n\n print('testing: ', all_time)\n\n state = Variable(torch.from_numpy(state).unsqueeze(0))\n if torch.cuda.is_available():\n state = state.cuda()\n\n if check_flag:\n all_success_rate = all_success_time / all_time\n log_info = 'Num steps: %d, Episode length: %d, Reward: %0.2f, EP Success: %0.4f, ALL Success: %0.4f' \\\n % (itr, episode_length, reward_sum, ep_success_time / args.num_test, all_success_rate)\n # test_logger.info(log_info)\n print(log_info)\n\n reward_sum = 0\n episode_length = 0\n ep_success_time = 0\n check_flag = False\n testing = False\n\n time.sleep(1)" ]
[ [ "torch.nn.functional.softmax", "numpy.random.seed", "torch.load", "torch.zeros", "torch.manual_seed", "torch.from_numpy", "torch.no_grad", "torch.cuda.is_available" ] ]
mkarthick271/oid_fasterrcnn
[ "9b07d6ba9a9e0f83f0d4f68df9c29a811408c75f" ]
[ "valprepoid.py" ]
[ "import pandas as pd\nfrom imageio import imread\nimport psycopg2\nimport numpy as np\nimport csv \nimport pickle\nimport os\nfrom imageio import imread \nimport pdb\nimport copy\nimport json\nimport csv\n\ndef init_data():\n root_dir = os.path.abspath(os.path.dirname(__file__))\n labdata = pd.read_csv(root_dir + '/data/train/challenge-2019-classes-description-500.csv') \n labeldict = {}\n labeldescdict = {}\n for rows in labdata.iterrows():\n labeldict[rows[1]['labelname']] = rows[1]['labeldesc']\n labeldescdict[rows[1]['labeldesc']] = rows[1]['labelname']\n labels = labdata.loc[:, ['labeldesc']] \n labels = list(labels['labeldesc']) \n labels.insert(0, '__background__') \n labels = tuple(labels)\n num_classes = len(labels) \n classdict = dict(zip(labels, range(num_classes)))\n\n imgdata = pd.read_csv(root_dir + '/data/valoiddata/validation-images-with-rotation.csv')\n imgs = (imgdata.loc[:, ['ImageID']]).drop_duplicates()\n img_names = list(imgs['ImageID'])\n img_size = len(img_names) \n return img_names, img_size, labels, num_classes, classdict, labeldescdict\n\n\ndef load_oid_annotation(i, img, img_size, classdict):\n print(\"Loading annotation for image {} of {}\".format(i+1, img_size)) \n boxes = np.zeros((1, 4), dtype=np.float32)\n gt_classes = np.zeros((1), dtype=np.int32)\n root_dir = os.path.abspath(os.path.dirname(__file__)) \n img_path = os.path.join(root_dir + '/data/valoiddata/validation', img+'.jpg')\n try:\n img_dim = imread(img_path)\n width, height = img_dim.shape[1], img_dim.shape[0]\n except:\n with open('imgnotfound.csv', 'a') as out:\n csv_out = csv.writer(out)\n csv_out.writerow([img])\n width, height = 0, 0\n \n flipped = False\n return {'boxes': boxes, 'gt_classes': gt_classes, 'img_id': img, 'image': img_path, 'width': width, 'height': height, 'flipped': flipped}\n\ndef gt_oid_roidb(img_names, img_size, classdict):\n root_dir = os.path.abspath(os.path.dirname(__file__))\n cache_file = os.path.join(root_dir + '/data/valoiddata/', 'valoid' + '_gt_roidb.pk')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print(\"OID gt roidb loaded from {}\".format(cache_file))\n print(\"Number of images loaded from .pk file {}\".format(len(roidb)))\n for i in range(len(roidb)):\n if not os.path.exists(roidb[i]['image']):\n del roidb[i]\n print(\"Number of images after image path check is {}\".format(len(roidb)))\n return roidb\n roidb = [load_oid_annotation(i, img_names[i], img_size, classdict) for i in range(img_size)]\n with open(cache_file, 'wb') as fid:\n pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)\n print(\"Wrote OID gt roidb to {}\".format(cache_file))\n return roidb\n\ndef rank_roidb_ratio(roidb):\n ratio_large = 2\n ratio_small = 0.5\n ratio_list = []\n\n for i in range(len(roidb)):\n width = roidb[i]['width']\n height = roidb[i]['height']\n if height != 0:\n ratio = width/float(height)\n\n if ratio > ratio_large:\n roidb[i]['need_crop'] = 1\n ratio = ratio_large\n elif ratio < ratio_small:\n roidb[i]['need_crop'] = 1\n ratio = ratio_small\n else:\n roidb[i]['need_crop'] = 0\n\n ratio_list.append(ratio)\n\n ratio_list = np.array(ratio_list)\n ratio_index = np.argsort(ratio_list)\n return ratio_list[ratio_index], ratio_index\n\ndef filter_roidb(roidb):\n print(\"Before filtering there are {} images\".format(len(roidb)))\n i = 0\n while i < len(roidb):\n if len(roidb[i]['boxes']) == 0 or roidb[i]['width'] == 0:\n del roidb[i]\n i -= 1\n i += 1\n print(\"After filtering there are {} images\".format(len(roidb)))\n 
return roidb\n\ndef ret_oid_data():\n img_names, img_size, labels, num_classes, classdict, labeldescdict = init_data()\n gt_roidb = gt_oid_roidb(img_names, img_size, classdict)\n gt_roidb = filter_roidb(gt_roidb)\n ratio_list, ratio_index = rank_roidb_ratio(gt_roidb)\n return labels, num_classes, gt_roidb, ratio_list, ratio_index, labeldescdict\n\n" ]
[ [ "numpy.argsort", "numpy.array", "pandas.read_csv", "numpy.zeros" ] ]
sssssdzxc/CMU10703_Assignments
[ "ddb5c41db6e5632cbc088cb0f4c1b9214110f6f2" ]
[ "HW2/deeprl_hw2/preprocessors.py" ]
[ "\n\"\"\"Suggested Preprocessors.\"\"\"\n\nimport numpy as np\nfrom PIL import Image\nfrom collections import deque\nimport time\n\nfrom deeprl_hw2 import utils\nfrom deeprl_hw2.core import Preprocessor\n\n\nclass HistoryPreprocessor(Preprocessor):\n \"\"\"Keeps the last k states.\n\n Useful for domains where you need velocities, but the state\n contains only positions.\n\n When the environment starts, this will just fill the initial\n sequence values with zeros k times.\n\n Parameters\n ----------\n history_length: int\n Number of previous states to prepend to state being processed.\n\n \"\"\"\n\n def __init__(self, history_length=1):\n self.history_length = history_length\n self.history = deque([], maxlen = history_length)\n\n def process_state_for_network(self, state):\n \"\"\"You only want history when you're deciding the current action to take.\n return a 4d numpy array, first dimension is for batch\n \"\"\"\n assert state.dtype == 'uint8'\n assert state.shape == (84, 84)\n\n if (len(self.history) < self.history_length - 1):\n for i in xrange(len(self.history), self.history_length - 1):\n self.history.append(np.zeros(state.shape, dtype = 'uint8'))\n\n assert len(self.history) >= self.history_length - 1\n\n self.history.append(state)\n history_state = np.zeros((1, state.shape[0], state.shape[1], self.history_length), dtype = 'uint8')\n for i in xrange(len(self.history)):\n history_state[:, :, :, i] = self.history[i]\n\n return history_state\n\n def reset(self):\n \"\"\"Reset the history sequence.\n\n Useful when you start a new episode.\n \"\"\"\n self.history.clear()\n\n def get_config(self):\n return {'history_length': self.history_length}\n\n\nclass AtariPreprocessor(Preprocessor):\n \"\"\"Converts images to greyscale and downscales.\n\n You may also want to max over frames to remove flickering. Some\n games require this (based on animations and the limited sprite\n drawing capabilities of the original Atari).\n\n Parameters\n ----------\n new_size: 2 element tuple\n The size that each image in the state should be scaled to. e.g\n (84, 84) will make each image in the output have shape (84, 84).\n \"\"\"\n\n def __init__(self, new_size):\n self.new_size = new_size\n\n def process_state_for_memory(self, observation):\n \"\"\"Scale, convert to greyscale and store as uint8.\n\n We don't want to save floating point numbers in the replay\n memory. We get the same resolution as uint8, but use a quarter\n to an eigth of the bytes (depending on float32 or float64)\n\n We recommend using the Python Image Library (PIL) to do the\n image conversions.\n \"\"\"\n assert observation.ndim == 3 # (height, width, channel)\n state_unit8 = np.zeros(self.new_size)\n image_tmp = Image.fromarray(observation)\n state_unit8 = np.asarray(image_tmp.resize(self.new_size).convert('L'))\n state_unit8.astype('uint8') \n assert state_unit8.shape == self.new_size\n\n return state_unit8\n\n\n def process_state_for_network(self, observation):\n \"\"\"Scale, convert to greyscale and store as float32.\n\n Basically same as process state for memory, but this time\n outputs float32 images.\n \"\"\"\n\n # process observation from raw image to 84x84x1 with floating type\n return self.process_state_for_memory(observation).astype('float32') / 255. 
\n\n def process_reward(self, reward):\n \"\"\"Clip reward between -1 and 1.\"\"\"\n return np.clip(reward, -1.0, 1.0)\n\n\nclass PreprocessorSequence(Preprocessor):\n \"\"\"You may find it useful to stack multiple prepcrocesosrs (such as the History and the AtariPreprocessor).\n\n You can easily do this by just having a class that calls each preprocessor in succession.\n\n For example, if you call the process_state_for_network and you\n have a sequence of AtariPreproccessor followed by\n HistoryPreprocessor. This this class could implement a\n process_state_for_network that does something like the following:\n\n state = atari.process_state_for_network(state)\n return history.process_state_for_network(state)\n \"\"\"\n def __init__(self, preprocessors):\n self.Atari = preprocessors['Atari']\n self.History = preprocessors['History']\n\n def process_state_for_network(self, observation):\n '''\n observation: 84x84 uint8\n\n return: 84x84 float32\n '''\n\n assert observation.dtype == 'uint8', 'observation in forward is not correct'\n assert observation.shape == (84, 84)\n tmp = self.History.process_state_for_network(observation)\n processed_state = tmp.astype('float32') / 255. \n \n random_index = np.random.randint(84, size=1)[0]\n # assert processed_state.shape == (1, 84, 84, 4)\n assert processed_state[0, random_index, random_index, 0] <=1. and processed_state[0, random_index, random_index, 0] >= 0., 'processed state is not correct while forward'\n return processed_state\n\n\n def process_state_from_memory_batch(self, batch_state_from_memory):\n \"\"\"The batches from replay memory will be uint8, convert to float32.\n\n Same as process_state_for_network but works on a batch of\n samples from the replay memory. Meaning you need to convert\n both state and next state values.\n\n batch_state_from_memory is a list which has length of batch_size, each item\n is a state with shape 84x84x4\n\n return a numpy array with shape 32x84x84x4\n \"\"\"\n batch_num = len(batch_state_from_memory)\n random_batch = np.random.randint(batch_num, size=1)[0]\n\n assert batch_state_from_memory[random_batch].shape == (84, 84, 4)\n assert batch_state_from_memory[random_batch].dtype == 'uint8'\n batch_state_processed = np.array(batch_state_from_memory).astype('float32') / 255.\n\n # assert batch_state_processed.shape == (batch_num, 84, 84, 4)\n random_index = np.random.randint(84, size=1)[0]\n assert batch_state_processed[random_batch, random_index, random_index, 0] >= 0 and batch_state_processed[random_batch, random_index, random_index, 0] <= 1\n\n return batch_state_processed\n" ]
[ [ "numpy.random.randint", "numpy.array", "numpy.zeros", "numpy.clip" ] ]
shaun95/LPCNet
[ "117214c3a63d4f43cf5741b299c497e85c983327" ]
[ "dump_lpcnet.py" ]
[ "#!/usr/bin/python3\n'''Copyright (c) 2017-2018 Mozilla\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport lpcnet\nimport sys\nimport numpy as np\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Layer, GRU, CuDNNGRU, Dense, Conv1D, Embedding\nfrom ulaw import ulaw2lin, lin2ulaw\nfrom mdense import MDense\nimport keras.backend as K\nimport h5py\nimport re\n\nmax_rnn_neurons = 1\nmax_conv_inputs = 1\nmax_mdense_tmp = 1\n\ndef printVector(f, vector, name, dtype='float'):\n v = np.reshape(vector, (-1));\n #print('static const float ', name, '[', len(v), '] = \\n', file=f)\n f.write('static const {} {}[{}] = {{\\n '.format(dtype, name, len(v)))\n for i in range(0, len(v)):\n f.write('{}'.format(v[i]))\n if (i!=len(v)-1):\n f.write(',')\n else:\n break;\n if (i%8==7):\n f.write(\"\\n \")\n else:\n f.write(\" \")\n #print(v, file=f)\n f.write('\\n};\\n\\n')\n return;\n\ndef printSparseVector(f, A, name):\n N = A.shape[0]\n W = np.zeros((0,))\n diag = np.concatenate([np.diag(A[:,:N]), np.diag(A[:,N:2*N]), np.diag(A[:,2*N:])])\n A[:,:N] = A[:,:N] - np.diag(np.diag(A[:,:N]))\n A[:,N:2*N] = A[:,N:2*N] - np.diag(np.diag(A[:,N:2*N]))\n A[:,2*N:] = A[:,2*N:] - np.diag(np.diag(A[:,2*N:]))\n printVector(f, diag, name + '_diag')\n idx = np.zeros((0,), dtype='int')\n for i in range(3*N//16):\n pos = idx.shape[0]\n idx = np.append(idx, -1)\n nb_nonzero = 0\n for j in range(N):\n if np.sum(np.abs(A[j, i*16:(i+1)*16])) > 1e-10:\n nb_nonzero = nb_nonzero + 1\n idx = np.append(idx, j)\n W = np.concatenate([W, A[j, i*16:(i+1)*16]])\n idx[pos] = nb_nonzero\n printVector(f, W, name)\n #idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)\n printVector(f, idx, name + '_idx', dtype='int')\n return;\n\ndef dump_layer_ignore(self, f, hf):\n print(\"ignoring layer \" + self.name + \" of type \" + self.__class__.__name__)\n return False\nLayer.dump_layer = dump_layer_ignore\n\ndef dump_sparse_gru(self, f, hf):\n global max_rnn_neurons\n name = 'sparse_' + self.name\n print(\"printing layer \" + name + \" of type sparse \" + self.__class__.__name__)\n weights = self.get_weights()\n printSparseVector(f, weights[1], name + '_recurrent_weights')\n printVector(f, weights[-1], name + '_bias')\n if hasattr(self, 'activation'):\n activation 
= self.activation.__name__.upper()\n else:\n activation = 'TANH'\n if hasattr(self, 'reset_after') and not self.reset_after:\n reset_after = 0\n else:\n reset_after = 1\n neurons = weights[0].shape[1]//3\n max_rnn_neurons = max(max_rnn_neurons, neurons)\n f.write('const SparseGRULayer {} = {{\\n {}_bias,\\n {}_recurrent_weights_diag,\\n {}_recurrent_weights,\\n {}_recurrent_weights_idx,\\n {}, ACTIVATION_{}, {}\\n}};\\n\\n'\n .format(name, name, name, name, name, weights[0].shape[1]//3, activation, reset_after))\n hf.write('#define {}_OUT_SIZE {}\\n'.format(name.upper(), weights[0].shape[1]//3))\n hf.write('#define {}_STATE_SIZE {}\\n'.format(name.upper(), weights[0].shape[1]//3))\n hf.write('extern const SparseGRULayer {};\\n\\n'.format(name));\n return True\n\ndef dump_gru_layer(self, f, hf):\n global max_rnn_neurons\n name = self.name\n print(\"printing layer \" + name + \" of type \" + self.__class__.__name__)\n weights = self.get_weights()\n printVector(f, weights[0], name + '_weights')\n printVector(f, weights[1], name + '_recurrent_weights')\n printVector(f, weights[-1], name + '_bias')\n if hasattr(self, 'activation'):\n activation = self.activation.__name__.upper()\n else:\n activation = 'TANH'\n if hasattr(self, 'reset_after') and not self.reset_after:\n reset_after = 0\n else:\n reset_after = 1\n neurons = weights[0].shape[1]//3\n max_rnn_neurons = max(max_rnn_neurons, neurons)\n f.write('const GRULayer {} = {{\\n {}_bias,\\n {}_weights,\\n {}_recurrent_weights,\\n {}, {}, ACTIVATION_{}, {}\\n}};\\n\\n'\n .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]//3, activation, reset_after))\n hf.write('#define {}_OUT_SIZE {}\\n'.format(name.upper(), weights[0].shape[1]//3))\n hf.write('#define {}_STATE_SIZE {}\\n'.format(name.upper(), weights[0].shape[1]//3))\n hf.write('extern const GRULayer {};\\n\\n'.format(name));\n return True\nCuDNNGRU.dump_layer = dump_gru_layer\nGRU.dump_layer = dump_gru_layer\n\ndef dump_dense_layer_impl(name, weights, bias, activation, f, hf):\n printVector(f, weights, name + '_weights')\n printVector(f, bias, name + '_bias')\n f.write('const DenseLayer {} = {{\\n {}_bias,\\n {}_weights,\\n {}, {}, ACTIVATION_{}\\n}};\\n\\n'\n .format(name, name, name, weights.shape[0], weights.shape[1], activation))\n hf.write('#define {}_OUT_SIZE {}\\n'.format(name.upper(), weights.shape[1]))\n hf.write('extern const DenseLayer {};\\n\\n'.format(name));\n\ndef dump_dense_layer(self, f, hf):\n name = self.name\n print(\"printing layer \" + name + \" of type \" + self.__class__.__name__)\n weights = self.get_weights()\n activation = self.activation.__name__.upper()\n dump_dense_layer_impl(name, weights[0], weights[1], activation, f, hf)\n return False\n\nDense.dump_layer = dump_dense_layer\n\ndef dump_mdense_layer(self, f, hf):\n global max_mdense_tmp\n name = self.name\n print(\"printing layer \" + name + \" of type \" + self.__class__.__name__)\n weights = self.get_weights()\n printVector(f, np.transpose(weights[0], (1, 2, 0)), name + '_weights')\n printVector(f, np.transpose(weights[1], (1, 0)), name + '_bias')\n printVector(f, np.transpose(weights[2], (1, 0)), name + '_factor')\n activation = self.activation.__name__.upper()\n max_mdense_tmp = max(max_mdense_tmp, weights[0].shape[0]*weights[0].shape[2])\n f.write('const MDenseLayer {} = {{\\n {}_bias,\\n {}_weights,\\n {}_factor,\\n {}, {}, {}, ACTIVATION_{}\\n}};\\n\\n'\n .format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))\n hf.write('#define 
{}_OUT_SIZE {}\\n'.format(name.upper(), weights[0].shape[0]))\n hf.write('extern const MDenseLayer {};\\n\\n'.format(name));\n return False\nMDense.dump_layer = dump_mdense_layer\n\ndef dump_conv1d_layer(self, f, hf):\n global max_conv_inputs\n name = self.name\n print(\"printing layer \" + name + \" of type \" + self.__class__.__name__)\n weights = self.get_weights()\n printVector(f, weights[0], name + '_weights')\n printVector(f, weights[-1], name + '_bias')\n activation = self.activation.__name__.upper()\n max_conv_inputs = max(max_conv_inputs, weights[0].shape[1]*weights[0].shape[0])\n f.write('const Conv1DLayer {} = {{\\n {}_bias,\\n {}_weights,\\n {}, {}, {}, ACTIVATION_{}\\n}};\\n\\n'\n .format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))\n hf.write('#define {}_OUT_SIZE {}\\n'.format(name.upper(), weights[0].shape[2]))\n hf.write('#define {}_STATE_SIZE ({}*{})\\n'.format(name.upper(), weights[0].shape[1], (weights[0].shape[0]-1)))\n hf.write('#define {}_DELAY {}\\n'.format(name.upper(), (weights[0].shape[0]-1)//2))\n hf.write('extern const Conv1DLayer {};\\n\\n'.format(name));\n return True\nConv1D.dump_layer = dump_conv1d_layer\n\n\ndef dump_embedding_layer_impl(name, weights, f, hf):\n printVector(f, weights, name + '_weights')\n f.write('const EmbeddingLayer {} = {{\\n {}_weights,\\n {}, {}\\n}};\\n\\n'\n .format(name, name, weights.shape[0], weights.shape[1]))\n hf.write('#define {}_OUT_SIZE {}\\n'.format(name.upper(), weights.shape[1]))\n hf.write('extern const EmbeddingLayer {};\\n\\n'.format(name));\n\ndef dump_embedding_layer(self, f, hf):\n name = self.name\n print(\"printing layer \" + name + \" of type \" + self.__class__.__name__)\n weights = self.get_weights()[0]\n dump_embedding_layer_impl(name, weights, f, hf)\n return False\nEmbedding.dump_layer = dump_embedding_layer\n\n\nmodel, _, _ = lpcnet.new_lpcnet_model(rnn_units1=384, use_gpu=False)\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])\n#model.summary()\n\nmodel.load_weights(sys.argv[1])\n\nif len(sys.argv) > 2:\n cfile = sys.argv[2];\n hfile = sys.argv[3];\nelse:\n cfile = 'library/src/nnet_data.c'\n hfile = 'library/include/nnet_data.h'\n\n\nf = open(cfile, 'w')\nhf = open(hfile, 'w')\n\n\nf.write('/*This file is automatically generated from a Keras model*/\\n\\n')\nf.write('#ifdef HAVE_CONFIG_H\\n#include \"config.h\"\\n#endif\\n\\n#include \"nnet.h\"\\n#include \"{}\"\\n\\n'.format(hfile))\n\nhf.write('/*This file is automatically generated from a Keras model*/\\n\\n')\nhf.write('#ifndef RNN_DATA_H\\n#define RNN_DATA_H\\n\\n#include \"nnet.h\"\\n\\n')\n\nembed_size = lpcnet.embed_size\n\nE = model.get_layer('embed_sig').get_weights()[0]\nW = model.layers[18].get_weights()[0][:embed_size,:]\ndump_embedding_layer_impl('gru_a_embed_sig', np.dot(E, W), f, hf)\nW = model.layers[18].get_weights()[0][embed_size:2*embed_size,:]\ndump_embedding_layer_impl('gru_a_embed_pred', np.dot(E, W), f, hf)\nE = model.get_layer('embed_exc').get_weights()[0]\nW = model.layers[18].get_weights()[0][2*embed_size:3*embed_size,:]\ndump_embedding_layer_impl('gru_a_embed_exc', np.dot(E, W), f, hf)\nW = model.layers[18].get_weights()[0][3*embed_size:,:]\n#FIXME: dump only half the biases\nb = model.layers[18].get_weights()[2]\ndump_dense_layer_impl('gru_a_dense_feature', W, b, 'LINEAR', f, hf)\n\nlayer_list = []\nfor i, layer in enumerate(model.layers):\n if layer.dump_layer(f, hf):\n 
layer_list.append(layer.name)\n\ndump_sparse_gru(model.get_layer('gru_a'), f, hf)\n\nhf.write('#define MAX_RNN_NEURONS {}\\n\\n'.format(max_rnn_neurons))\nhf.write('#define MAX_CONV_INPUTS {}\\n\\n'.format(max_conv_inputs))\nhf.write('#define MAX_MDENSE_TMP {}\\n\\n'.format(max_mdense_tmp))\n\n\nhf.write('typedef struct {\\n')\nfor i, name in enumerate(layer_list):\n hf.write(' float {}_state[{}_STATE_SIZE];\\n'.format(name, name.upper())) \nhf.write('} NNetState;\\n')\n\nhf.write('\\n\\n#endif\\n')\n\nf.close()\nhf.close()\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.abs", "numpy.reshape", "numpy.concatenate", "numpy.append", "numpy.transpose", "numpy.zeros" ] ]
fornaxai/Mars-Express-Challenge
[ "4e0dff9909df0d10e507083af59326b3342d67fe" ]
[ "preprocessing/prepare_evtf.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author: fornax\n\"\"\"\nfrom __future__ import print_function, division\nimport os\nimport numpy as np\nimport pandas as pd\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nos.sys.path.append(os.path.dirname(os.getcwd()))\nimport prepare_data1 as prep\nDATA_PATH = os.path.join('..', prep.DATA_PATH)\n\n# load EVTF\nprint('Loading EVTF...')\nevtf = pd.read_csv(os.path.join(DATA_PATH, 'evtf.csv'))\n###############################################################################\n############################# OCCULTATIONS ####################################\n###############################################################################\nfeats_mars_occultations = ['OCC_PHOBOS', 'PHO_PENUMBRA', 'PHO_UMBRA',\n 'MAR_PENUMBRA', 'MAR_UMBRA', 'OCC_MARS_200KM', 'OCC_MARS',\n 'OCC_DEIMOS', 'DEI_PENUMBRA']\n#, 'DEI_UMBRA'] # DEI_UMBRA has only 2 occurences and they don't match in the \n# data well (end->start->end->start)\n\nfor feat in feats_mars_occultations:\n evtf['%s' % feat] = 0\n\nfor feat in feats_mars_occultations:\n print('Processing %s' % feat)\n if feat == 'OCC_MARS':\n rule_start = lambda x: feat in x and 'START' in x and 'OCC_MARS_200KM' not in x\n rule_end = lambda x: feat in x and 'END' in x and 'OCC_MARS_200KM' not in x\n else:\n rule_start = lambda x: feat in x and 'START' in x\n rule_end = lambda x: feat in x and 'END' in x\n starts = np.where(map(rule_start, evtf.description.values))[0]\n ends = np.where(map(rule_end, evtf.description.values))[0]\n assert(len(starts) == len(ends))\n assert(starts[0] < ends[0])\n # indicate ongoing events\n for start, end in zip(starts, ends):\n evtf.ix[start:end, '%s' % feat] = 1\n\n###### ALL OCCULTATIONS COMBINED ######\n\ndef merge_embedded_occ(occ_idx_list):\n prev_start, prev_end = occ_idx_list[0]\n approved_list = []\n for start, end in occ_idx_list:\n if start > prev_end:\n approved_list.append((prev_start, prev_end))\n prev_start, prev_end = start, end\n else:\n prev_end = end \n approved_list.append((prev_start, prev_end))\n return approved_list\n\nprint('Processing all occultations')\nevtf['OCC'] = 0\n\nrule_start = lambda x: any(map(lambda y: y in x, feats_mars_occultations)) and 'START' in x\nrule_end = lambda x: any(map(lambda y: y in x, feats_mars_occultations)) and 'END' in x\n\nstarts = np.where(map(rule_start, evtf.description.values))[0]\nends = np.where(map(rule_end, evtf.description.values))[0]\nassert(len(starts) == len(ends))\nassert(starts[0] < ends[0])\n\nnew_list = merge_embedded_occ(zip(starts, ends))\nstarts, ends = zip(*new_list)\n\nfor start, end in zip(starts, ends):\n evtf.ix[start:end, 'OCC'] = 1\n\n###############################################################################\n############################# X/Y POINTING ####################################\n###############################################################################\n'''\nTypes NPSS and NPNS indicate the times in the mission, when the pointing\nof the x axis has to switch from North to South (NPSS) or from South to North\n(NPNS) in order to avoid Sun incidence on the S/C -x face in nadir pointing\nmode around Mars.\nIn nadir pointing mode, with the x axis perpendicular to the ground track, the\nangle between the S/C -x axis and the Sun direction varies around the peri-\ncentre by some degrees (e.g. at the switching time around mid March 2004\nabout 5 degrees). 
This means that there is not a single date and time to\nswitch to the correct x axis pointing or, conversely, depending on the duration\nof the nadir pointing, it might therefore not be possible, to avoid Sun incidence\non the S/C -x face during a complete pericentre passage in nadir pointing\nmode (neither with North nor with South pointing option). Instead, the dura-\ntion of the nadir pointing has to be reduced or a small Sun incidence must be\ntolerated.\n'''\nfeats_pos_changes = ['NADIR_POINTING_X_N_TO_S_SWITCH', 'NADIR_POINTING_X_S_TO_N_SWITCH'\n 'EARTH_POINTING_Y_N_TO_S_SWITCH', 'EARTH_POINTING_Y_S_TO_N_SWITCH']\n\nevtf['NADIR_POINTING_X'] = 0\nevtf['EARTH_POINTING_Y'] = 0\n\nfor feat in ['NADIR_POINTING_X', 'EARTH_POINTING_Y']:\n print('Processing %s' % feat)\n changes = np.where(map(lambda x: feat in x, evtf.description.values))[0]\n for start, end in zip(changes, np.concatenate([changes[1:], [len(evtf)]])):\n evtf.ix[start:end, '%s' % feat] = 1 if 'N_TO_S' in evtf.description.values[start] else -1\n evtf.ix[0:changes[0], '%s' % feat] = evtf.ix[changes[0], '%s' % feat] * -1\n\n\n###############################################################################\n########################## TRAJECTORY EVENTS ##################################\n###############################################################################\n'''\n'x km descend’ and ‘x km ascend’, refer to the event\nwhen the height of the S/C position above the Mars reference ellipsoid drops\nbelow or rises above x km.\n'''\nfeats_trajectory = np.unique(\n filter(lambda x: x.endswith('SCEND'), \n np.unique(evtf.description)))\n\nevtf['trajectory_position_above_reference'] = 0\nevtf['trajectory_direction'] = 0\nchanges_trajectory = np.where(map(lambda x: x.endswith('SCEND'), evtf.description.values))[0]\nprint('Processing trajectory changes')\nfor start, end in zip(changes_trajectory, np.concatenate([changes_trajectory[1:], [len(evtf)]])):\n splits = evtf.description.iloc[start].split('_')\n pos = int(splits[0])\n updown = 1 if splits[-1] == 'ASCEND' else -1\n evtf.ix[start:end, 'trajectory_position_above_reference'] = pos\n evtf.ix[start:end, 'trajectory_direction'] = updown\n\n###############################################################################\n################################ SAVING #######################################\n###############################################################################\nevtf.drop(['description'], axis=1, inplace=True)\n\nfilename = 'evtf_processed'\nsavepath = os.path.join(DATA_PATH, filename + '.csv')\nprint('Saving to %s' % savepath)\nevtf.to_csv(savepath, index=False)\n" ]
[ [ "numpy.unique" ] ]
PuchatekwSzortach/printed_characters_net
[ "9478d4ecffeca040cc353676382d0ec775558458" ]
[ "net/vision.py" ]
[ "\"\"\"\nModule with computer vision related code.\nDetecting card candidates in images, handling image contours and like.\n\"\"\"\nimport cv2\nimport numpy as np\n\n\nclass CardCandidate:\n \"\"\"\n A very simple container for a card candidate\n \"\"\"\n\n def __init__(self, coordinates, image):\n\n self.coordinates = coordinates\n self.image = image\n\n\nclass CardCandidatesExtractor:\n \"\"\"\n Class for extracting card candidates from an input image\n \"\"\"\n\n def get_card_candidates(self, image, reconstruction_size):\n\n image_contours = self._get_image_contours(image)\n card_contours = self._get_card_like_contours(image_contours, image.shape[0] * image.shape[1])\n outer_contours = self._get_outermost_contours(card_contours, image.shape)\n\n # OpenCV puts into contour an unnecessary dimension, so remove it\n squeezed_contours = [np.squeeze(contour) for contour in outer_contours]\n\n reconstruction_contour = np.array([\n [0, 0],\n [reconstruction_size[1], 0],\n [reconstruction_size[1], reconstruction_size[0]],\n [0, reconstruction_size[0]]\n ])\n\n # We need to make sure ordering within each contour is consistent\n ordered_contours = [get_ordered_card_contour(contour) for contour in squeezed_contours]\n\n card_candidates = []\n\n for contour in ordered_contours:\n\n reconstruction = get_card_reconstruction(\n image, contour, reconstruction_contour, reconstruction_size)\n\n card_candidates.append(CardCandidate(contour, reconstruction))\n\n return card_candidates\n\n def _get_image_contours(self, image):\n\n grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY, cv2.THRESH_BINARY)\n thresholded = self._get_thresholded_image(grayscale)\n\n _, contours, _ = cv2.findContours(thresholded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n return contours\n\n def _get_thresholded_image(self, grayscale_image):\n\n return cv2.adaptiveThreshold(\n src=grayscale_image, maxValue=255,\n adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,\n thresholdType=cv2.THRESH_BINARY, blockSize=9, C=-5)\n\n def _get_card_like_contours(self, contours, image_size):\n\n epsilon = image_size * 1e-5\n\n simplified_contours = [cv2.approxPolyDP(contour, epsilon=epsilon, closed=True) for contour in contours]\n\n card_like_contours = [\n contour for contour in simplified_contours\n if is_contour_card_like(contour, image_size)]\n\n return card_like_contours\n\n def _get_outermost_contours(self, contours, image_shape):\n\n image = np.zeros(shape=(image_shape[0], image_shape[1])).astype(np.uint8)\n cv2.drawContours(image, contours, contourIdx=-1, color=255)\n\n # Use OpenCV for heavy lifting\n _, outer_contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Run through sanity check, just to be extra careful, and return results\n return self._get_card_like_contours(outer_contours, image_shape[0] * image_shape[1])\n\n\ndef get_card_reconstruction(image, contour, reconstruction_contour, reconstruction_size):\n \"\"\"\n Given an image, a contour inside it and a reconstruction_contour,\n map image contained inside contour to reconstruction contour.\n Both contours are assumed to have exactly 4 points.\n :param image: image to reconstruct from\n :param contour: contour around area to recontruct from image\n :param reconstruction_contour: contour of intended reconstruction\n :return: CardReconstruction object\n \"\"\"\n\n # Do a sanity check\n if len(contour) != 4 or len(reconstruction_contour) != 4:\n message = \"Both contour (len = {}) and reconstruction contour (len = {}) \"\n \"should have exactly 4 
points.\".format(len(contour), len(reconstruction_contour))\n raise ValueError(message)\n\n transformation_matrix = cv2.getPerspectiveTransform(\n contour.astype(np.float32), reconstruction_contour.astype(np.float32))\n\n shape = np.max(reconstruction_contour, axis=0).astype(np.int32)\n\n reconstruction = cv2.warpPerspective(image, transformation_matrix, tuple(shape))\n\n resized_reconstruction = cv2.resize(reconstruction, reconstruction_size)\n grayscale_reconstruction = cv2.cvtColor(resized_reconstruction, cv2.COLOR_RGB2GRAY)\n return grayscale_reconstruction\n\n\ndef get_ordered_card_contour(contour):\n \"\"\"\n Given a 4-points contour, return a version that has left top point as first element,\n and then proceeds clockwise.\n :param contour: A 4-points, roughly rectangular contour that represents a card candidate\n :return: contour with points rotated so that first contour is top left, and following\n contours are in clockwise-order\n \"\"\"\n\n # A sanity check\n if len(contour) != 4:\n raise ValueError(\"Contour length must be 4\")\n\n ordered_contour = np.zeros_like(contour)\n\n # Sum coordinates for each point\n sums = np.sum(contour, axis=1)\n\n # Top left contour will have smallest coordinates sum,\n # right bottom contour will have largest coordinates sum\n ordered_contour[0] = contour[np.argmin(sums)]\n ordered_contour[2] = contour[np.argmax(sums)]\n\n differences = np.diff(contour, axis=1)\n\n # Top right contour will have smallest coordinates difference,\n # bottom left contour will have largest coordinates difference\n ordered_contour[1] = contour[np.argmin(differences)]\n ordered_contour[3] = contour[np.argmax(differences)]\n\n return ordered_contour\n\n\ndef is_contour_card_like(contour, image_size):\n \"\"\"\n Given a contour, judge whether it is like to represent a card.\n The criteria are that it is roughly rectangular and has a sensible area w.r.t.\n total image size\n :param contour: contour to judge\n :param image_size: number of pixels in image\n :return: Bool\n \"\"\"\n\n max_area = 0.3 * image_size\n min_area = 0.001 * image_size\n\n # Only contours with 4 points can be card candidates\n if len(contour) != 4:\n return False\n\n contour_area = cv2.contourArea(contour)\n\n # Contours must be within acceptable size\n if contour_area < min_area or max_area < contour_area:\n return False\n\n squeezed_contour = np.squeeze(contour)\n min_angle = get_minimum_inner_angle(squeezed_contour)\n max_angle = get_maximum_inner_angle(squeezed_contour)\n\n # And have inner angles somewhat close to right angles\n if min_angle < np.pi / 6 or np.pi / 1.5 < max_angle:\n return False\n\n # All tests passed, so judge contour as card like\n return True\n\n\ndef get_contours_inner_angles(contour):\n \"\"\"\n Given a closed contour, return a list of its inner angles in radians\n :param contour: a 2D numpy array\n :return: 1D numpy array of angles in radions\n \"\"\"\n\n # Unroll all points triplets to vectors\n a = contour\n b = np.tile(contour, reps=(2, 1))[1:1 + len(contour)]\n c = np.tile(contour, reps=(2, 1))[2:2 + len(contour)]\n\n a_side = np.linalg.norm(b - a, axis=1)\n b_side = np.linalg.norm(c - b, axis=1)\n c_side = np.linalg.norm(c - a, axis=1)\n\n cosines = (a_side**2 + b_side**2 - c_side**2) / (2 * a_side * b_side)\n return np.arccos(cosines)\n\n\ndef get_minimum_inner_angle(contour):\n \"\"\"\n Given a list of 2D points representing a closed contour,\n return smallest inner angle\n :param contour: a 2D numpy array\n :return: angle in radians\n \"\"\"\n\n return 
np.min(get_contours_inner_angles(contour))\n\n\ndef get_maximum_inner_angle(contour):\n \"\"\"\n Given a list of 2D points representing a closed contour,\n return largest inner angle\n :param contour: a 2D numpy array\n :return: angle in radians\n \"\"\"\n\n return np.max(get_contours_inner_angles(contour))\n" ]
[ [ "numpy.squeeze", "numpy.linalg.norm", "numpy.arccos", "numpy.tile", "numpy.max", "numpy.argmax", "numpy.zeros_like", "numpy.diff", "numpy.argmin", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
manipopopo/pytorch-lightning
[ "ef7d41692ca04bb9877da5c743f80fceecc6a100" ]
[ "pytorch_lightning/plugins/training_type/ddp_spawn.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport os\nimport re\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nimport torch.distributed\nimport torch.multiprocessing as mp\nfrom torch.nn.parallel.distributed import DistributedDataParallel\n\nfrom pytorch_lightning.distributed.dist import LightningDistributed\nfrom pytorch_lightning.overrides import LightningDistributedModule\nfrom pytorch_lightning.overrides.distributed import prepare_for_backward\nfrom pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment\nfrom pytorch_lightning.plugins.training_type.parallel import ParallelPlugin\nfrom pytorch_lightning.trainer.states import TrainerFn\nfrom pytorch_lightning.utilities import (\n _TORCH_GREATER_EQUAL_1_7,\n _TORCH_GREATER_EQUAL_1_8,\n rank_zero_deprecation,\n rank_zero_warn,\n)\nfrom pytorch_lightning.utilities.cloud_io import atomic_save\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nfrom pytorch_lightning.utilities.distributed import (\n distributed_available,\n rank_zero_info,\n rank_zero_only,\n ReduceOp,\n sync_ddp_if_available,\n)\nfrom pytorch_lightning.utilities.seed import reset_seed\n\nif _TORCH_GREATER_EQUAL_1_8:\n from pytorch_lightning.utilities.distributed import register_ddp_comm_hook\n\nlog = logging.getLogger(__name__)\n\n\nclass DDPSpawnPlugin(ParallelPlugin):\n \"\"\"\n Spawns processes using the :func:`torch.multiprocessing.spawn` method and joins processes after\n training finishes.\n \"\"\"\n\n distributed_backend = \"ddp_spawn\"\n\n def __init__(\n self,\n parallel_devices: Optional[List[torch.device]] = None,\n num_nodes: Optional[int] = None,\n cluster_environment: ClusterEnvironment = None,\n sync_batchnorm: Optional[bool] = None,\n ddp_comm_state: Optional[object] = None,\n ddp_comm_hook: Optional[callable] = None,\n ddp_comm_wrapper: Optional[callable] = None,\n **kwargs: Any,\n ):\n super().__init__(parallel_devices=parallel_devices, cluster_environment=cluster_environment)\n if num_nodes is not None:\n rank_zero_deprecation(\n \"Argument `num_nodes` in `DDPSpawnPlugin` is deprecated in v1.4, and will be removed in v1.6. \"\n \"Notice that it will be overriden by the trainer setting.\"\n )\n self._num_nodes = num_nodes or 1\n if sync_batchnorm is not None:\n rank_zero_deprecation(\n \"Argument `sync_batchnorm` in `DDPSpawnPlugin` is deprecated in v1.4, and will be removed in v1.6. 
\"\n \"Notice that it will be overriden by the trainer setting.\"\n )\n self._sync_batchnorm = sync_batchnorm or False\n self._ddp_kwargs = kwargs\n self.dist = LightningDistributed()\n self.num_processes = len(parallel_devices) if parallel_devices is not None else 0\n self.mp_queue = None\n self._ddp_comm_state = ddp_comm_state\n self._ddp_comm_hook = ddp_comm_hook\n self._ddp_comm_wrapper = ddp_comm_wrapper\n self._local_rank = 0\n self.set_world_ranks()\n\n @property\n def num_nodes(self) -> int:\n return self._num_nodes\n\n @num_nodes.setter\n def num_nodes(self, num_nodes: int) -> None:\n # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks\n self._num_nodes = num_nodes\n self.set_world_ranks()\n\n @property\n def sync_batchnorm(self) -> bool:\n return self._sync_batchnorm\n\n @sync_batchnorm.setter\n def sync_batchnorm(self, sync_batchnorm: bool) -> None:\n self._sync_batchnorm = sync_batchnorm\n\n @property\n def local_rank(self) -> int:\n return self._local_rank\n\n def __getstate__(self):\n \"\"\" Makes this plugin pickleable without destroying the queue in the current process. \"\"\"\n state = self.__dict__.copy()\n state[\"mp_queue\"] = None\n return state\n\n def __setstate__(self, state):\n self.__dict__ = state\n\n @property\n def root_device(self):\n return self.parallel_devices[self.local_rank]\n\n @property\n def distributed_sampler_kwargs(self):\n distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank)\n return distributed_sampler_kwargs\n\n @property\n def _is_single_process_single_device(self):\n return True\n\n def setup(self, model):\n os.environ[\"MASTER_PORT\"] = str(self.cluster_environment.master_port())\n # pass in a state q\n smp = mp.get_context(\"spawn\")\n self.mp_queue = smp.SimpleQueue()\n\n def set_world_ranks(self, process_idx: int = 0) -> None:\n self._local_rank = process_idx\n if self.cluster_environment is None:\n return\n self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)\n self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)\n rank_zero_only.rank = self.cluster_environment.global_rank()\n\n @property\n def mp_spawn_kwargs(self):\n return {\n \"args\": (self.lightning_module.trainer, self.mp_queue),\n \"nprocs\": self.num_processes,\n }\n\n def start_training(self, trainer):\n mp.spawn(self.new_process, **self.mp_spawn_kwargs)\n # reset optimizers, since main process is never used for training and thus does not have a valid optim state\n trainer.optimizers = []\n\n def start_evaluating(self, trainer):\n mp.spawn(self.new_process, **self.mp_spawn_kwargs)\n\n def start_predicting(self, trainer):\n mp.spawn(self.new_process, **self.mp_spawn_kwargs)\n\n def new_process(self, process_idx, trainer, mp_queue):\n self.mp_queue = mp_queue\n\n reset_seed()\n\n self.set_world_ranks(process_idx)\n\n # set warning rank\n rank_zero_only.rank = self.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n self.init_ddp_connection(self.global_rank, self.world_size)\n\n # TODO: we moved it to the trainer.fit after calling pre_dispatch\n # ... 
need to double check that it is the correct place\n # self.trainer.call_setup_hook(self.model)\n\n # set the ranks and devices\n self.dist.rank = self.global_rank\n self.dist.device = self.root_device\n\n # move the model to the correct device\n self.model_to_device()\n\n if self.sync_batchnorm:\n self.model = self.configure_sync_batchnorm(self.model)\n\n self.configure_ddp()\n\n self.barrier()\n\n results = trainer.run_stage()\n\n # persist info in ddp_spawn\n self.transfer_distrib_spawn_state_on_fit_end(results)\n\n def post_dispatch(self):\n # restore main state with best weights\n best_path = self.mp_queue.get()\n last_path = self.mp_queue.get()\n self._results = self.mp_queue.get()\n # get the `callback_metrics` and set it to the trainer\n # only in case the user does not override it.\n self.lightning_module.get_from_queue(self.mp_queue)\n\n # recover the weights of the processes trained in the children\n self.__recover_child_process_weights(best_path, last_path)\n\n def pre_configure_ddp(self):\n # if unset, default `find_unused_parameters` `True`\n # Many models require setting this parameter to True, as there are corner cases\n # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.\n # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.\n self._ddp_kwargs[\"find_unused_parameters\"] = self._ddp_kwargs.get(\"find_unused_parameters\", True)\n # todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization\n if _TORCH_GREATER_EQUAL_1_7 and not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(\n \"find_unused_parameters\", False\n ):\n rank_zero_warn(\n \"From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` \"\n \"to properly work with DDP.\"\n )\n self._ddp_kwargs[\"find_unused_parameters\"] = True\n\n def _register_ddp_hooks(self) -> None:\n # currently, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode\n # https://github.com/pytorch/pytorch/blob/v1.8.0/torch/nn/parallel/distributed.py#L1080-L1084\n if (_TORCH_GREATER_EQUAL_1_8 and self.on_gpu and self._is_single_process_single_device):\n register_ddp_comm_hook(\n model=self._model,\n ddp_comm_state=self._ddp_comm_state,\n ddp_comm_hook=self._ddp_comm_hook,\n ddp_comm_wrapper=self._ddp_comm_wrapper,\n )\n\n def configure_ddp(self):\n self.pre_configure_ddp()\n self._model = DistributedDataParallel(\n LightningDistributedModule(self.model),\n device_ids=self.determine_ddp_device_ids(),\n **self._ddp_kwargs,\n )\n self._register_ddp_hooks()\n\n def init_ddp_connection(self, global_rank: Optional[int], world_size: Optional[int]) -> None:\n # TODO: this code is duplicated in DDP and DDPSpawn, make this a function\n global_rank = global_rank if global_rank is not None else self.cluster_environment.global_rank()\n world_size = world_size if world_size is not None else self.cluster_environment.world_size()\n os.environ[\"MASTER_ADDR\"] = self.cluster_environment.master_address()\n os.environ[\"MASTER_PORT\"] = str(self.cluster_environment.master_port())\n\n if not torch.distributed.is_initialized():\n log.info(f\"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}\")\n torch.distributed.init_process_group(\n self.torch_distributed_backend, rank=global_rank, world_size=world_size\n )\n\n # on rank=0 let everyone know training is starting\n 
rank_zero_info(\n f\"{'-' * 100}\\n\"\n f\"distributed_backend={self.torch_distributed_backend}\\n\"\n f\"All DDP processes registered. Starting ddp with {self.world_size} processes\\n\"\n f\"{'-' * 100}\\n\"\n )\n\n def determine_ddp_device_ids(self):\n if self.root_device.type == \"cpu\":\n return None\n return [self.root_device.index]\n\n def transfer_distrib_spawn_state_on_fit_end(self, results):\n checkpoint_callback = self.lightning_module.trainer.checkpoint_callback\n best_model_path = checkpoint_callback.best_model_path if checkpoint_callback else None\n\n # requires to compute the state_dict on all processes in case Metrics are present\n state_dict = self.lightning_module.state_dict()\n\n if self.global_rank == 0 and self.mp_queue is not None:\n rank_zero_warn(\"cleaning up ddp environment...\")\n\n # save the last weights\n last_path = None\n if (\n self.lightning_module.trainer.state.fn == TrainerFn.FITTING and best_model_path is not None\n and len(best_model_path) > 0\n ):\n last_path = re.sub(\".ckpt\", \".tmp_end.ckpt\", best_model_path)\n atomic_save(self.on_save(state_dict), last_path)\n\n # todo, pass complete checkpoint as state dictionary\n self.mp_queue.put(best_model_path)\n self.mp_queue.put(last_path)\n self.mp_queue.put(results)\n self.lightning_module.add_to_queue(self.mp_queue) # adds the `callback_metrics` to the queue\n\n def __recover_child_process_weights(self, best_path, last_path):\n # transfer back the best path to the trainer\n if self.lightning_module.trainer.checkpoint_callback:\n self.lightning_module.trainer.checkpoint_callback.best_model_path = best_path\n # todo, pass also best score\n\n # load last weights\n if last_path is not None and self.lightning_module.trainer.state.fn == TrainerFn.FITTING:\n ckpt = pl_load(last_path, map_location=lambda storage, loc: storage)\n self.lightning_module.load_state_dict(ckpt)\n\n def barrier(self, *args, **kwargs) -> None:\n if not distributed_available():\n return\n if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == \"nccl\":\n torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())\n else:\n torch.distributed.barrier()\n\n def broadcast(self, obj: object, src: int = 0) -> object:\n if not distributed_available():\n return obj\n return self.dist.broadcast(obj)\n\n def model_to_device(self):\n if self.root_device.type == \"cuda\":\n # set the device on the spawned subprocesses\n torch.cuda.set_device(self.root_device)\n self.model.to(self.root_device)\n\n def pre_backward(self, closure_loss: torch.Tensor) -> None:\n \"\"\"Run before precision plugin executes backward\"\"\"\n if not self.lightning_module.automatic_optimization:\n prepare_for_backward(self.model, closure_loss)\n\n def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = \"mean\") -> torch.Tensor:\n \"\"\"\n Reduces a tensor from several distributed processes to one aggregated tensor.\n\n Args:\n tensor: the tensor to sync and reduce\n group: the process group to gather results from. Defaults to all processes (world)\n reduce_op: the reduction operation. 
Defaults to 'mean'/'avg'.\n Can also be a string 'sum' to calculate the sum during reduction.\n\n Return:\n reduced value, except when the input was not a tensor the output remains is unchanged\n \"\"\"\n if isinstance(tensor, torch.Tensor):\n tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)\n return tensor\n\n def training_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def validation_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def test_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def predict_step(self, *args, **kwargs):\n return self.model(*args, **kwargs)\n\n def post_training_step(self):\n if not self.lightning_module.automatic_optimization:\n self.model.require_backward_grad_sync = True\n\n @classmethod\n def register_plugins(cls, plugin_registry: Dict) -> None:\n plugin_registry.register(\n \"ddp_spawn_find_unused_parameters_false\",\n cls,\n description=\"DDPSpawn Plugin with `find_unused_parameters` as False\",\n find_unused_parameters=False\n )\n" ]
[ [ "torch.distributed.init_process_group", "torch.multiprocessing.spawn", "torch.cuda.set_device", "torch.distributed.is_initialized", "torch.distributed.barrier", "torch.multiprocessing.get_context", "torch.distributed.get_backend" ] ]
liuzhongling/Image-Captioning-Project-CVND
[ "85a6ae87bd3994e42c411ee748cc16fab7300dd6" ]
[ "data_loader.py" ]
[ "import nltk\nimport os\nimport torch\nimport torch.utils.data as data\nfrom vocabulary import Vocabulary\nfrom PIL import Image\nfrom pycocotools.coco import COCO\nimport numpy as np\nfrom tqdm import tqdm\nimport random\nimport json\n\ndef get_loader(transform,\n mode='train',\n batch_size=1,\n vocab_threshold=None,\n vocab_file='./vocab.pkl',\n start_word=\"<start>\",\n end_word=\"<end>\",\n unk_word=\"<unk>\",\n vocab_from_file=True,\n num_workers=0,\n cocoapi_loc='opt/'):\n \"\"\"Returns the data loader.\n Args:\n transform: Image transform.\n mode: One of 'train' or 'test'.\n batch_size: Batch size (if in testing mode, must have batch_size=1).\n vocab_threshold: Minimum word count threshold.\n vocab_file: File containing the vocabulary. \n start_word: Special word denoting sentence start.\n end_word: Special word denoting sentence end.\n unk_word: Special word denoting unknown words.\n vocab_from_file: If False, create vocab from scratch & override any existing vocab_file.\n If True, load vocab from from existing vocab_file, if it exists.\n num_workers: Number of subprocesses to use for data loading \n cocoapi_loc: The location of the folder containing the COCO API: https://github.com/cocodataset/cocoapi\n \"\"\"\n \n assert mode in ['train', 'test'], \"mode must be one of 'train' or 'test'.\"\n if vocab_from_file==False: assert mode=='train', \"To generate vocab from captions file, must be in training mode (mode='train').\"\n\n # Based on mode (train, val, test), obtain img_folder and annotations_file.\n if mode == 'train':\n if vocab_from_file==True: assert os.path.exists(vocab_file), \"vocab_file does not exist. Change vocab_from_file to False to create vocab_file.\"\n img_folder = os.path.join(cocoapi_loc, 'cocoapi/images/train2014/')\n annotations_file = os.path.join(cocoapi_loc, 'cocoapi/annotations/captions_train2014.json')\n if mode == 'test':\n assert batch_size==1, \"Please change batch_size to 1 if testing your model.\"\n assert os.path.exists(vocab_file), \"Must first generate vocab.pkl from training data.\"\n assert vocab_from_file==True, \"Change vocab_from_file to True.\"\n img_folder = os.path.join(cocoapi_loc, 'cocoapi/images/test2014/')\n annotations_file = os.path.join(cocoapi_loc, 'cocoapi/annotations/image_info_test2014.json')\n\n # COCO caption dataset.\n dataset = CoCoDataset(transform=transform,\n mode=mode,\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_file=vocab_file,\n start_word=start_word,\n end_word=end_word,\n unk_word=unk_word,\n annotations_file=annotations_file,\n vocab_from_file=vocab_from_file,\n img_folder=img_folder)\n\n if mode == 'train':\n # Randomly sample a caption length, and sample indices with that length.\n indices = dataset.get_train_indices()\n # Create and assign a batch sampler to retrieve a batch with the sampled indices.\n initial_sampler = data.sampler.SubsetRandomSampler(indices=indices)\n # data loader for COCO dataset.\n data_loader = data.DataLoader(dataset=dataset, \n num_workers=num_workers,\n batch_sampler=data.sampler.BatchSampler(sampler=initial_sampler,\n batch_size=dataset.batch_size,\n drop_last=False))\n else:\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=dataset.batch_size,\n shuffle=True,\n num_workers=num_workers)\n\n return data_loader\n\nclass CoCoDataset(data.Dataset):\n \n def __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n self.transform = transform\n self.mode = 
mode\n self.batch_size = batch_size\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n self.img_folder = img_folder\n if self.mode == 'train':\n self.coco = COCO(annotations_file)\n self.ids = list(self.coco.anns.keys())\n print('Obtaining caption lengths...')\n all_tokens = [nltk.tokenize.word_tokenize(str(self.coco.anns[self.ids[index]]['caption']).lower()) for index in tqdm(np.arange(len(self.ids)))]\n self.caption_lengths = [len(token) for token in all_tokens]\n else:\n test_info = json.loads(open(annotations_file).read())\n self.paths = [item['file_name'] for item in test_info['images']]\n \n def __getitem__(self, index):\n # obtain image and caption if in training mode\n if self.mode == 'train':\n ann_id = self.ids[index]\n caption = self.coco.anns[ann_id]['caption']\n img_id = self.coco.anns[ann_id]['image_id']\n path = self.coco.loadImgs(img_id)[0]['file_name']\n\n # Convert image to tensor and pre-process using transform\n image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\n image = self.transform(image)\n\n # Convert caption to tensor of word ids.\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(self.vocab(self.vocab.start_word))\n caption.extend([self.vocab(token) for token in tokens])\n caption.append(self.vocab(self.vocab.end_word))\n caption = torch.Tensor(caption).long()\n\n # return pre-processed image and caption tensors\n return image, caption\n\n # obtain image if in test mode\n else:\n path = self.paths[index]\n\n # Convert image to tensor and pre-process using transform\n PIL_image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\n orig_image = np.array(PIL_image)\n image = self.transform(PIL_image)\n\n # return original image and pre-processed image tensor\n return orig_image, image\n\n def get_train_indices(self):\n sel_length = np.random.choice(self.caption_lengths)\n all_indices = np.where([self.caption_lengths[i] == sel_length for i in np.arange(len(self.caption_lengths))])[0]\n indices = list(np.random.choice(all_indices, size=self.batch_size))\n return indices\n\n def __len__(self):\n if self.mode == 'train':\n return len(self.ids)\n else:\n return len(self.paths)" ]
[ [ "torch.Tensor", "numpy.random.choice", "torch.utils.data.DataLoader", "torch.utils.data.sampler.SubsetRandomSampler", "numpy.array", "torch.utils.data.sampler.BatchSampler" ] ]
goofyweng/mavros_multi_uav_control
[ "60a17e59bd54b491521ac4fb173c9bca74490718" ]
[ "scripts/ru_s mTSP/f.py" ]
[ "#!/usr/bin/env python\n#coding=utf-8\n\n# calculate the individual ant's fitness\ndef longest_len(colony,edges,antNo,gp,n):\n # 回傳本次旅行團所需的最長距離\n import numpy as np\n\n indi_max=np.zeros((antNo,1))\n\n\n for i in range(antNo):\n length=np.zeros((gp,1))\n\n for k in range(gp):\n for j in range(n):\n # n=10, j=0~9\n currentNode=int(colony[int(3*i+k),j])\n nextNode=int(colony[int(3*i+k),j+1])\n\n length[k,0]=length[k,0]+edges[currentNode,nextNode]\n maxi=0\n for t in range(3):\n if length[t,0]>maxi:\n maxi=length[t,0]\n\n\n\n indi_max[i,0]=indi_max[i,0]+maxi\n\n return (indi_max)\n\n# calculate total length\ndef total_len(i,colony,edges,antNo,gp,n):\n # 計算個別旅團total len\n # i 代表第幾團\n total=0\n for k in range(gp):\n for x in range(n):\n # n=10, x=0~9\n currentNode=int(colony[int(3*i+k),x])\n nextNode=int(colony[int(3*i+k),x+1])\n\n total=total+edges[currentNode,nextNode]\n return (total)\n\ndef group_len(i,j,n,colony,edges,antNo):\n # 最小單位的總長度\n # i 為antNo j為第幾團 n為點數\n total=0\n\n\n for k in range(n):\n currentNode=int(colony[int(3*i+j),k])\n nextNode=int(colony[int(3*i+j),k+1])\n\n total=total+edges[currentNode,nextNode]\n\n return (total)\n\ndef total(bestTour,edges,gp,n):\n # 專門為bestTour 找到最短總路徑\n tolen=0\n for i in range(gp):\n for j in range(n):\n\n currentNode=int(bestTour[i,j])\n nextNode=int(bestTour[i,j+1])\n\n tolen=tolen+edges[currentNode,nextNode]\n return(tolen)\n" ]
[ [ "numpy.zeros" ] ]
Anonymous-ICML2022/Multi-Agent-Constrained-Policy-Optimisation
[ "0bfe52024e4d07600a39d3228de36fd75a3cd65d" ]
[ "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py" ]
[ "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nimport os\nimport mujoco_py as mjp\nfrom gym import error, spaces\n\nclass CoupledHalfCheetah(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, **kwargs):\n mujoco_env.MujocoEnv.__init__(self, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'coupled_half_cheetah.xml'), 5)\n utils.EzPickle.__init__(self)\n\n def step(self, action):\n\n #ADDED\n # xposbefore = self.sim.data.qpos[1]\n # t = self.data.time\n # wall_act = .02 * np.sin(t / 3) ** 2 - .004\n # mjp.functions.mj_rnePostConstraint(self.sim.model,\n # self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n # action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n # self.do_simulation(action_p_wall, self.frame_skip)\n # xposafter = self.sim.data.qpos[1]\n # wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n # wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n # xdist = wallpos - xposafter\n # obj_cost = int(np.abs(xdist) < 2)\n # if obj_cost > 0:\n # self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n # else:\n # self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n # ob = self._get_obs()\n # reward_ctrl = - 0.1 * np.square(action).sum()\n # reward_run = (xposafter - xposbefore) / self.dt\n # reward = reward_ctrl + reward_run\n # done = False\n\n\n\n\n # xposbefore1 = self.sim.data.qpos[0]\n # xposbefore2 = self.sim.data.qpos[len(self.sim.data.qpos) // 2]\n # print(\"self.sim.data.qpos\", self.sim.data.qpos)\n\n xposbefore1 = self.get_body_com(\"torso\")[0]\n xposbefore2 = self.get_body_com(\"torso2\")[0]\n\n yposbefore1 = self.get_body_com(\"torso\")[1]\n yposbefore2 = self.get_body_com(\"torso2\")[1]\n\n # ADDED\n t = self.data.time\n wall_act = .02 * np.sin(t / 3) ** 2 - .004\n mjp.functions.mj_rnePostConstraint(self.sim.model,\n self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n # print(\"action_p_wall\", np.array(action_p_wall).shape)\n # print(\"action\", np.array(action).shape)\n # print(\"self.frame_skip\", self.frame_skip)\n self.do_simulation(action_p_wall, self.frame_skip)\n\n # self.do_simulation(action, self.frame_skip)\n # xposafter1 = self.sim.data.qpos[0]\n # xposafter2 = self.sim.data.qpos[len(self.sim.data.qpos)//2]\n xposafter1 = self.get_body_com(\"torso\")[0]\n xposafter2 = self.get_body_com(\"torso2\")[0]\n\n yposafter1 = self.get_body_com(\"torso\")[1]\n yposafter2 = self.get_body_com(\"torso2\")[1]\n\n # ADDED\n wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n # wallpos1 = self.data.get_geom_xpos(\"obj_geom1\")[0]\n y_wallpos1 = self.data.get_geom_xpos(\"wall1\")[1]\n y_wallpos2 = self.data.get_geom_xpos(\"wall2\")[1]\n # print(\"x_wallpos1 = self.data.get_geom_xpos\", x_wallpos1)\n # print(\"x_wallpos2 = self.data.get_geom_xpos\", x_wallpos2)\n wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n xdist = np.abs(wallpos - xposafter1) #+ np.abs(wallpos - xposafter2) #+ (wallpos1 - xposafter1) + (wallpos1 - xposafter2)\n obj_cost = 0 # or int(np.abs(wallpos1 - xposafter2) < 5) or int(np.abs(wallpos1 - xposafter2) < 5)\\\n #\n if int(np.abs(wallpos - xposafter1) < 5) or int(np.abs(wallpos - xposafter2) < 5) \\\n or int(np.abs(y_wallpos1 - yposafter1) < 5) or int(np.abs(y_wallpos2 - yposafter2) < 5):\n obj_cost = 1\n\n # obj_cost = int(np.abs(xdist) < 5)\n # print(\"xposbefore1\", 
xposbefore1)\n # print(\"xposbefore2\", xposbefore2)\n # print(\"yposafter1\", yposafter1)\n # print(\"yposafter2\", yposafter2)\n # print(\"np.abs(x_wallpos1 - yposafter1)\", np.abs(x_wallpos1 - yposafter1))\n # print(\"xposafter1\", xposafter1)\n # print(\"xposafter2\", xposafter2)\n # print(\"wallpos\", wallpos)\n # print(\"wallpos1\", wallpos1)\n # print(\"xdist\", xdist)\n # print(\"(wallpos1 - xposafter2)\", (wallpos1 - xposafter2))\n # print(\"(wallpos - xposafter1)\", (wallpos - xposafter1))\n # print(\"(wallpos - xposafter2)\", (wallpos - xposafter2))\n if obj_cost > 0:\n self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n else:\n self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n ob = self._get_obs()\n\n reward_ctrl1 = - 0.1 * np.square(action[0:len(action)//2]).sum()\n reward_ctrl2 = - 0.1 * np.square(action[len(action)//2:]).sum()\n reward_run1 = (xposafter1 - xposbefore1)/self.dt\n reward_run2 = (xposafter2 - xposbefore2) / self.dt\n reward = (reward_ctrl1 + reward_ctrl2)/2.0 + (reward_run1 + reward_run2)/2.0\n done = False\n return ob, reward, done, dict(cost=obj_cost, reward_run1=reward_run1, reward_ctrl1=reward_ctrl1,\n reward_run2=reward_run2, reward_ctrl2=reward_ctrl2)\n\n def _get_obs(self):\n\n # ADDED\n wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004\n xdist = (self.data.get_geom_xpos(\"obj_geom\")[0] - self.sim.data.qpos[1]) / 10\n\n return np.concatenate([\n self.sim.data.qpos.flat[2:],\n self.sim.data.qvel.flat[1:],\n [wallvel],\n [wall_f],\n np.clip([xdist], -5, 5),\n ])\n\n # return np.concatenate([\n # self.sim.data.qpos.flat[1:],\n # self.sim.data.qvel.flat,\n # ])\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n def get_env_info(self):\n return {\"episode_limit\": self.episode_limit}\n\n def _set_action_space(self):\n bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n low, high = bounds.T\n low, high = low[:-1], high[:-1]\n self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n return self.action_space" ]
[ [ "numpy.squeeze", "numpy.sin", "numpy.abs", "numpy.clip" ] ]
CityU-AIM-Group/GFBS
[ "d71361243f1bcf699e1a20b312b05fe0be4dfd6d" ]
[ "gfbs.py" ]
[ "'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport logging\nimport datasets\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchsummary import summary\nimport sys\nsys.path.append('./differentiable_models')\n\nimport torchvision.transforms as transforms\n\nimport os\nimport copy\nimport argparse\nfrom differentiable_models import *\nfrom utils import save_model, MODEL_DICT, CosineAnnealingLR\nimport time\nos.environ['CUDA_VISIBLE_DEVICE']='0'\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\nlogging.basicConfig(filename=str(__file__)[:-3]+'_'+time.strftime(\"%Y-%m-%d_%H:%M:%S\", time.localtime())+'.log', \n level=logging.INFO, \n format=LOG_FORMAT, \n filemode='w')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nlogging.getLogger('').addHandler(console)\n\n# MODEL_DICT = {'dresnet20': DResNet20(), 'dresnet56': DResNet56(), 'vgg16': VGG('VGG16'), 'maskedvgg16': MaskedVGG('MaskedVGG16')}\n# TODO: Make apis for mobilenetv2\n# Data\ndef load_data(dataset, bs):\n print('==> Preparing data..{}'.format(dataset))\n if dataset == \"cifar10\":\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n trainset = datasets.CIFAR10(root='./data', type='train+val', transform=transform_train, download=True)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=bs, shuffle=True, num_workers=2)\n\n valset = datasets.CIFAR10(root='./data', type='val', transform=transform_test, download=True)\n valloader = torch.utils.data.DataLoader(valset, batch_size=200, shuffle=False, num_workers=2)\n\n testset = datasets.CIFAR10(root='./data', type='test', transform=transform_test, download=True)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)\n elif dataset == \"cifar100\":\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),\n ])\n trainset = torchvision.datasets.CIFAR100(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=bs, shuffle=True, num_workers=2)\n\n valset = torchvision.datasets.CIFAR100(\n root='./data', train=True, download=True, transform=transform_train)\n valloader = torch.utils.data.DataLoader(\n valset, batch_size=bs, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR100(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=100, shuffle=False, num_workers=2)\n return trainloader, valloader, testloader\n\n# Training\ndef gfbs(net, optimizer, dtloader, epoch):\n net.train()\n # global bn_grad_list\n for i, (data, target) in enumerate(dtloader):\n data, target = data.to(device), target.to(device)\n\n 
optimizer.zero_grad()\n output = net(data)\n loss = F.cross_entropy(output, target)\n loss.backward()\n # optimizer.step()\n pred = output.max(1)[1]\n acc = (pred == target).float().mean()\n\n if i % 100 == 0:\n logging.info('Train Epoch: {} [{}/{}]\\tLoss: {:.6f}, Accuracy: {:.4f}'.format(\n epoch, i, len(dtloader), loss.item(), acc.item()\n ))\n \n break\n return net\n\ndef test(net, dataloader):\n net.eval()\n test_loss = 0\n correct = 0\n global best_accuracy\n\n with torch.no_grad():\n for data, target in dataloader:\n data, target = data.to(device), target.to(device)\n output = net(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item()\n pred = output.max(1)[1]\n correct += (pred == target).float().sum().item()\n\n test_loss /= len(dataloader.dataset)\n acc = correct / len(dataloader.dataset)\n # logging.info('Val set: Average loss: {:.4f}, Accuracy: {:.4f}\\n'.format(\n # test_loss, acc\n # ))\n if acc > best_accuracy:\n best_accuracy = acc\n return test_loss, acc\n\ndef mapper(acc_sort_idx, bn_dict, pruned_ratio):\n toremove = acc_sort_idx[int(len(acc_sort_idx) * (1 - pruned_ratio)):]\n bgn = 0\n modules = []\n for bn_layer in bn_dict:\n end = bgn + bn_dict[bn_layer] - 1\n channel_idx = [bgn, end]\n bgn = end + 1\n modules.append(channel_idx)\n mapper = list(bn_dict.keys())\n dic = {}\n dic_count = {}\n for channel in toremove:\n for idx, layer in enumerate(modules):\n if layer[0] <= channel <= layer[1]:\n if mapper[idx] not in dic:\n dic[mapper[idx]] = []\n dic[mapper[idx]].append(channel - layer[0])\n dic_count[mapper[idx]] = 1\n else:\n if dic_count[mapper[idx]] < bn_dict[mapper[idx]] - 1: # Avoid Layer Collapse\n dic[mapper[idx]].append(channel - layer[0])\n dic_count[mapper[idx]] += 1\n return dic, dic_count\n\ndef bn2gatevgg(name):\n l = name.split('.')\n if len(l) == 4: # module.features.1.weight ==> module.features.2.gate\n l[-1] = 'gate'\n l[-2] = str(int(l[-2]) + 1)\n elif len(l) == 3: # module.features.1 ==> module.features.2.gate\n l[-1] = str(int(l[-1]) + 1)\n l.append('gate')\n return '.'.join(l)\n\ndef bn2gateresnet(name):\n l = name.split('.')\n if 'bn' in l[-2]: # module.layer2.4.bn2.weight => module.layer2.4.gate2.gate\n l[-1] = 'gate'\n l[-2] = str('gate' + l[-2][-1])\n elif 'bn' in l[-1]: # module.layer1.0.bn1 ==> module.layer1.0.gate1.gate\n l[-1] = str('gate' + l[-1][-1]) # bn1 ==> gate1\n l.append('gate')\n return '.'.join(l)\n\ndef bn2mobilenet(name):\n l = name.split('.')\n if 'bn' in l[-2]: # module.layers.2.bn2.weight => module.layer.2.gate2.gate\n l[-1] = 'gate'\n l[-2] = str('gate' + l[-2][-1])\n elif 'bn' in l[-1]: # module.layer.1.bn1 ==> module.layer.1.gate1.gate\n l[-1] = str('gate' + l[-1][-1]) # bn1 ==> gate1\n l.append('gate')\n return '.'.join(l)\n\ndef check_remaining_channels(net):\n total_remain = 0\n total = 0\n for name, param in net.named_parameters():\n if 'gate' in name:\n # print(\"Name: {}, Channels: {}\".format(name, torch.sum(param).item()))\n total_remain += torch.sum(param)\n total += param.shape[1]\n # print('channels remain: %d' % total_remain.item())\n percentage = 100 * (total_remain.item()/total)\n print('Remaining percentage: %.2f (%d/%d)' % (percentage, total_remain.item(), total))\n return total_remain, total_remain.item()/total\n\n\ndef finetune_train(net, optimizer, dataloader, epoch):\n net.train()\n for i, (data, target) in enumerate(trainloader):\n data, target = data.to(device), target.to(device)\n\n optimizer.zero_grad()\n output = net(data)\n loss = F.cross_entropy(output, target)\n 
loss.backward()\n optimizer.step()\n pred = output.max(1)[1]\n acc = (pred == target).float().mean()\n\n if i % 100 == 0:\n logging.info('Train Epoch: {} [{}/{}]\\tLoss: {:.6f}, Accuracy: {:.4f}'.format(\n epoch, i, len(trainloader), loss.item(), acc.item()\n ))\n\n# Testing\ndef finetune_test(net, dataloader, optimizer, scheduler, epoch, name, ratio, smooth):\n net.eval()\n test_loss = 0\n correct = 0\n global best_accuracy\n # global acc_list_for_betas\n remaining, true_ratio = check_remaining_channels(net)\n pruned_ratio = 1. - true_ratio\n with torch.no_grad():\n for data, target in dataloader:\n data, target = data.to(device), target.to(device)\n output = net(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item()\n pred = output.max(1)[1]\n correct += (pred == target).float().sum().item()\n\n test_loss /= len(dataloader.dataset)\n acc = correct / len(dataloader.dataset)\n logging.info('Val set: Average loss: {:.4f}, Accuracy: {:.4f}\\n'.format(\n test_loss, acc\n ))\n if acc > best_accuracy:\n if ratio - pruned_ratio < 0.05: # Do not save the models before the 50th epoch\n logging.info('Pruned ratio / Desire pruned ratio: {:.2f}/{:.2f}'.format(pruned_ratio, ratio))\n logging.info(\"Saving the model.....\")\n if smooth:\n if not os.path.isdir('checkpoints/'+name+'/smooth/'):\n os.mkdir('checkpoints/'+name+'/smooth/')\n save_path = './checkpoints/'+name+'/smooth/'+'gfbsv2_smooth_epoch_{}_acc_{:.4f}_rem_{:.2f}_des_{:.2f}.pth'.format(str(epoch), acc, true_ratio, ratio)\n else:\n save_path = './checkpoints/'+name+'/gfbsv2_epoch_{}_acc_{:.4f}_r_{:.2f}.pth'.format(str(epoch), acc, true_ratio)\n save_model(net, acc, epoch, optimizer, scheduler, name, save_path)\n \n best_accuracy = acc\n\ndef finetune_and_evaluate(net, trainloader, testloader, optimizer, scheduler, total_epochs, start_epoch, name, ratio, smooth):\n # Without +1: 0~299; with +1: 1~300\n for epoch in range(start_epoch + 1, total_epochs + 1):\n\n # Run one epoch for both train and test\n logging.info(\"Epoch {}/{}\".format(epoch, total_epochs))\n print(\"Current time:\", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n\n # compute number of batches in one epoch(one full pass over the training set)\n finetune_train(net, optimizer, trainloader, epoch)\n # logging.info('Learning_rate: %.4f' % (scheduler.get_last_lr()[0]))\n # writer.add_scalar('Learning_rate', epoch, torch.tensor(scheduler.get_last_lr()))\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n finetune_test(net, testloader, optimizer, scheduler, epoch, name, ratio, smooth)\n\nclass Hook():\n def __init__(self, module, backward=False):\n if backward==False:\n self.hook = module.register_forward_hook(self.hook_fn)\n else:\n self.hook = module.register_backward_hook(self.hook_fn)\n def hook_fn(self, module, grad_in, grad_out):\n grad_weight = grad_in[1]\n grad_weight = grad_weight.abs().clone().detach()\n grad_weight_norm = grad_weight / torch.norm(grad_weight, 2)\n grad_bias = grad_in[2]\n grad_bias = grad_bias.clone().detach()\n grad_bias_norm = grad_bias / torch.norm(grad_bias, 2)\n self.grad_weight = grad_weight_norm\n self.grad_bias = grad_bias_norm\n self.grad_out = grad_out\n def close(self):\n self.hook.remove()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Evaluate the Importance of Each Layer')\n parser.add_argument('--net', default='gatevgg16', type=str, choices=list(MODEL_DICT.keys()), help='network used for training')\n parser.add_argument('--dataset', default='cifar10', type=str, 
help='dataset used for training')\n parser.add_argument('--lr', default=0.1, type=float, help='learning rate')\n parser.add_argument('--p', default=0.7, type=float, help='channel pruned ratio')\n parser.add_argument('--smooth', '-s', action='store_true', help='finetune the network for 10 epochs after the pruning of each layer')\n parser.add_argument('--beta', default=True, help='use beta information or not')\n parser.add_argument('--beta_only', action='store_true', help='use beta information or not')\n parser.add_argument('--cosine', action='store_true', help='use cosine lr rate')\n parser.add_argument('--w_beta', default=0.05, type=float, help='beta weight')\n parser.add_argument('--checkpoint', default=None, help='The checkpoint file (.pth)')\n parser.add_argument('--epochs', default=300, help='The number of training epochs')\n parser.add_argument('--bs', default=128, type=int, help='The number of training epochs')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\n args = parser.parse_args()\n logging.info(args)\n\n trainloader, valloader, testloader = load_data(args.dataset, args.bs)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n net = MODEL_DICT[args.net].to(device)\n logging.info('==> Building model.. '+str(args.net)+str(net))\n\n # Setup best accuracy for comparing and model checkpoints\n best_accuracy = 0.90\n torch.manual_seed(args.seed)\n if device == 'cuda':\n net = torch.nn.DataParallel(net)\n torch.backends.cudnn.benchmark = True\n torch.cuda.manual_seed(args.seed)\n \n if args.checkpoint:\n logging.info('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoints'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load(args.checkpoint)\n net.load_state_dict(checkpoint['net'])\n\n optimizer1 = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=0.1,\n momentum=0.9, weight_decay=1e-4)\n\n # global bn_grad_list\n # bn_grad_list = []\n # handle_list = []\n hook_dic = {}\n for name, m in net.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n # print('instance weight', m.weight)\n hook_dic[name] = Hook(m, backward=True)\n print('Register hook for ', name, ' with size ', m.weight.size())\n # m.weight.register_hook(grad_hook)\n # handle = m.weight.register_hook(grad_hook)\n # handle_list.append(handle)\n\n net = gfbs(net, optimizer1, valloader, 1)\n if 'vgg' in args.net:\n gamma_grad_dict = dict((bn2gatevgg(k), v.grad_weight) for (k, v) in hook_dic.items())\n elif 'resnet' in args.net:\n gamma_grad_dict = dict((bn2gateresnet(k), v.grad_weight) for (k, v) in hook_dic.items())\n elif 'mobilenet' in args.net:\n gamma_grad_dict = dict((bn2mobilenet(k), v.grad_weight) for (k, v) in hook_dic.items())\n else:\n raise NotImplementedError\n # for hook in hook_dic.keys():\n # print(hook)\n # print(hook_dic[hook].grad_weight)\n # for handle in handle_list: # Release memory by removing handles\n # handle.remove()\n # for named_params in net.named_parameters():\n # name, params = named_params\n # print(name)\n ############################## Get toremove channels ##############################\n gate_dict = {}\n gamma_dict = {}\n gamma_list = []\n beta_dict = {}\n beta_list = {}\n for named_params in net.named_parameters():\n name, params = named_params\n if 'weight' in name and len(params.shape) == 1:\n if 'vgg' in args.net:\n name = bn2gatevgg(name)\n elif 'resnet' in args.net:\n name = bn2gateresnet(name)\n\n gate_dict[name] = int(params.shape[0])\n gammas_pre_norm = 
params.abs().clone().detach()\n \n gammas_norm = gammas_pre_norm / torch.norm(gammas_pre_norm, 2) # Norm gamma\n gamma_dict[name] = gammas_norm\n if 'bias' in name and len(params.shape) == 1 and 'classifier' not in name:\n if 'vgg' in args.net:\n name = bn2gatevgg(name)\n elif 'resnet' in args.net:\n name = bn2gateresnet(name)\n if name in gamma_dict.keys(): # Remove Conv2d biases\n betas_pre_norm = params.clone().detach()\n betas_norm = betas_pre_norm / torch.norm(betas_pre_norm, 2)\n beta_dict[name] = betas_norm\n\n logging.info(gate_dict)\n\n # *************** Get GFBS for BN ****************\n # gamma_dict: a dict that contains the gamma values for each layer\n # gamma_grad_dict: a dict that contains the grad of the gamma values for each layer\n for gate_layer in gamma_dict.keys():\n assert gate_layer in gamma_grad_dict\n assert gate_layer in beta_dict\n assert gamma_dict[gate_layer].shape == gamma_grad_dict[gate_layer].shape == beta_dict[gate_layer].shape\n taylor = gamma_dict[gate_layer].cpu() * gamma_grad_dict[gate_layer].cpu()\n ############################### Whether to employ beta information\n if args.beta:\n taylor += beta_dict[gate_layer].cpu() * args.w_beta\n if args.beta_only:\n taylor = beta_dict[gate_layer].cpu() * args.w_beta\n gamma_list.extend(taylor)\n # **************************************************\n \n # Release hooks\n for k, hook in hook_dic.items():\n hook.close()\n\n # bn_grad_list = bn_grad_list[::-1]\n # for lyrs, chns in enumerate(bn_grad_list):\n # assert chns.shape[0] == gate_dict[list(gate_dict)[lyrs]] # Check if the hooks match the gate params\n # bn_grad_list = [layer_bn_grad.abs() / torch.norm(layer_bn_grad.abs(), 2) for layer_bn_grad in bn_grad_list] # Norm grad\n # bn_grad_list_cat = torch.cat(bn_grad_list).tolist()\n # gamma_list = [a * b for a, b in zip(gamma_list, bn_grad_list_cat)]\n # bn_grad_list.clear()\n\n acc_sort_idx = sorted(range(len(gamma_list)), key=lambda k: gamma_list[k])[::-1]\n # logging.info(acc_sort_idx)\n remove_dic, remove_dic_count = mapper(acc_sort_idx, gate_dict, args.p)\n ######################### If print to remove channels, uncommit this line #############\n # logging.info(remove_dic)\n #######################################################################################\n logging.info('Total remove channel amount: ' + str(dict(sorted(remove_dic_count.items()))))\n\n ############################## Remove channels ##############################\n remove_dic = dict(sorted(remove_dic.items()))\n for gate_layer in remove_dic:\n assert remove_dic_count[gate_layer] < gate_dict[gate_layer]\n for channel in remove_dic[gate_layer]:\n net.state_dict()[gate_layer][:, channel, :, :].data.copy_(torch.zeros_like(net.state_dict()[gate_layer][:, channel, :, :].data))\n if args.smooth:\n logging.info('Finished removing channels in '+str(gate_layer)+', finetune for several epochs.')\n if args.dataset == 'cifar10':\n best_accuracy = 0.90\n optimizer2 = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=0.01,\n momentum=0.9, weight_decay=5e-4)\n scheduler = MultiStepLR(optimizer2, milestones=[5, 10], gamma=0.1)\n elif args.dataset == 'cifar100':\n best_accuracy = 0.68\n optimizer2 = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=0.01,\n momentum=0.9, weight_decay=5e-4)\n scheduler = MultiStepLR(optimizer2, milestones=[5, 10], gamma=0.2)\n if args.cosine:\n scheduler = CosineAnnealingLR(optimizer2, 5, 30, 5, 0.0)\n start_epoch = 0\n finetune_and_evaluate(net, trainloader, testloader, optimizer2, 
scheduler, total_epochs=30, start_epoch=start_epoch, name=args.net, ratio=args.p, smooth=args.smooth)\n logging.info('Best accuracy: {:.4f}'.format(best_accuracy))\n\n logging.info('Finished removing')\n optimizer2 = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=0.01,\n momentum=0.9, weight_decay=5e-4)\n scheduler = MultiStepLR(optimizer2, milestones=[10, 15], gamma=0.2)\n if args.cosine:\n scheduler = CosineAnnealingLR(optimizer2, 5, 30, 5, 0.0)\n start_epoch = 0\n finetune_and_evaluate(net, trainloader, testloader, optimizer2, scheduler, total_epochs=45, start_epoch=start_epoch, name=args.net, ratio=args.p, smooth=args.smooth)\n logging.info('Best accuracy: {:.4f}'.format(best_accuracy))\n # state = {\n # 'net': net.state_dict(),\n # }\n # if not os.path.isdir('checkpoints/'):\n # os.mkdir('checkpoints/')\n # if not os.path.isdir('checkpoints/'+args.net):\n # os.mkdir('checkpoints/'+args.net)\n # save_path = 'checkpoints/'+args.net+'/gfbspruned.pth'\n \n if not args.smooth:\n if args.dataset == 'cifar100':\n best_accuracy = 0.68\n acc_list_for_betas = []\n optimizer3 = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=0.1,\n momentum=0.9, weight_decay=5e-4)\n scheduler = MultiStepLR(optimizer3, milestones=[90, 180, 240], gamma=0.2)\n start_epoch = 0\n finetune_and_evaluate(net, trainloader, testloader, optimizer3, scheduler, total_epochs=args.epochs, start_epoch=start_epoch, name=args.net, ratio=args.p, smooth=args.smooth)\n logging.info('Best accuracy: {:.4f}'.format(best_accuracy))\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.norm", "torch.cuda.manual_seed", "torch.load", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.utils.data.DataLoader", "torch.sum", "torch.no_grad", "torch.cuda.is_available", "torch.nn.DataParallel" ] ]
Leputa/MANN-meta-learning
[ "c2f32ca6db9bf545ffb9c0da0bc9fb0d93074055" ]
[ "mann/mann_cell.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom mann.utils.tf_utils import variable_one_hot\n\n\nclass MANNCell():\n def __init__(self, lstm_size, memory_size, memory_dim, nb_reads,\n gamma=0.95, reuse=False):\n self.lstm_size = lstm_size\n self.memory_size = memory_size\n self.memory_dim = memory_dim\n self.nb_reads = nb_reads\n self.reuse = reuse\n self.step = 0\n self.gamma = gamma\n self.controller = tf.nn.rnn_cell.BasicLSTMCell(self.lstm_size)\n\n\n def __call__(self, input, prev_state):\n M_prev, r_prev, controller_state_prev, wu_prev, wr_prev = \\\n prev_state[\"M\"], prev_state[\"read_vector\"], prev_state[\"controller_state\"], prev_state[\"wu\"], prev_state[\"wr\"]\n\n controller_input = tf.concat([input, wr_prev], axis=-1)\n with tf.variable_scope(\"controller\", reuse=self.reuse):\n controller_hidden_t, controller_state_t = self.controller(controller_input, controller_state_prev)\n\n parameter_dim_per_head = self.memory_dim * 2 + 1\n parameter_total_dim = parameter_dim_per_head * self.nb_reads # []\n\n with tf.variable_scope(\"o2p\", reuse=(self.step > 0) or self.reuse):\n parameter = tf.layers.dense(\n inputs=controller_hidden_t,\n units=parameter_total_dim,\n kernel_initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1),\n bias_initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1),\n )\n\n indices_prev, wlu_prev = self.least_used(wu_prev)\n\n k = tf.tanh(parameter[:, 0:self.nb_reads * self.memory_dim], name=\"k\")\n a = tf.tanh(parameter[:, self.nb_reads * self.memory_dim: 2 * self.nb_reads * self.memory_dim], name=\"a\")\n sig_alpha = tf.sigmoid(parameter[:, -self.nb_reads: ], name=\"sig_alpha\")\n\n wr_t = self.read_head_addressing(k, M_prev)\n ww_t = self.write_head_addressing(sig_alpha, wr_prev, wlu_prev)\n\n wu_t = self.gamma * wu_prev + tf.reduce_sum(wr_t, axis=1) + tf.reduce_sum(ww_t, axis=1)\n\n # \"Prior to writing to memory, the least used memory location set to zero\"\n M_t = M_prev * tf.expand_dims(1. - tf.one_hot(indices_prev[:, -1], self.memory_size), dim=2)\n M_t = M_t + tf.matmul(tf.transpose(ww_t, perm=[0,2,1]), tf.reshape(a, shape=(a.get_shape()[0], self.nb_reads, self.memory_dim)))\n\n r_t = tf.reshape(tf.matmul(wr_t, M_t), shape=(r_prev.get_shape()[0], self.nb_reads * self.memory_dim))\n\n\n state = {\n \"M\": M_t,\n \"read_vector\": r_t,\n \"controller_state\": controller_state_t,\n \"wu\": wu_t,\n \"wr\": tf.reshape(wr_t, shape=(wr_t.get_shape()[0], self.nb_reads * self.memory_size)),\n }\n\n NTM_output = tf.concat([controller_hidden_t, r_t], axis=-1)\n\n self.step += 1\n return NTM_output, state\n\n\n def read_head_addressing(self, k, M_prev, eps=1e-8):\n with tf.variable_scope(\"read_head_addressing\"):\n k = tf.reshape(k, shape=(k.get_shape()[0], self.nb_reads, self.memory_dim))\n inner_product = tf.matmul(k, tf.transpose(M_prev, [0, 2, 1]))\n\n k_norm = tf.sqrt(tf.expand_dims(tf.reduce_sum(tf.square(k), 2), 2))\n M_norm = tf.sqrt(tf.expand_dims(tf.reduce_sum(tf.square(M_prev), 2), 1))\n\n norm_product = k_norm * M_norm\n K = inner_product / (norm_product + eps)\n return tf.nn.softmax(K)\n\n def write_head_addressing(self, sig_alpha, wr_prev, wlu_prev):\n with tf.variable_scope(\"write_head_addressing\"):\n sig_alpha = tf.expand_dims(sig_alpha, axis=-1)\n wr_prev = tf.reshape(wr_prev, shape=(wr_prev.get_shape()[0], self.nb_reads, self.memory_size))\n return sig_alpha * wr_prev + (1. 
- sig_alpha) * tf.expand_dims(wlu_prev, axis=1)\n\n def least_used(self, w_u):\n _, indices = tf.nn.top_k(w_u, k=self.memory_size)\n wlu = tf.cast(tf.slice(indices, [0, self.memory_size - self.nb_reads], [w_u.get_shape()[0], self.nb_reads]), dtype=tf.int32)\n wlu = tf.reduce_sum(tf.one_hot(wlu, self.memory_size), axis=1)\n return indices, wlu\n\n def zero_state(self, batch_size, dtype):\n with tf.variable_scope(\"init\", reuse=self.reuse):\n M_0 = tf.constant(np.ones([batch_size, self.memory_size, self.memory_dim]) * 1e-6, dtype=tf.float32)\n r_0 = tf.zeros(shape=(batch_size, self.nb_reads * self.memory_dim))\n controller_state_0 = self.controller.zero_state(batch_size, dtype)\n wu_0 = variable_one_hot(shape=(batch_size, self.memory_size))\n wr_0 = variable_one_hot(shape=(batch_size, self.memory_size * self.nb_reads))\n\n state ={\n \"M\": M_0,\n \"read_vector\":r_0,\n \"controller_state\": controller_state_0,\n \"wu\": wu_0,\n \"wr\": wr_0,\n }\n\n return state\n" ]
[ [ "tensorflow.nn.rnn_cell.BasicLSTMCell", "tensorflow.matmul", "tensorflow.concat", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.zeros", "tensorflow.random_uniform_initializer", "tensorflow.reduce_sum", "tensorflow.sigmoid", "tensorflow.expand_dims", "numpy.ones", "tensorflow.nn.top_k", "tensorflow.tanh", "tensorflow.one_hot", "tensorflow.square", "tensorflow.variable_scope" ] ]
fabiofumarola/pytorch-toolbelt
[ "bf2915c4c4ac602b9e177a8d8ff796e8268b5def" ]
[ "pytorch_toolbelt/inference/tta.py" ]
[ "\"\"\"Implementation of GPU-friendly test-time augmentation for image segmentation and classification tasks.\n\nDespite this is called test-time augmentation, these method can be used at training time as well since all\ntransformation written in PyTorch and respect gradients flow.\n\"\"\"\nfrom functools import partial\nfrom typing import Tuple, List, Optional, Union, Callable, Dict\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.nn.functional import interpolate\nfrom ..utils.support import pytorch_toolbelt_deprecated\nfrom . import functional as F\n\n__all__ = [\n \"MultiscaleTTAWrapper\",\n \"TTAWrapper\",\n \"GeneralizedTTA\",\n \"d2_image_augment\",\n \"d2_image_deaugment\",\n \"d4_image2label\",\n \"d4_image2mask\",\n \"d4_image_augment\",\n \"d4_image_deaugment\",\n \"ms_image_augment\",\n \"ms_image_deaugment\",\n \"fivecrop_image2label\",\n \"fliplr_image2label\",\n \"fliplr_image2mask\",\n \"fliplr_image_augment\",\n \"fliplr_image_deaugment\",\n \"flips_augment\",\n \"flips_deaugment\",\n \"tencrop_image2label\",\n]\n\nMaybeStrOrCallable = Optional[Union[str, Callable]]\n\n\ndef fliplr_image2label(model: nn.Module, image: Tensor) -> Tensor:\n \"\"\"Test-time augmentation for image classification that averages predictions\n for input image and horizontally flipped one.\n\n :param model:\n :param image:\n :return:\n \"\"\"\n output = model(image) + model(F.torch_fliplr(image))\n one_over_2 = float(1.0 / 2.0)\n return output * one_over_2\n\n\ndef fivecrop_image2label(model: nn.Module, image: Tensor, crop_size: Tuple) -> Tensor:\n \"\"\"Test-time augmentation for image classification that takes five crops out of input tensor (4 on corners and central)\n and averages predictions from them.\n\n :param model: Classification model\n :param image: Input image tensor\n :param crop_size: Crop size. 
Must be smaller than image size\n :return: Averaged logits\n \"\"\"\n image_height, image_width = int(image.size(2)), int(image.size(3))\n crop_height, crop_width = crop_size\n\n assert crop_height <= image_height\n assert crop_width <= image_width\n\n bottom_crop_start = image_height - crop_height\n right_crop_start = image_width - crop_width\n crop_tl = image[..., :crop_height, :crop_width]\n crop_tr = image[..., :crop_height, right_crop_start:]\n crop_bl = image[..., bottom_crop_start:, :crop_width]\n crop_br = image[..., bottom_crop_start:, right_crop_start:]\n\n assert crop_tl.size(2) == crop_height\n assert crop_tr.size(2) == crop_height\n assert crop_bl.size(2) == crop_height\n assert crop_br.size(2) == crop_height\n\n assert crop_tl.size(3) == crop_width\n assert crop_tr.size(3) == crop_width\n assert crop_bl.size(3) == crop_width\n assert crop_br.size(3) == crop_width\n\n center_crop_y = (image_height - crop_height) // 2\n center_crop_x = (image_width - crop_width) // 2\n\n crop_cc = image[..., center_crop_y : center_crop_y + crop_height, center_crop_x : center_crop_x + crop_width]\n assert crop_cc.size(2) == crop_height\n assert crop_cc.size(3) == crop_width\n\n output = model(crop_tl) + model(crop_tr) + model(crop_bl) + model(crop_br) + model(crop_cc)\n one_over_5 = float(1.0 / 5.0)\n return output * one_over_5\n\n\ndef tencrop_image2label(model: nn.Module, image: Tensor, crop_size: Tuple) -> Tensor:\n \"\"\"Test-time augmentation for image classification that takes five crops out of input tensor (4 on corners and central)\n and averages predictions from them and from their horizontally-flipped versions (10-Crop TTA).\n\n :param model: Classification model\n :param image: Input image tensor\n :param crop_size: Crop size. Must be smaller than image size\n :return: Averaged logits\n \"\"\"\n image_height, image_width = int(image.size(2)), int(image.size(3))\n crop_height, crop_width = crop_size\n\n assert crop_height <= image_height\n assert crop_width <= image_width\n\n bottom_crop_start = image_height - crop_height\n right_crop_start = image_width - crop_width\n crop_tl = image[..., :crop_height, :crop_width]\n crop_tr = image[..., :crop_height, right_crop_start:]\n crop_bl = image[..., bottom_crop_start:, :crop_width]\n crop_br = image[..., bottom_crop_start:, right_crop_start:]\n\n assert crop_tl.size(2) == crop_height\n assert crop_tr.size(2) == crop_height\n assert crop_bl.size(2) == crop_height\n assert crop_br.size(2) == crop_height\n\n assert crop_tl.size(3) == crop_width\n assert crop_tr.size(3) == crop_width\n assert crop_bl.size(3) == crop_width\n assert crop_br.size(3) == crop_width\n\n center_crop_y = (image_height - crop_height) // 2\n center_crop_x = (image_width - crop_width) // 2\n\n crop_cc = image[..., center_crop_y : center_crop_y + crop_height, center_crop_x : center_crop_x + crop_width]\n assert crop_cc.size(2) == crop_height\n assert crop_cc.size(3) == crop_width\n\n output = (\n model(crop_tl)\n + model(F.torch_fliplr(crop_tl))\n + model(crop_tr)\n + model(F.torch_fliplr(crop_tr))\n + model(crop_bl)\n + model(F.torch_fliplr(crop_bl))\n + model(crop_br)\n + model(F.torch_fliplr(crop_br))\n + model(crop_cc)\n + model(F.torch_fliplr(crop_cc))\n )\n\n one_over_10 = float(1.0 / 10.0)\n return output * one_over_10\n\n\ndef fliplr_image2mask(model: nn.Module, image: Tensor) -> Tensor:\n \"\"\"Test-time augmentation for image segmentation that averages predictions\n for input image and horizontally flipped one.\n\n For segmentation we need to reverse the 
transformation after making a prediction\n on augmented input.\n :param model: Model to use for making predictions.\n :param image: Model input.\n :return: Arithmetically averaged predictions\n \"\"\"\n output = model(image) + F.torch_fliplr(model(F.torch_fliplr(image)))\n one_over_2 = float(1.0 / 2.0)\n return output * one_over_2\n\n\ndef d4_image2label(model: nn.Module, image: Tensor) -> Tensor:\n \"\"\"Test-time augmentation for image classification that averages predictions\n of all D4 augmentations applied to input image.\n\n :param model: Model to use for making predictions.\n :param image: Model input.\n :return: Arithmetically averaged predictions\n \"\"\"\n output = model(image)\n\n for aug in [F.torch_rot90, F.torch_rot180, F.torch_rot270]:\n x = model(aug(image))\n output = output + x\n\n image = F.torch_transpose(image)\n\n for aug in [F.torch_none, F.torch_rot90, F.torch_rot180, F.torch_rot270]:\n x = model(aug(image))\n output = output + x\n\n one_over_8 = float(1.0 / 8.0)\n return output * one_over_8\n\n\ndef d4_image2mask(model: nn.Module, image: Tensor) -> Tensor:\n \"\"\"Test-time augmentation for image segmentation that averages predictions\n of all D4 augmentations applied to input image.\n\n For segmentation we need to reverse the augmentation after making a prediction\n on augmented input.\n :param model: Model to use for making predictions.\n :param image: Model input.\n :return: Arithmetically averaged predictions\n \"\"\"\n output = model(image)\n\n for aug, deaug in zip(\n [F.torch_rot90, F.torch_rot180, F.torch_rot270], [F.torch_rot270, F.torch_rot180, F.torch_rot90]\n ):\n x = deaug(model(aug(image)))\n output += x\n\n image = F.torch_transpose(image)\n\n for aug, deaug in zip(\n [F.torch_none, F.torch_rot90, F.torch_rot180, F.torch_rot270],\n [F.torch_none, F.torch_rot270, F.torch_rot180, F.torch_rot90],\n ):\n x = deaug(model(aug(image)))\n output += F.torch_transpose(x)\n\n one_over_8 = float(1.0 / 8.0)\n output *= one_over_8\n return output\n\n\ndef fliplr_image_augment(image: Tensor) -> Tensor:\n \"\"\"\n Augment input tensor using flip from left to right\n Args:\n image: Tensor of [B,C,H,W] shape\n\n Returns:\n Tensor of [B * 2, C, H, W] shape with:\n - Original tensor\n - Horizontally-flipped tensor\n\n \"\"\"\n return torch.cat([image, F.torch_fliplr(image)], dim=0)\n\n\ndef fliplr_image_deaugment(image: Tensor, reduction: MaybeStrOrCallable = \"mean\") -> Tensor:\n \"\"\"\n Deaugment input tensor (output of the model) assuming the input was fliplr-augmented image (See fliplr_image_augment).\n Args:\n image: Tensor of [B * 2, C, H, W] shape\n reduction: Reduction mode for aggregating outputs. 
Default is taking mean.\n\n Returns:\n Tensor of [B, C, H, W] shape if reduction is not None or \"none\", otherwise returns de-augmented tensor of\n [2, B, C, H, W] shape\n \"\"\"\n assert image.size(0) % 2 == 0\n\n b1, b2 = torch.chunk(image, 2)\n\n image: Tensor = torch.stack([b1, F.torch_fliplr(b2)])\n\n if reduction == \"mean\":\n image = image.mean(dim=0)\n if reduction == \"sum\":\n image = image.sum(dim=0)\n if callable(reduction):\n image = reduction(image, dim=0)\n return image\n\n\ndef d2_image_augment(image: Tensor) -> Tensor:\n \"\"\"\n Augment input tensor using D2 symmetry group\n Args:\n image: Tensor of [B,C,H,W] shape\n\n Returns:\n Tensor of [B * 4, C, H, W] shape with:\n - Original tensor\n - Original tensor rotated by 180 degrees\n - Horizontally-flipped tensor\n - Vertically-flipped tensor\n\n \"\"\"\n return torch.cat([image, F.torch_rot180(image), F.torch_fliplr(image), F.torch_flipud(image),], dim=0,)\n\n\ndef d2_image_deaugment(image: Tensor, reduction: MaybeStrOrCallable = \"mean\") -> Tensor:\n \"\"\"\n Deaugment input tensor (output of the model) assuming the input was D2-augmented image (See d2_image_augment).\n Args:\n image: Tensor of [B * 4, C, H, W] shape\n reduction: Reduction mode for aggregating outputs. Default is taking mean.\n\n Returns:\n Tensor of [B, C, H, W] shape if reduction is not None or \"none\", otherwise returns de-augmented tensor of\n [4, B, C, H, W] shape\n \"\"\"\n assert image.size(0) % 4 == 0\n\n b1, b2, b3, b4 = torch.chunk(image, 4)\n\n image: Tensor = torch.stack(\n [b1, F.torch_rot180(b2), F.torch_fliplr(b3), F.torch_flipud(b4),]\n )\n\n if reduction == \"mean\":\n image = image.mean(dim=0)\n if reduction == \"sum\":\n image = image.sum(dim=0)\n if callable(reduction):\n image = reduction(image, dim=0)\n return image\n\n\ndef d4_image_augment(image: Tensor) -> Tensor:\n \"\"\"\n Augment input tensor using D4 symmetry group\n Args:\n image: Tensor of [B,C,H,W] shape\n\n Returns:\n Tensor of [B * 8, C, H, W] shape with:\n - Original tensor\n - Original tensor rotated by 90 degrees\n - Original tensor rotated by 180 degrees\n - Original tensor rotated by 270 degrees\n - Transposed tensor\n - Transposed tensor rotated by 90 degrees\n - Transposed tensor rotated by 180 degrees\n - Transposed tensor rotated by 270 degrees\n\n \"\"\"\n if image.size(2) != image.size(3):\n raise ValueError(\n f\"Input tensor must have number of rows equal to number of cols. 
\"\n f\"Got input tensor of shape {image.size()}\"\n )\n image_t = F.torch_transpose(image)\n return torch.cat(\n [\n image,\n F.torch_rot90_cw(image),\n F.torch_rot180(image),\n F.torch_rot90_ccw(image),\n image_t,\n F.torch_rot90_cw(image_t),\n F.torch_rot180(image_t),\n F.torch_rot90_ccw(image_t),\n ],\n dim=0,\n )\n\n\ndef d4_image_deaugment(image: Tensor, reduction: MaybeStrOrCallable = \"mean\") -> Tensor:\n \"\"\"\n Deaugment input tensor (output of the model) assuming the input was D4-augmented image (See d4_augment).\n Args:\n image: Tensor of [B * 8, C, H, W] shape\n average: If True performs averaging of 8 outputs, otherwise - summation.\n\n Returns:\n Tensor of [B, C, H, W] shape if reduction is not None or \"none\", otherwise returns de-augmented tensor of\n [4, B, C, H, W] shape\n\n \"\"\"\n assert image.size(0) % 8 == 0\n\n b1, b2, b3, b4, b5, b6, b7, b8 = torch.chunk(image, 8)\n\n image: Tensor = torch.stack(\n [\n b1,\n F.torch_rot90_ccw(b2),\n F.torch_rot180(b3),\n F.torch_rot90_cw(b4),\n F.torch_transpose(b5),\n F.torch_rot90_ccw_transpose(b6),\n F.torch_rot180_transpose(b7),\n F.torch_rot90_cw_transpose(b8),\n ]\n )\n\n if reduction == \"mean\":\n image = image.mean(dim=0)\n if reduction == \"sum\":\n image = image.sum(dim=0)\n if callable(reduction):\n image = reduction(image, dim=0)\n return image\n\n\ndef flips_augment(image: Tensor) -> Tensor:\n \"\"\"\n Augment input tensor by adding vertically and horizontally flipped images to it.\n\n Args:\n image: Tensor of [B,C,H,W] shape\n\n Returns:\n Tensor of [B * 3, C, H, W] shape with:\n - Original tensor\n - Horizontally-flipped tensor\n - Vertically-flipped\n\n \"\"\"\n return torch.cat([image, F.torch_fliplr(image), F.torch_flipud(image)], dim=0)\n\n\ndef flips_deaugment(image: Tensor, reduction: MaybeStrOrCallable = \"mean\",) -> Tensor:\n \"\"\"\n Deaugment input tensor (output of the model) assuming the input was flip-augmented image (See flips_augment).\n Args:\n image: Tensor of [B * 3, C, H, W] shape\n reduction: If True performs averaging of 8 outputs, otherwise - summation.\n\n Returns:\n Tensor of [B, C, H, W] shape.\n \"\"\"\n batch_size: int = image.shape[0] // 3\n image: Tensor = torch.stack(\n [\n image[batch_size * 0 : batch_size * 1],\n F.torch_fliplr(image[batch_size * 1 : batch_size * 2]),\n F.torch_flipud(image[batch_size * 2 : batch_size * 3]),\n ]\n )\n\n if reduction == \"mean\":\n image = image.mean(dim=0)\n if reduction == \"sum\":\n image = image.sum(dim=0)\n if callable(reduction):\n image = reduction(image, dim=0)\n return image\n\n\n@pytorch_toolbelt_deprecated(\"This class is deprecated. Please use GeneralizedTTA instead\")\nclass TTAWrapper(nn.Module):\n def __init__(self, model: nn.Module, tta_function, **kwargs):\n super().__init__()\n self.model = model\n self.tta = partial(tta_function, **kwargs)\n\n def forward(self, *input):\n return self.tta(self.model, *input)\n\n\ndef ms_image_augment(\n image: Tensor, size_offsets: List[Union[int, Tuple[int, int]]], mode=\"bilinear\", align_corners=True\n) -> List[Tensor]:\n \"\"\"\n Multi-scale image augmentation. 
This function create list of resized tensors from the input one.\n \"\"\"\n batch_size, channels, rows, cols = image.size()\n augmented_inputs = []\n for offset in size_offsets:\n # TODO: Add support of tuple (row_offset, col_offset)\n if offset == 0:\n augmented_inputs.append(image)\n else:\n scale_size = rows + offset, cols + offset\n scaled_input = torch.nn.functional.interpolate(\n image, size=scale_size, mode=mode, align_corners=align_corners\n )\n augmented_inputs.append(scaled_input)\n return augmented_inputs\n\n\ndef ms_image_deaugment(\n images: List[Tensor],\n size_offsets: List[Union[int, Tuple[int, int]]],\n reduction: MaybeStrOrCallable = \"mean\",\n mode: str = \"bilinear\",\n align_corners: bool = True,\n) -> Tensor:\n if len(images) != len(size_offsets):\n raise ValueError(\"Number of images must be equal to number of size offsets\")\n\n deaugmented_outputs = []\n for image, offset in zip(images, size_offsets):\n if offset == 0:\n deaugmented_outputs.append(image)\n else:\n batch_size, channels, rows, cols = image.size()\n # TODO: Add support of tuple (row_offset, col_offset)\n original_size = rows - offset, cols - offset\n scaled_image = torch.nn.functional.interpolate(\n image, size=original_size, mode=mode, align_corners=align_corners\n )\n deaugmented_outputs.append(scaled_image)\n\n deaugmented_outputs = torch.stack(deaugmented_outputs)\n if reduction == \"mean\":\n deaugmented_outputs = deaugmented_outputs.mean(dim=0)\n if reduction == \"sum\":\n deaugmented_outputs = deaugmented_outputs.sum(dim=0)\n if callable(reduction):\n deaugmented_outputs = reduction(deaugmented_outputs, dim=0)\n\n return deaugmented_outputs\n\n\n@pytorch_toolbelt_deprecated(\"This class is deprecated. Please use MultiscaleTTA instead\")\nclass MultiscaleTTAWrapper(nn.Module):\n \"\"\"\n Multiscale TTA wrapper module\n \"\"\"\n\n def __init__(self, model: nn.Module, scale_levels: List[float] = None, size_offsets: List[int] = None):\n \"\"\"\n Initialize multi-scale TTA wrapper\n\n :param model: Base model for inference\n :param scale_levels: List of additional scale levels,\n e.g: [0.5, 0.75, 1.25]\n \"\"\"\n super().__init__()\n assert scale_levels or size_offsets, \"Either scale_levels or size_offsets must be set\"\n assert not (scale_levels and size_offsets), \"Either scale_levels or size_offsets must be set\"\n self.model = model\n self.scale_levels = scale_levels\n self.size_offsets = size_offsets\n\n def forward(self, input: Tensor) -> Tensor:\n h = input.size(2)\n w = input.size(3)\n\n out_size = h, w\n output = self.model(input)\n\n if self.scale_levels:\n for scale in self.scale_levels:\n dst_size = int(h * scale), int(w * scale)\n input_scaled = interpolate(input, dst_size, mode=\"bilinear\", align_corners=False)\n output_scaled = self.model(input_scaled)\n output_scaled = interpolate(output_scaled, out_size, mode=\"bilinear\", align_corners=False)\n output += output_scaled\n output /= 1.0 + len(self.scale_levels)\n elif self.size_offsets:\n for offset in self.size_offsets:\n dst_size = int(h + offset), int(w + offset)\n input_scaled = interpolate(input, dst_size, mode=\"bilinear\", align_corners=False)\n output_scaled = self.model(input_scaled)\n output_scaled = interpolate(output_scaled, out_size, mode=\"bilinear\", align_corners=False)\n output += output_scaled\n output /= 1.0 + len(self.size_offsets)\n\n return output\n\n\nclass GeneralizedTTA(nn.Module):\n \"\"\"\n Example:\n tta_model = GeneralizedTTA(model,\n augment_fn=tta.d2_image_augment,\n deaugment_fn={\n OUTPUT_MASK_KEY: 
tta.d2_image_deaugment,\n OUTPUT_EDGE_KEY: tta.d2_image_deaugment,\n },\n\n\n Notes:\n Input tensors must be square for D2/D4 or similar types of augmentation\n \"\"\"\n\n def __init__(\n self,\n model: Union[nn.Module, nn.DataParallel],\n augment_fn: Union[Callable, Dict[str, Callable], List[Callable]],\n deaugment_fn: Union[Callable, Dict[str, Callable], List[Callable]],\n ):\n super().__init__()\n self.model = model\n self.augment_fn = augment_fn\n self.deaugment_fn = deaugment_fn\n\n def forward(self, *input, **kwargs):\n # Augment & forward\n if isinstance(self.augment_fn, dict):\n if len(input) != 0:\n raise ValueError(\"Input for GeneralizedTTA must be exactly one tensor\")\n augmented_inputs = dict(\n (key, augment(value)) for (key, value), augment in zip(kwargs.items(), self.augment_fn)\n )\n outputs = self.model(**augmented_inputs)\n elif isinstance(self.augment_fn, (list, tuple)):\n if len(kwargs) != 0:\n raise ValueError(\"Input for GeneralizedTTA must be exactly one tensor\")\n augmented_inputs = [augment(x) for x, augment in zip(input, self.augment_fn)]\n outputs = self.model(*augmented_inputs)\n else:\n if len(input) != 1:\n raise ValueError(\"Input for GeneralizedTTA must be exactly one tensor\")\n if len(kwargs) != 0:\n raise ValueError(\"Input for GeneralizedTTA must be exactly one tensor\")\n augmented_input = self.augment_fn(input[0])\n outputs = self.model(augmented_input)\n\n # Deaugment outputs\n if isinstance(self.deaugment_fn, dict):\n if not isinstance(outputs, dict):\n raise ValueError(\"Output of the model must be a dict\")\n\n deaugmented_output = dict((key, self.deaugment_fn[key](value)) for (key, value) in outputs.items())\n elif isinstance(self.deaugment_fn, (list, tuple)):\n if not isinstance(outputs, (dict, tuple)):\n raise ValueError(\"Output of the model must be a dict\")\n\n deaugmented_output = [deaugment(value) for value, deaugment in zip(outputs, self.deaugment_fn)]\n else:\n deaugmented_output = self.deaugment_fn(outputs)\n\n return deaugmented_output\n" ]
[ [ "torch.chunk", "torch.nn.functional.interpolate", "torch.stack" ] ]
eneelo/chaospy
[ "da31792fa5d58c231a77e04234b32cb90df6c6d8" ]
[ "chaospy/quadrature/sparse_grid.py" ]
[ "\"\"\"Smolyak sparse grid constructor.\"\"\"\nfrom collections import defaultdict\nfrom itertools import product\n\nimport numpy\nfrom scipy.special import comb\n\nimport numpoly\nimport chaospy\n\n\ndef construct_sparse_grid(\n order,\n dist,\n growth=None,\n recurrence_algorithm=\"stieltjes\",\n rule=\"gaussian\",\n tolerance=1e-10,\n scaling=3,\n n_max=5000,\n):\n \"\"\"\n Smolyak sparse grid constructor.\n\n Args:\n order (int, numpy.ndarray):\n The order of the grid. If ``numpy.ndarray``, it overrides both\n ``dim`` and ``skew``.\n dist (chaospy.distributions.baseclass.Distribution):\n The distribution which density will be used as weight function.\n growth (bool, None):\n If True sets the growth rule for the quadrature rule to only\n include orders that enhances nested samples. Defaults to the same\n value as ``sparse`` if omitted.\n recurrence_algorithm (str):\n Name of the algorithm used to generate abscissas and weights in\n case of Gaussian quadrature scheme. If omitted, ``analytical`` will\n be tried first, and ``stieltjes`` used if that fails.\n rule (str):\n Rule for generating abscissas and weights. Either done with\n quadrature rules, or with random samples with constant weights.\n tolerance (float):\n The allowed relative error in norm between two quadrature orders\n before method assumes convergence.\n scaling (float):\n A multiplier the adaptive order increases with for each step\n quadrature order is not converged. Use 0 to indicate unit\n increments.\n n_max (int):\n The allowed number of quadrature points to use in approximation.\n\n Returns:\n (numpy.ndarray, numpy.ndarray):\n Abscissas and weights created from sparse grid rule. Flatten such\n that ``abscissas.shape == (len(dist), len(weights))``.\n\n Example:\n >>> distribution = chaospy.J(\n ... chaospy.Normal(0, 1), chaospy.Uniform(-1, 1))\n >>> abscissas, weights = construct_sparse_grid(1, distribution)\n >>> abscissas.round(4)\n array([[-1. , 0. , 0. , 0. , 1. ],\n [ 0. , -0.5774, 0. , 0.5774, 0. ]])\n >>> weights.round(4)\n array([ 0.5, 0.5, -1. , 0.5, 0.5])\n >>> abscissas, weights = construct_sparse_grid([2, 1], distribution)\n >>> abscissas.round(2)\n array([[-1.73, -1. , -1. , -1. , 0. , 1. , 1. , 1. , 1.73],\n [ 0. , -0.58, 0. , 0.58, 0. , -0.58, 0. , 0.58, 0. 
]])\n >>> weights.round(2)\n array([ 0.17, 0.25, -0.5 , 0.25, 0.67, 0.25, -0.5 , 0.25, 0.17])\n \"\"\"\n orders = order*numpy.ones(len(dist), dtype=int)\n growth = True if growth is None else growth\n\n assert isinstance(dist, chaospy.Distribution), \"dist must be chaospy.Distribution\"\n dist = dist if isinstance(dist, (chaospy.J, chaospy.Iid)) else chaospy.J(dist)\n\n if isinstance(rule, str):\n rule = (rule,)*len(dist)\n\n x_lookup, w_lookup = _construct_lookup(\n orders=orders,\n dists=dist,\n growth=growth,\n recurrence_algorithm=recurrence_algorithm,\n rules=rule,\n tolerance=tolerance,\n scaling=scaling,\n n_max=n_max,\n )\n collection = _construct_collection(\n order, dist, x_lookup, w_lookup)\n\n abscissas = sorted(collection)\n weights = numpy.array([collection[key] for key in abscissas])\n abscissas = numpy.array(abscissas).T\n return abscissas, weights\n\n\ndef _construct_collection(\n orders,\n dist,\n x_lookup,\n w_lookup,\n):\n \"\"\"Create a collection of {abscissa: weight} key-value pairs.\"\"\"\n order = numpy.min(orders)\n skew = orders-order\n\n # Indices and coefficients used in the calculations\n indices = numpoly.glexindex(\n order-len(dist)+1, order+1, dimensions=len(dist))\n coeffs = numpy.sum(indices, -1)\n coeffs = (2*((order-coeffs+1) % 2)-1)*comb(len(dist)-1, order-coeffs)\n\n collection = defaultdict(float)\n for bidx, coeff in zip(indices+skew, coeffs.tolist()):\n abscissas = [value[idx] for idx, value in zip(bidx, x_lookup)]\n weights = [value[idx] for idx, value in zip(bidx, w_lookup)]\n for abscissa, weight in zip(product(*abscissas), product(*weights)):\n collection[abscissa] += numpy.prod(weight)*coeff\n\n return collection\n\n\ndef _construct_lookup(\n orders,\n dists,\n growth,\n recurrence_algorithm,\n rules,\n tolerance,\n scaling,\n n_max,\n):\n \"\"\"\n Create abscissas and weights look-up table so values do not need to be\n re-calculated on the fly.\n \"\"\"\n x_lookup = []\n w_lookup = []\n for max_order, dist, rule in zip(orders, dists, rules):\n x_lookup.append([])\n w_lookup.append([])\n for order in range(max_order+1):\n (abscissas,), weights = chaospy.generate_quadrature(\n order=order,\n dist=dist,\n growth=growth,\n recurrence_algorithm=recurrence_algorithm,\n rule=rule,\n tolerance=tolerance,\n scaling=scaling,\n n_max=n_max,\n )\n x_lookup[-1].append(abscissas)\n w_lookup[-1].append(weights)\n return x_lookup, w_lookup\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.prod", "numpy.min" ] ]
HekpoMaH/algorithmic-concepts-reasoning
[ "17c87faad2fbe8481455de34a145a4753a2fe4d0" ]
[ "algos/models/algorithm_coloring.py" ]
[ "import torch\r\nfrom overrides import overrides\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom algos.models import AlgorithmBase\r\n\r\nclass AlgorithmColoring(AlgorithmBase):\r\n '''\r\n The overriding in this class comes from the fact that (only) the parallel\r\n coloring ouptuts are not the same dimension as its inputs: The algorithm\r\n takes as input priorities, in addition to current coloring on the last step\r\n but is not required to output these priorities.\r\n '''\r\n\r\n def __init__(self,\r\n latent_features,\r\n node_features,\r\n concept_features,\r\n output_features,\r\n algo_processor,\r\n dataset_class,\r\n inside_class,\r\n dataset_root,\r\n dataset_kwargs,\r\n bias=False,\r\n **kwargs):\r\n\r\n super(AlgorithmColoring, self).__init__(\r\n latent_features,\r\n node_features,\r\n concept_features,\r\n output_features,\r\n algo_processor,\r\n dataset_class,\r\n inside_class,\r\n dataset_root,\r\n dataset_kwargs,\r\n bias=bias,\r\n **kwargs)\r\n self.bit_encoder = nn.Sequential(\r\n nn.Linear(node_features - output_features, latent_features), # NOTE I'm using the fact that bits are not part of the output features\r\n nn.LeakyReLU(),\r\n nn.Linear(latent_features, latent_features), # NOTE I'm using the fact that bits are not part of the output features\r\n nn.LeakyReLU()\r\n )\r\n self.color_encoder = nn.Sequential(\r\n nn.Linear(output_features, latent_features), # NOTE Output features = colors we have+1\r\n nn.LeakyReLU()\r\n )\r\n self.node_encoder = nn.Sequential(\r\n nn.Linear(3*latent_features, latent_features, bias=bias),\r\n nn.LeakyReLU(),\r\n nn.Linear(latent_features, latent_features, bias=bias),\r\n nn.LeakyReLU()\r\n )\r\n\r\n @overrides\r\n def encode_nodes(self, inp, last_latent):\r\n def bin2dec(b, bits):\r\n mask = 2 ** torch.arange(bits - 1, -1, -1).to(b.device, b.dtype)\r\n return torch.sum(mask * b, -1)\r\n colors = inp[:, :self.output_features]\r\n bits = inp[:, self.output_features:] # NOTE Again, as above, bits are not parts of output\r\n encoded_colors = self.color_encoder(colors)\r\n encoded_bits = self.bit_encoder(bits)\r\n inp = torch.cat((encoded_colors, encoded_bits), dim=-1)\r\n return self.node_encoder(torch.cat((inp, last_latent), dim=-1))\r\n\r\n @overrides\r\n def get_input_from_output(self, output, batch=None):\r\n output = type(self).get_outputs(output)\r\n return torch.cat((F.one_hot(output.long().squeeze(-1), num_classes=self.output_features).float(), batch.priorities), dim=-1)\r\n\r\n @overrides\r\n def get_output_loss(self, output_logits, target):\r\n assert F.cross_entropy(output_logits, target.long(), reduction='sum') >= 0\r\n return F.cross_entropy(output_logits, target.long(), reduction='sum', weight=getattr(self.dataset, 'class_weights', None)) #FIXME remove the s-es if you want them back\r\n\r\n @overrides\r\n def get_step_output(self, batch, step):\r\n output_logits = torch.where(F.one_hot(batch.y[step, :], self.output_features).bool(), 1e3, -1e3)\r\n output = (output_logits > 0).float()\r\n return output_logits, output\r\n\r\n @overrides\r\n def set_initial_last_states(self, batch):\r\n super().set_initial_last_states(batch)\r\n # the parent method initialises the initial last output logits\r\n # from the input logits. 
The output logits should instead\r\n # be 1+|number of colours|\r\n self.last_output_logits = torch.where(batch.x[0, :, :self.output_features].bool(), 1e3, -1e3)\r\n self.last_output = (self.last_output_logits > 0).float()\r\n\r\n @staticmethod\r\n @overrides\r\n def get_outputs(outputs):\r\n return outputs.argmax(dim=-1)\r\n\r\nif __name__ == '__main__':\r\n print(list(dir(AlgorithmColoring)))\r\n" ]
[ [ "torch.cat", "torch.sum", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.arange", "torch.nn.functional.one_hot" ] ]
Juanjoglvz/DataMining
[ "9dfdb9b973ce7ff724784299862db5cb19cc1284" ]
[ "src/data/make_dataset.py" ]
[ "import pandas as pd\n\n\n\n# We read the csv\ndf118 = pd.read_csv(\"../../data/raw/0118.csv\")\ndf218 = pd.read_csv(\"../../data/raw/0218.csv\")\ndf318 = pd.read_csv(\"../../data/raw/0318.csv\")\ndf418 = pd.read_csv(\"../../data/raw/0418.csv\")\ndf417 = pd.read_csv(\"../../data/raw/0417.csv\")\ndf518 = pd.read_csv(\"../../data/raw/0518.csv\")\ndf517 = pd.read_csv(\"../../data/raw/0517.csv\")\ndf617 = pd.read_csv(\"../../data/raw/0617.csv\")\ndf717 = pd.read_csv(\"../../data/raw/0717.csv\")\ndf817 = pd.read_csv(\"../../data/raw/0817.csv\")\ndf917 = pd.read_csv(\"../../data/raw/0917.csv\")\ndf1017 = pd.read_csv(\"../../data/raw/1017.csv\")\ndf1117 = pd.read_csv(\"../../data/raw/1117.csv\")\ndf1217 = pd.read_csv(\"../../data/raw/1217.csv\")\n\n\n# We new Create a new dataframe adding everything\ndf = df118.append([df218, df318, df418, df518, df417, df517, df617, df717, df817, df917, df1017, df1117, df1217])\n\nprint(df.columns)\n\ndf = df.drop('Unnamed: 29', axis = 1)\n\ndf.to_csv('../../data/interim/vuelos.csv', index=False)" ]
[ [ "pandas.read_csv" ] ]
ashavish/tensorflow-yolov3
[ "a4940cdc918a2b2e6fe518231e0704fa70f0e5ea" ]
[ "predict_image_rotated.py" ]
[ "import cv2\nimport os\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nfrom core.config import cfg\nfrom core.yolov3 import YOLOV3\nimport core.utils as utils\nfrom PIL import Image\n\nclass YoloTest(object):\n def __init__(self):\n self.input_size = cfg.TEST.INPUT_SIZE\n self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE\n self.classes = utils.read_class_names(cfg.YOLO.CLASSES)\n self.num_classes = len(self.classes)\n self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))\n self.score_threshold = cfg.TEST.SCORE_THRESHOLD\n self.iou_threshold = cfg.TEST.IOU_THRESHOLD\n self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY\n self.annotation_path = cfg.TEST.ANNOT_PATH\n self.weight_file = cfg.TEST.WEIGHT_FILE\n self.write_image = cfg.TEST.WRITE_IMAGE\n self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH\n self.show_label = cfg.TEST.SHOW_LABEL\n\n with tf.name_scope('input'):\n self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')\n self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')\n\n model = YOLOV3(self.input_data, self.trainable)\n self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox\n\n with tf.name_scope('ema'):\n ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)\n\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n self.saver = tf.train.Saver(ema_obj.variables_to_restore())\n self.saver.restore(self.sess, self.weight_file)\n\n def predict(self, image):\n org_image = np.copy(image)\n org_h, org_w, _ = org_image.shape\n\n image_data = utils.image_preporcess(image, [self.input_size, self.input_size])\n image_data = image_data[np.newaxis, ...]\n\n pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(\n [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],\n feed_dict={\n self.input_data: image_data,\n self.trainable: False\n }\n )\n pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 6 + self.num_classes)),\n np.reshape(pred_mbbox, (-1, 6 + self.num_classes)),\n np.reshape(pred_lbbox, (-1, 6 + self.num_classes))], axis=0)\n bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)\n bboxes = utils.nms(bboxes, self.iou_threshold)\n return bboxes\n\n def predict_image(self,image_path):\n original_image = cv2.imread(image_path)\n bboxes_pr = self.predict(original_image)\n for bbox in bboxes_pr:\n coor = np.array(bbox[:4], dtype=np.int32)\n theta = bbox[4]\n score = bbox[5]\n class_ind = int(bbox[6])\n class_name = self.classes[class_ind]\n score = '%.4f' % score\n xmin, ymin, xmax, ymax = list(map(str, coor)) \n image = utils.draw_bbox(original_image, bboxes_pr)\n image = Image.fromarray(image)\n image.save(\"predictions.jpg\")\n\n def evaluate(self):\n predicted_dir_path = './mAP/predicted'\n ground_truth_dir_path = './mAP/ground-truth'\n if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)\n if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)\n if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)\n os.mkdir(predicted_dir_path)\n os.mkdir(ground_truth_dir_path)\n os.mkdir(self.write_image_path)\n\n with open(self.annotation_path, 'r') as annotation_file:\n for num, line in enumerate(annotation_file):\n annotation = line.strip().split()\n image_path = annotation[0]\n image_name = image_path.split('/')[-1]\n image = cv2.imread(image_path)\n bbox_data_gt = np.array([list(map(lambda x: float(x), box.split(','))) for box 
in annotation[1:]])\n\n if len(bbox_data_gt) == 0:\n bboxes_gt=[]\n classes_gt=[]\n else:\n bboxes_gt,classes_gt = bbox_data_gt[:, :5],bbox_data_gt[:, 5]\n ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')\n\n print('=> ground truth of %s:' % image_name)\n num_bbox_gt = len(bboxes_gt)\n with open(ground_truth_path, 'w') as f:\n for i in range(num_bbox_gt):\n class_name = self.classes[classes_gt[i]]\n xmin, ymin, xmax, ymax,theta = list(map(str, bboxes_gt[i]))\n bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax,theta]) + '\\n'\n f.write(bbox_mess)\n print('\\t' + str(bbox_mess).strip())\n print('=> predict result of %s:' % image_name)\n predict_result_path = os.path.join(predicted_dir_path, str(num) + '.txt')\n bboxes_pr = self.predict(image)\n\n if self.write_image:\n image = utils.draw_bbox(image, bboxes_pr, show_label=self.show_label)\n cv2.imwrite(self.write_image_path+image_name, image)\n\n with open(predict_result_path, 'w') as f:\n for bbox in bboxes_pr:\n coor = np.array(bbox[:5], dtype=np.int32)\n score = bbox[5]\n class_ind = int(bbox[6])\n class_name = self.classes[class_ind]\n score = '%.4f' % score\n xmin, ymin, xmax, ymax,theta = list(map(str, coor))\n bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax,theta]) + '\\n'\n f.write(bbox_mess)\n print('\\t' + str(bbox_mess).strip())\n\n\n\nif __name__ == '__main__': \n image_name = \"test.jpg\"\n yolo = YoloTest()\n yolo.predict_image(image_name)\n\n" ]
[ [ "numpy.reshape", "tensorflow.placeholder", "tensorflow.ConfigProto", "numpy.copy", "tensorflow.train.ExponentialMovingAverage", "tensorflow.name_scope", "numpy.array" ] ]
floresdwm/thesis
[ "8780455c27a9f96e3c4e49c629ef1e31c8849604" ]
[ "Classes/Plots.py" ]
[ "import os\nimport seaborn as sns; sns.set()\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.patches import Ellipse\nimport matplotlib.transforms as transforms\nimport Classes.RegressionAnalysis as pls\nimport Classes.Configurations as cfg\nfrom sklearn.model_selection import train_test_split\n\n\ndef x_data(df, file_name, fig):\n plt.figure(fig, figsize=(8, 4), dpi=125)\n plt.subplot(121)\n plt.title('All X Data')\n plt.plot(pd.DataFrame(df).transpose())\n df_std_positive = pd.DataFrame(df).transpose().mean(axis=1) + pd.DataFrame(df).transpose().std(axis=1)\n df_std_negative = pd.DataFrame(df).transpose().mean(axis=1) - pd.DataFrame(df).transpose().std(axis=1)\n plt.subplot(122)\n plt.title('X Data average +/- STD')\n plt.plot(df_std_positive, color='indianred', linestyle='dashed')\n plt.plot(pd.DataFrame(df).transpose().mean(axis=1), color='black')\n plt.plot(df_std_negative, color='indianred', linestyle='dashed')\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls',\n '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + 'x_plot_.png')\n\n\ndef x_data_epo(df, file_name, fig, plot_name):\n plt.figure(fig, figsize=(8, 4), dpi=125)\n plt.subplot(121)\n plt.title(plot_name)\n plt.plot(pd.DataFrame(df).transpose())\n df_std_positive = pd.DataFrame(df).transpose().mean(axis=1) + pd.DataFrame(df).transpose().std(axis=1)\n df_std_negative = pd.DataFrame(df).transpose().mean(axis=1) - pd.DataFrame(df).transpose().std(axis=1)\n plt.subplot(122)\n plt.title('X Data average +/- STD')\n plt.plot(df_std_positive, color='indianred', linestyle='dashed')\n plt.plot(pd.DataFrame(df).transpose().mean(axis=1), color='black')\n plt.plot(df_std_negative, color='indianred', linestyle='dashed')\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls',\n '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + 'x_plot_.png')\n\ndef x_data_outliers(df_clean, df_out, file_name):\n plt.figure(20, figsize=(8, 4), dpi=125)\n plt.subplot(221)\n plt.title('All X Data OUTLIERS')\n plt.plot(pd.DataFrame(df_out).transpose())\n df_std_positive = pd.DataFrame(df_out).transpose().mean(axis=1) + pd.DataFrame(df_out).transpose().std(axis=1)\n df_std_negative = pd.DataFrame(df_out).transpose().mean(axis=1) - pd.DataFrame(df_out).transpose().std(axis=1)\n plt.subplot(222)\n plt.title('X Data OUTLIERS average +/- STD')\n plt.plot(df_std_positive, color='indianred', linestyle='dashed')\n plt.plot(pd.DataFrame(df_out).transpose().mean(axis=1), color='black')\n plt.plot(df_std_negative, color='indianred', linestyle='dashed')\n plt.subplot(223)\n plt.title('All X Data selected')\n plt.plot(pd.DataFrame(df_clean).transpose())\n df_std_positive3 = pd.DataFrame(df_clean).transpose().mean(axis=1) + pd.DataFrame(df_clean).transpose().std(axis=1)\n df_std_negative3 = pd.DataFrame(df_clean).transpose().mean(axis=1) - pd.DataFrame(df_clean).transpose().std(axis=1)\n plt.subplot(224)\n plt.title('X Data selected average +/- STD')\n plt.plot(df_std_positive3, color='indianred', linestyle='dashed')\n plt.plot(pd.DataFrame(df_clean).transpose().mean(axis=1), color='black')\n plt.plot(df_std_negative3, color='indianred', linestyle='dashed')\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls',\n '') + '/Figures' + '/'\n if not 
os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + 'x_plot_outliers.png')\n\n\ndef correlation_matrix(df, file_name):\n if df.shape[1] > 1:\n correlations = df.corr()\n figname = sns.clustermap(data=correlations, annot=True, cmap='Greens')\n plt.title('Pearson (r)')\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls',\n '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + 'y_correlation_map_.png')\n\n\ndef kde(df):\n fig3 = plt.figure()\n sns.pairplot(df)\n\n\ndef pca_x_data(df, labels, file_name):\n plt.figure()\n df = pd.DataFrame(df)\n p4 = sns.scatterplot(x=df.iloc[:, 0], y=df.iloc[:, 1], data=df, hue=labels.iloc[:, 0])\n p4.axhline(y=0, color='k', linewidth=1)\n p4.axvline(x=0, color='k', linewidth=1)\n p4.set(title='PCA data X')\n p4 = sns.kdeplot(df.iloc[:, 0], df.iloc[:, 1], linewidth=0.5)\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls',\n '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + 'x_plot_.png')\n\n\ndef pca_y_data(df, labels):\n plt.figure()\n p4 = sns.scatterplot(x=df.iloc[:, 0], y=df.iloc[:, 1], data=df, hue=labels.iloc[:, 0])\n p4.axhline(y=0, color='k', linewidth=1)\n p4.axvline(x=0, color='k', linewidth=1)\n p4.set(title='PCA data Y')\n p4 = sns.kdeplot(df.iloc[:, 0], df.iloc[:, 1], linewidth=0.5)\n\n\ndef pca_xy_and_outliers(df_cleaned_x, df_cleaned_xy, df_outliers_x, df_outliers_xy, labels, n_out_x, n_out_y, file_name):\n plt.figure(5, figsize=(10, 5), dpi=125)\n plt.subplot(121)\n f1 = sns.scatterplot(x='PC1', y='PC2', data=df_cleaned_x, label=\"Selected data\")\n f1.set(title='PCA OF (X)' + ' outliers: ' + str(n_out_x))\n f1.axhline(y=0, color='k', linewidth=1)\n f1.axvline(x=0, color='k', linewidth=1)\n confidence_ellipse(df_cleaned_x.PC1, df_cleaned_x.PC2, f1, edgecolor='red', label=\"Confidence interval\")\n f1 = sns.scatterplot(x='PC1', y='PC2', data=df_outliers_x, color='red', label=\"Outlier data\")\n df_cleaned_x = pd.DataFrame(df_cleaned_x)\n # f1 = sns.kdeplot(df_cleaned_x.PC1, df_cleaned_x.PC2, linestyles=\"--\")\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n\n plt.subplot(122)\n f3 = sns.scatterplot(x='PC1', y='PC2', data=df_cleaned_xy, label=\"Selected data\")\n f3.set(title='PCA OF (Y)' + ' outliers: ' + str(n_out_y))\n f3.axhline(y=0, color='k', linewidth=1)\n f3.axvline(x=0, color='k', linewidth=1)\n confidence_ellipse(df_cleaned_xy.PC1, df_cleaned_xy.PC2, f3, edgecolor='red', label=\"Confidence interval\")\n f3 = sns.scatterplot(x='PC1', y='PC2', data=df_outliers_xy, color='red', label=\"Outlier data\")\n # f1 = sns.kdeplot(df_cleaned_xy.PC1, df_cleaned_xy.PC2, linestyles=\"--\")\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls',\n '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + 'pca_outliers_plot_.png')\n\n\ndef pca_xy_and_outliersb(df_cleaned_x, df_cleaned_xy, df_outliers_x, df_outliers_xy, labels, n_out_x, n_out_y):\n plt.figure(5, figsize=(8, 4), dpi=125)\n plt.subplot(221)\n f1 = sns.scatterplot(x='PC1', y='PC2',data=df_cleaned_x)\n f1.set(title='CLEANED PCA OF (X)')\n f1.axhline(y=0, color='k', linewidth=1)\n f1.axvline(x=0, color='k', linewidth=1)\n plt.subplot(222)\n f2 = sns.scatterplot(x='PC1', y='PC2', data=df_outliers_x)\n 
f2.set(title='OUTLIERS PCA OF (X)' + ' outliers: ' + str(n_out_x))\n f2.axhline(y=0, color='k', linewidth=1)\n f2.axvline(x=0, color='k', linewidth=1)\n\n plt.subplot(223)\n f3 = sns.scatterplot(x='PC1', y='PC2', data=df_cleaned_xy)\n f3.set(title='CLEANED PCA OF (Y)')\n f3.axhline(y=0, color='k', linewidth=1)\n f3.axvline(x=0, color='k', linewidth=1)\n plt.subplot(224)\n f4 = sns.scatterplot(x='PC1', y='PC2', data=df_outliers_xy)\n f4.set(title='OUTLIERS PCA OF (Y)' + ' outliers: ' + str(n_out_y))\n f4.axhline(y=0, color='k', linewidth=1)\n f4.axvline(x=0, color='k', linewidth=1)\n\n\ndef confidence_ellipse(x, y, ax, n_std=cfg.confidence_pca, facecolor='none', **kwargs):\n \"\"\"\n Create a plot of the covariance confidence ellipse of *x* and *y*.\n\n Parameters\n ----------\n x, y : array-like, shape (n, )\n Input data.\n\n ax : matplotlib.axes.Axes\n The axes object to draw the ellipse into.\n\n n_std : float\n The number of standard deviations to determine the ellipse's radii.\n\n Returns\n -------\n matplotlib.patches.Ellipse\n\n Other parameters\n ----------------\n kwargs : `~matplotlib.patches.Patch` properties\n \"\"\"\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensional dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0),\n width=ell_radius_x * 2,\n height=ell_radius_y * 2,\n facecolor=facecolor,\n **kwargs)\n\n # Calculating the standard deviation of x from\n # the square root of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the standard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)\n\n\ndef scatter_pred_ref(x, y):\n for i in range(y.shape[1]):\n figname = 'fig' + str(i+10)\n figname = plt.figure(i+10, figsize=(8.27, 11.69), dpi=100)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n i_model, i_rmse, i_r2 = pls.do_pls(x, y.iloc[:, i], 0.7)\n yhats = pls.run_pls(x, i_model)\n plt.scatter(y.iloc[:, i], pls.run_pls(x, i_model))\n plt.plot(y.iloc[:, i], y.iloc[:, i], color='red', linewidth=0.5)\n labels = list(y.columns)\n plt.title('N: ' + str(len(y)) + ' ' + str(labels[i]) + ' R²: ' + str(\"%.4f\" % i_r2) + ' RMSE: ' + str(\"%.4f\" % i_rmse))\n\n\ndef scatter_x_y(x_train, y_train, x_test, y_test, summary_models, models, file_name):\n n_train = y_train.shape[0]\n n_test = y_test.shape[0]\n for i in range(y_train.shape[1]):\n figname = 'fig' + str(i+10)\n figname = plt.figure(i+10, figsize=(8, 4), dpi=125)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n yhats = pls.run_pls(x_train, models[i])\n plt.scatter(y_train.iloc[:, i], yhats, label=\"Train data\")\n plt.plot(y_train.iloc[:, i], y_train.iloc[:, i], color='red', linewidth=0.5)\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n\n for i in range(y_test.shape[1]):\n figname = 'fig' + str(i+10)\n figname = plt.figure(i+10, figsize=(8, 4), dpi=125)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n yhats = pls.run_pls(x_test, models[i])\n plt.scatter(y_test.iloc[:, i], yhats, label=\"Test data\")\n plt.plot(y_test.iloc[:, i], 
y_test.iloc[:, i], color='red', linewidth=0.5)\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n figname.text(0, 0, str(summary_models.iloc[:, i].to_string()) +\n str(' N Train: ' + str(n_train) + str(' N Test: ' + str(n_test))), color='red', fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + str(figname) + str(i) + '_.png')\n\n\ndef scatter_x_y_n(x_train, y_train, x_test, y_test, summary_models, models, file_name, fig, paramname):\n n_train = y_train.shape[0]\n n_test = y_test.shape[0]\n for i in range(y_train.shape[1]):\n figname = 'fig ' + str(paramname)\n figname = plt.figure(figname, figsize=(8, 4), dpi=125)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n yhats = pls.run_pls(x_train, models[i])\n plt.scatter(y_train.iloc[:, i], yhats, label=\"Train data\")\n plt.plot(y_train.iloc[:, i], y_train.iloc[:, i], color='red', linewidth=0.5)\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n\n for i in range(y_test.shape[1]):\n figname = 'fig ' + str(paramname)\n figname = plt.figure(figname, figsize=(8, 4), dpi=125)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n yhats = pls.run_pls(x_test, models[i])\n plt.scatter(y_test.iloc[:, i], yhats, label=\"Test data\")\n plt.plot(y_test.iloc[:, i], y_test.iloc[:, i], color='red', linewidth=0.5)\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n figname.text(0, 0, str(summary_models.iloc[0:5, 0].to_string()) +\n str(' N Train: ' + str(n_train) + str(' N Test: ' + str(n_test))), color='red', fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '') + '/Figures' + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n plt.savefig(path + str(figname) + str(i) + '_.png')\n\n\ndef med_x_pred_sigma(x_data, y_data, summary_models, models, file_name, fig, paramname):\n x_test, x_train, y_test, y_train = train_test_split(x_data, y_data, test_size=cfg.train_split_percentage, random_state=0)\n n_train = y_train.shape[0]\n n_test = y_test.shape[0]\n for i in range(y_train.shape[1]):\n figname = 'fig' + str(10+fig)\n figname = plt.figure(10+fig, figsize=(8, 4), dpi=125)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n yhats = pls.run_pls(x_train, models[i])\n plt.scatter(y_train.iloc[:, i], yhats, label=\"Train data\")\n plt.plot(y_train.iloc[:, i], y_train.iloc[:, i], color='red', linewidth=0.5)\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n\n for i in range(y_test.shape[1]):\n figname = 'fig' + str(10+fig)\n figname = plt.figure(10+fig, figsize=(8, 4), dpi=125)\n plt.xlabel('Y measured')\n plt.ylabel('Y predicted')\n yhats = pls.run_pls(x_test, models[i])\n plt.scatter(y_test.iloc[:, i], yhats, label=\"Test data\")\n plt.plot(y_test.iloc[:, i], y_test.iloc[:, i], color='red', linewidth=0.5)\n plt.legend(loc='best', shadow=False, scatterpoints=1)\n figname.text(0, 0, str(summary_models.iloc[0:5, 0].to_string()) +\n str(' N Train: ' + str(n_train) + str(' N Test: ' + str(n_test))), color='red', fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})\n path = os.path.expanduser(\"~/Desktop\") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '') + '/'\n if not os.path.exists(path):\n os.makedirs(path)\n 
plt.savefig(path + str(figname) + str(i) + paramname + '_.png')\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.patches.Ellipse", "numpy.sqrt", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.transforms.Affine2D", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "numpy.cov", "numpy.mean", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure" ] ]
footprint-network/footprint-analytics
[ "5de4932ce1c21860785edcce90ffdf097b6f9921" ]
[ "footprint_airflow/dags/dex/etl_dex_pair_basic.py" ]
[ "from basic.etl_basic import ETLBasic\nfrom config import project_config\nimport os.path\nimport pandas as pd\nimport moment\nfrom models import BigQueryCheckPoint\nfrom datetime import timedelta, datetime\nfrom gql import Client, gql\nfrom gql.transport.aiohttp import AIOHTTPTransport\nfrom utils.upload_csv_to_gsc import upload_csv_to_gsc\nfrom utils.date_util import DateUtil\nfrom models import MonitorDashBoard\n\ndt = None\n\n\ndef slug(s: str):\n return s.lower().replace(' ', '-')\n\n\ndef load_dex_info():\n global dt\n if dt is not None:\n return dt\n dags_folder = project_config.dags_folder\n df = pd.read_csv(os.path.join(dags_folder, './dex/dex-protocol-info.csv'))\n dt = {}\n for info in df.to_dict(orient='records'):\n dt[info['name'] + info['protocol']] = {\n 'name': info['new name'],\n 'protocol_id': info['protocol_id']\n }\n return dt\n\n\nclass ETLDexPairBasic(ETLBasic):\n network = ''\n chain = ''\n dexs: dict = load_dex_info()\n skip_cache = False\n task_airflow_execution_time = '30 1 * * *'\n task_name = 'dex_pair_daily_stats'\n table_name = 'dex_pair_daily_stats'\n\n valid_null_fields = [\n 'token_0_price',\n 'token_1_price',\n 'trade_count'\n ]\n\n valid_less_than_zero_fields = [\n 'token_0_price',\n 'token_1_price',\n 'trade_count'\n ]\n\n def __init__(self, execution_date=None):\n super().__init__(execution_date)\n transport = AIOHTTPTransport(url=\"https://graphql.bitquery.io\")\n self.gql_client = Client(transport=transport, fetch_schema_from_transport=True, execute_timeout=240)\n\n def get_execution_date(self):\n now = moment.utcnow().datetime\n yesterday = now - timedelta(days=1) # yesterday time\n return yesterday\n\n def do_scrapy_data(self):\n df = self.load_data()\n self.save_pairs(df)\n\n def get_pairs_from_bitquery(self):\n print('start fetch bitquery data ')\n fliter_ = \"\"\"{\n ethereum(network: %s) {\n dexTrades(\n options: {limit: 100000, asc: \"timeInterval.day\"}\n date: {is: \"%s\"}\n ) {\n timeInterval {\n day(count: 1)\n }\n protocol\n exchange{\n name\n }\n baseCurrency {\n symbol\n address\n }\n quoteCurrency {\n symbol\n address\n }\n baseAmount\n quoteAmount\n tradeAmount(in: USD)\n trades: count\n side\n }\n }\n }\n \"\"\" % (self.network, self.execution_date_str)\n # Execute the query on the transport\n print('query from bitquery ', fliter_)\n result = self.gql_client.execute(gql(fliter_))\n pairs = result['ethereum']['dexTrades']\n print('get pairs from bitquery nums ', len(pairs))\n # time.sleep(20)\n return pairs\n\n def flatten_column(self, df: pd.DataFrame, column_names: list):\n sub_dfs = []\n for column in column_names:\n print('flatten column', column)\n sub_df = pd.json_normalize(df[column].tolist()).add_prefix(column + '_')\n sub_dfs.append(sub_df)\n df = df.drop(column_names, axis=1)\n df = df.join(sub_dfs)\n return df\n\n def get_csv_file_name(self):\n dags_folder = project_config.dags_folder\n return os.path.join(dags_folder, '../data/dex/{}_pairs-{}.csv'.format(\n self.network.lower(),\n self.execution_date_str))\n\n def get_pair_price_csv_filename(self):\n dags_folder = project_config.dags_folder\n return os.path.join(dags_folder,\n '../data/dex/{}_pairs_price_{}.csv'.format(\n self.network.lower(),\n self.execution_date_str))\n\n def load_data(self):\n if os.path.isfile(self.get_csv_file_name()):\n return\n pairs = self.get_pairs_from_bitquery()\n df = pd.DataFrame(pairs)\n df = self.flatten_column(df, ['baseCurrency', 'quoteCurrency', 'timeInterval', 'exchange'])\n df.to_csv(self.get_csv_file_name(), index=False)\n return 
df\n\n def fix_name_and_set_pair_usd_price(self, obj):\n key = obj['exchange_name'] + obj['protocol']\n if key in self.dexs:\n obj['exchange_name'] = self.dexs[key]['name']\n obj['protocol_id'] = self.dexs[key]['protocol_id']\n else:\n obj['protocol_id'] = 0\n\n obj['slug'] = slug(obj['exchange_name'])\n obj['token_0_price'] = 0 if obj['baseAmount'] == 0 else obj['tradeAmount'] / obj['baseAmount']\n obj['token_1_price'] = 0 if obj['quoteAmount'] == 0 else obj['tradeAmount'] / obj['quoteAmount']\n return obj\n\n def save_pairs(self, df):\n if os.path.isfile(self.get_pair_price_csv_filename()):\n if not self.skip_cache:\n return\n\n df['protocol_id'] = 0\n df['slug'] = 'none'\n df['chain'] = self.chain\n\n df = df[df['exchange_name'].notna()]\n\n df = df.apply(self.fix_name_and_set_pair_usd_price, axis=1)\n df = df.rename(columns={\n 'tradeAmount': 'volume',\n 'exchange_name': 'name',\n 'baseCurrency_address': 'token_0',\n 'quoteCurrency_address': 'token_1',\n 'baseCurrency_symbol': 'token_0_symbol',\n 'quoteCurrency_symbol': 'token_1_symbol',\n 'timeInterval_day': 'day',\n 'trades': 'trade_count',\n })\n df = df[['day', 'protocol_id', 'slug', 'protocol', 'name', 'chain', 'volume',\n 'token_0', 'token_0_symbol', 'token_0_price',\n 'token_1', 'token_1_symbol', 'token_1_price',\n 'trade_count']]\n df.to_csv(self.get_pair_price_csv_filename(), index=False)\n\n def do_upload_csv_to_gsc(self):\n source_csv_file = self.get_pair_price_csv_filename()\n task_name = self.network.lower() + '_' + self.task_name\n destination_file_path = '{folder}/{execution_date}_{name}.csv'.format(\n folder=self.task_name,\n name=task_name,\n execution_date=self.execution_date_str)\n self.upload_csv_to_gsc_with_cache(source_csv_file, destination_file_path)\n\n def airflow_dag_params(self):\n dag_params = {\n \"dag_id\": \"footprint_{}_dag\".format(self.task_name + '_' + self.network),\n \"catchup\": False,\n \"schedule_interval\": self.task_airflow_execution_time,\n \"description\": \"{}_dag\".format(self.task_name),\n \"default_args\": {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n 'start_date': datetime(2021, 7, 1)\n },\n \"dagrun_timeout\": timedelta(days=30)\n }\n print('dag_params', dag_params)\n return dag_params\n\n def get_monthly_file_name(self, month_str):\n dags_folder = project_config.dags_folder\n one_file_path = os.path.join(dags_folder, '../data/dex_monthly/{}_pairs_price_{}.csv'.format(\n self.network.lower(), month_str))\n return one_file_path\n\n def merge_csv(self, days, month_str):\n dags_folder = project_config.dags_folder\n print(month_str)\n one_file_path = self.get_monthly_file_name(month_str)\n\n if os.path.isfile(one_file_path):\n return\n\n all_filenames = []\n for d in days:\n p = os.path.join(dags_folder, '../data/dex/{}_pairs_price_{}.csv'.format(\n self.network.lower(), d))\n if not os.path.isfile(p):\n print(p)\n raise Exception('file not exist ' + d)\n all_filenames.append(p)\n\n combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])\n combined_csv.to_csv(one_file_path, index=False)\n print(f'merge {len(all_filenames)} files to one {one_file_path}')\n\n def upload_monthly_csv_to_gsc(self, month_str):\n source_csv_file = self.get_monthly_file_name(month_str)\n task_name = self.network.lower() + '_' + self.task_name\n destination_file_path = '{folder}/{execution_date}_{name}.csv'.format(\n folder=self.task_name,\n name=task_name,\n execution_date=month_str)\n print('destination_file_path', destination_file_path)\n 
self.upload_csv_to_gsc_with_cache(source_csv_file, destination_file_path)\n\n def merge_csv_by_month(self, start, end):\n start_date = moment.utc(start).datetime\n end_date = moment.utc(end).datetime\n month = start_date.month\n days = []\n for n in range(1, int((end_date - start_date).days) + 1):\n it = (start_date + timedelta(n))\n pass_day = (start_date + timedelta(n - 1))\n if it.month != month:\n print(days)\n month_str = '{}-{}'.format(pass_day.year, pass_day.month)\n self.merge_csv(days, month_str)\n self.upload_monthly_csv_to_gsc(month_str)\n month = it.month\n days = []\n days.append(it.strftime(\"%Y-%m-%d\"))\n\n def upload_csv_to_gsc_with_cache(self, source_csv_file, destination_file_path):\n if BigQueryCheckPoint.has_check_point(destination_file_path, self.execution_date_str):\n return False\n upload_csv_to_gsc(source_csv_file, destination_file_path)\n BigQueryCheckPoint.set_check_point(destination_file_path, self.execution_date_str)\n\n def save_monitor(self, rule_name: str, item_value: int, result_code: int, desc: str, desc_cn: str, sql: str, field: str = None):\n query = {\n 'task_name': self.task_name + '_' + self.network,\n 'rule_name': rule_name,\n 'field': field or '',\n 'stats_date': DateUtil.utc_start_of_date(self.execution_date)\n }\n\n update = {\n 'database_name': project_config.bigquery_etl_database,\n 'table_name': self.table_name,\n 'desc': desc,\n 'item_value': item_value,\n 'result_code': result_code,\n 'sql': sql,\n 'desc_cn': desc_cn\n }\n MonitorDashBoard.update_one(query=query, set_dict=update, upsert=True)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
Lamanova/Python
[ "deec41917cd96b32834e65de8071f50544d6564d" ]
[ "Module-3/assignment6.py" ]
[ "'''\nauthor: Lama Hamadeh\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n#\n# TODO: Load up the Seeds Dataset into a Dataframe\n# It's located at 'Datasets/wheat.data'\n# \n# .. your code here ..\n\nwheat_dataset=pd.read_csv('/Users/ADB3HAMADL/Desktop/Anaconda_Packages/DAT210x-master/Module3/Datasets/wheat.data',index_col = 0)\n#\n# TODO: Drop the 'id' feature\n# \n# .. your code here ..\n\n\n#\n# TODO: Compute the correlation matrix of your dataframe\n# \n# .. your code here ..\n\nwheat_dataset=wheat_dataset.corr() #creating the correlation matrix for our dataframe\n#\n# TODO: Graph the correlation matrix using imshow or matshow\n# \n# .. your code here ..\nplt.imshow(wheat_dataset.corr(), cmap=plt.cm.Blues, interpolation='nearest')\nplt.colorbar()\ntick_marks = [i for i in range(len(wheat_dataset.columns))]\nplt.xticks(tick_marks, wheat_dataset.columns, rotation='vertical')\nplt.yticks(tick_marks, wheat_dataset.columns)\n\nplt.show()\n\n\nprint(wheat_dataset.corr()) #printing the correlation matrix for our dataframe\n" ]
[ [ "matplotlib.pyplot.yticks", "pandas.read_csv", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show" ] ]
uwescience/new_xstate
[ "f7563db624258f43c9547e974a0fd5929fc2fa5b" ]
[ "xstate/python/tests/common/test_data_provider.py" ]
[ "from common import data_provider\nfrom common.trinary_data import TrinaryData\nimport common.constants as cn\nfrom common_python.testing import helpers\nfrom common_python.util.persister import Persister\nimport common_python.util.dataframe as dataframe\n\nimport copy\nimport numpy as np\nimport os\nimport pandas as pd\nimport unittest\n\n\nIGNORE_TEST = False\nIS_PLOT = False\nSIZE = 4\n\n\nclass TestDataProvider(unittest.TestCase):\n\n def setUp(self):\n self.provider = data_provider.DataProvider()\n \n def tearDown(self):\n persister = Persister(cn.DATA_PROVIDER_PERSISTER_PATH)\n persister.remove()\n\n def init(self):\n self.df_data = self.provider._makeDFFromCSV(\n data_provider.FILENAME_READS)\n self.df_gene_description = \\\n self.provider._makeGeneDescriptionDF()\n self.provider.df_gene_description = self.df_gene_description\n self.df_data = self.df_data.set_index(cn.GENE_ID)\n\n def makeData(self, size=SIZE):\n df_data = pd.DataFrame({'a': range(10)})\n self.provider._dfs_centered_adjusted_read_count = [df_data for _ in range(SIZE)]\n\n def checkDF(self, df, is_check_index=True,\n is_check_column=True, is_check_times=False,\n **kwargs):\n \"\"\"\n Verifies DataFrames\n :param pd.DataFrame df:\n :param bool is_check_index: checks that index is GENE\n :param bool is_check_column: checks column for time format\n :param bool is_check_times: checks the time format\n :param dict kwargs: arguments passed to checkTimes\n \"\"\"\n # Non-zero length\n self.assertGreater(len(df), 0)\n # Has the right index\n if is_check_index:\n b = set(df.index).issubset( self.df_gene_description.index)\n if not b:\n import pdb; pdb.set_trace()\n self.assertTrue(b)\n # No nan values\n types = [np.dtype('int64'), np.dtype('float64'), np.dtype('bool')]\n if is_check_column:\n for column in df.columns:\n ser = df[column]\n if ser.dtype in types:\n is_nan = np.isnan(ser.sum(skipna=False))\n self.assertFalse(is_nan)\n if is_check_times:\n self.checkTimes(df.columns, **kwargs)\n\n def checkTimes(self, times, is_replicated=False):\n \"\"\"\n Verifies that times have the correct format.\n :param list times:\n :param bool is_replicated: expects a .%d format\n \"\"\"\n columns = []\n for time in times:\n self.assertTrue(\"T\" in time)\n if is_replicated:\n self.assertTrue(data_provider.SEPARATOR in time)\n splits = time.split(data_provider.SEPARATOR)\n columns.append(splits[0])\n else:\n columns.append(time)\n diff = set(columns).symmetric_difference(\n self.provider.df_normalized.columns)\n self.assertEqual(len(diff), 0)\n \n def testEquals(self):\n if IGNORE_TEST:\n return\n self.assertTrue(self.provider.equals(self.provider))\n provider = copy.deepcopy(self.provider)\n self.provider.do()\n self.assertFalse(self.provider.equals(provider))\n self.assertTrue(self.provider.equals(self.provider))\n\n def testConstructor(self):\n if IGNORE_TEST:\n return\n self.init()\n self.assertTrue(\"data\" in self.provider._data_dir)\n\n def testmakeDFFromCSV(self):\n if IGNORE_TEST:\n return\n self.init()\n df = self.provider._makeDFFromCSV(data_provider.FILENAME_HYPOXIA)\n self.assertGreater(len(df), 0)\n\n def testMakeHypoxiaDF(self):\n if IGNORE_TEST:\n return\n self.init()\n df = self.provider._makeHypoxiaDF()\n expected_columns = [\n cn.STD, cn.CV, cn.HOURS, cn.SAMPLE, cn.MEAN, 0, 1, 2]\n trues = [c in df.columns for c in expected_columns]\n self.assertTrue(all(trues))\n self.assertGreater(len(df), 0)\n\n def testMakeGeneDescriptionDF(self):\n if IGNORE_TEST:\n return\n self.init()\n df = 
self.provider._makeGeneDescriptionDF()\n self.checkDF(df, is_check_column=False)\n\n def testGetNumRepl(self):\n if IGNORE_TEST:\n return\n self.init()\n self.provider._dfs_centered_adjusted_read_count = range(3)\n self.assertEqual(self.provider._getNumRepl(), 3)\n\n def testMakeMeanDF(self):\n if IGNORE_TEST:\n return\n self.init()\n self.makeData()\n df = self.provider._makeMeanDF()\n df = df.applymap(lambda v: int(v))\n self.assertTrue(df.equals(self.provider.dfs_centered_adjusted_read_count[0]))\n\n def testMakeStdDF(self):\n if IGNORE_TEST:\n return\n self.init()\n self.makeData()\n df = self.provider._makeStdDF()\n self.assertTrue(np.isclose(df.sum().sum(), 0))\n\n def testReduceDF(self):\n if IGNORE_TEST:\n return\n self.init()\n df = self.provider._reduceDF(self.df_data)\n self.assertGreater(len(self.df_data), len(df))\n difference = set(df.columns).symmetric_difference(\n self.df_data.columns)\n self.assertEqual(len(difference), 0)\n\n def concatDFS(self):\n dfs = []\n dfs.extend(self.provider.dfs_read_count)\n dfs.extend(self.provider.dfs_adjusted_read_count)\n dfs.extend(self.provider.dfs_adjusted_read_count_wrtT0)\n dfs.extend(\n self.provider.dfs_adjusted_read_count_wrtT0_log2)\n dfs.extend(\n self.provider.dfs_centered_adjusted_read_count)\n return dfs\n\n def testDo(self):\n if IGNORE_TEST:\n return\n def testLessEqual(dfs1, dfs2):\n for idx in range(len(dfs1)):\n df1 = dfs1[idx]\n df2 = dfs2[idx]\n for gene in df1.index:\n ser = df1.loc[gene, :] <= df2.loc[gene, :]\n self.assertEqual(ser.sum(), len(ser))\n #\n self.init()\n self.provider.do()\n # Specific tests\n dfs_adjusted_read_count \\\n = self.provider.dfs_adjusted_read_count\n dfs_adjusted_read_count_wrtT0 \\\n = self.provider.dfs_adjusted_read_count_wrtT0\n dfs_adjusted_read_count_wrtT0_log2 \\\n = self.provider.dfs_adjusted_read_count_wrtT0_log2\n dfs_centered_adjusted_read_count \\\n = self.provider.dfs_centered_adjusted_read_count\n testLessEqual(dfs_centered_adjusted_read_count,\n dfs_adjusted_read_count)\n # Lists\n self.assertTrue(isinstance(self.provider.tfs, list))\n self.assertGreater(len(self.provider.tfs), 0)\n # Common tests\n self.assertEqual(\n len(dfs_centered_adjusted_read_count),\n data_provider.NUM_REPL)\n [self.checkDF(df, is_replicated=True) for df in \n dfs_centered_adjusted_read_count]\n dfs = [\n self.provider.df_gene_description,\n self.provider.df_mean,\n self.provider.df_std,\n self.provider.df_normalized,\n self.provider.df_gene_expression_state,\n ]\n for idx, df in enumerate(dfs):\n self.checkDF(df, is_check_index=False,\n is_check_times=False)\n for idx, df in enumerate(self.concatDFS()):\n self.checkDF(df, is_replicated=True,\n is_check_times=True)\n dfs = [\n self.provider.df_cv,\n self.provider.df_kegg_gene_pathways, \n self.provider.df_go_terms, \n self.provider.df_ec_terms, \n self.provider.df_ko_terms, \n self.provider.df_kegg_pathways,\n self.provider.df_trn_signed,\n self.provider.df_trn_unsigned,\n ]\n for idx, df in enumerate(dfs):\n self.checkDF(df, is_check_index=False,\n is_check_column=False)\n columns = self.provider.df_stage_matrix.columns\n diff = set(columns).symmetric_difference(\n [cn.STAGE_NAME, cn.STAGE_COLOR])\n self.assertEqual(len(diff), 0)\n self.assertGreater(len(self.provider.df_stage_matrix), 0)\n\n def testPersistence(self):\n if IGNORE_TEST:\n return\n self.provider.do()\n provider = data_provider.DataProvider()\n provider.do()\n self.provider.equals(provider)\n\n def testNormalizeReadsDF(self):\n if IGNORE_TEST:\n return\n provider = 
data_provider.DataProvider(\n is_only_qgenes=False, is_display_errors=False)\n provider.do()\n df = provider.dfs_read_count[0]\n df_normalized = provider.normalizeReadsDF(df)\n columns = [\"T%d\" % n for n in range(len(df.columns))]\n self.assertTrue(helpers.isValidDataFrame(df_normalized,\n columns))\n self.assertEqual(len(df), len(df_normalized))\n #ser_length = provider.df_gene_description[cn.LENGTH]\n\n def testGetStates(self):\n if IGNORE_TEST:\n return\n self.provider.do()\n def test(timepoints):\n results = self.provider.getStates(timepoints)\n self.assertEqual(results[0], \"Normoxia\")\n if len(results) > 1:\n self.assertEqual(results[1], \"Resuscitation\")\n #\n test([\"T1\", \"T25\"])\n test([\"T1\"])\n test([1, 25])\n test([1])\n\n def testGetStateNames(self):\n if IGNORE_TEST:\n return\n self.provider.do()\n trinary = TrinaryData()\n result1s = self.provider.getStateNames(trinary.ser_y)\n count = len(set(trinary.ser_y.values))\n self.assertEqual(len(result1s), count)\n #\n ser_y = trinary.ser_y.copy()\n indices = [i + \".0\" for i in ser_y.index]\n ser_y.index = indices\n result2s = self.provider.getStateNames(trinary.ser_y)\n self.assertTrue(all([v1 == v2 for v1, v2 in zip (result1s, result2s)]))\n\n def testCalcRefPooled(self):\n if IGNORE_TEST:\n return\n df = self.provider._getLog2NormalizedReadcounts()\n ser = self.provider.calcRefPooled(df)\n trues = [v >= 0 for v in ser.values]\n self.assertTrue(all(trues))\n\n def testMakeNormalizedDF(self):\n if IGNORE_TEST:\n return\n provider = data_provider.DataProvider()\n self.provider.do()\n df1 = self.provider._makeNormalizedDF()\n provider = data_provider.DataProvider(calcRef=self.provider.calcRefPooled)\n provider.do()\n df2 = provider._makeNormalizedDF()\n self.assertTrue(isinstance(df1, pd.DataFrame))\n self.assertTrue(isinstance(df2, pd.DataFrame))\n diff = set(df1.columns).symmetric_difference(df2.columns)\n self.assertEqual(len(diff), 0)\n self.assertEqual(len(df1), len(df2))\n self.assertFalse(df1.equals(df2))\n\n def testGetStateNameForTimepoint(self):\n if IGNORE_TEST:\n return\n self.provider.do()\n name = self.provider.getStateNameForTimepoint(\"T0\")\n self.assertEqual(name, \"Normoxia\")\n name = self.provider.getStateNameForTimepoint(\"T24\")\n self.assertEqual(name, \"Resuscitation\")\n \n\n\nif __name__ == '__main__':\n unittest.main(failfast=True)\n" ]
[ [ "numpy.dtype" ] ]
enliktjioe/nn2020
[ "930ef4168ffbddbb5e81a782ba1328077a4f2525" ]
[ "practice3/neural_net.py" ]
[ "from __future__ import print_function\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom past.builtins import xrange\r\n\r\nclass TwoLayerNet(object):\r\n \"\"\"\r\n A two-layer fully-connected neural network. The net has an input dimension of\r\n N, a hidden layer dimension of H, and performs classification over C classes.\r\n We train the network with a softmax loss function and L2 regularization on the\r\n weight matrices. The network uses a ReLU nonlinearity after the first fully\r\n connected layer.\r\n\r\n In other words, the network has the following architecture:\r\n\r\n input - fully connected layer - ReLU - fully connected layer - softmax\r\n\r\n The outputs of the second fully-connected layer are the scores for each class.\r\n \"\"\"\r\n\r\n def __init__(self, input_size, hidden_size, output_size, std=1e-4):\r\n \"\"\"\r\n Initialize the model. Weights are initialized to small random values and\r\n biases are initialized to zero. Weights and biases are stored in the\r\n variable self.params, which is a dictionary with the following keys:\r\n\r\n W1: First layer weights; has shape (D, M)\r\n b1: First layer biases; has shape (M,)\r\n W2: Second layer weights; has shape (M, C)\r\n b2: Second layer biases; has shape (C,)\r\n\r\n Inputs:\r\n - input_size: The dimension D of the input data.\r\n - hidden_size: The number of neurons M in the hidden layer.\r\n - output_size: The number of classes C.\r\n \"\"\"\r\n self.params = {}\r\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\r\n self.params['b1'] = np.zeros(hidden_size)\r\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\r\n self.params['b2'] = np.zeros(output_size)\r\n\r\n def loss(self, X, y=None, reg=0.0):\r\n \"\"\"\r\n Compute the loss and gradients for a two layer fully connected neural\r\n network.\r\n\r\n Inputs:\r\n - X: Input data of shape (N, D). Each X[i] is a training sample.\r\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\r\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\r\n is not passed then we only return scores, and if it is passed then we\r\n instead return the loss and gradients.\r\n - reg: Regularization strength.\r\n\r\n Returns:\r\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\r\n the score for class c on input X[i].\r\n\r\n If y is not None, instead return a tuple of:\r\n - loss: Loss (data loss and regularization loss) for this batch of training\r\n samples.\r\n - grads: Dictionary mapping parameter names to gradients of those parameters\r\n with respect to the loss function; has the same keys as self.params.\r\n \"\"\"\r\n # Unpack variables from the params dictionary\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n N, D = X.shape\r\n\r\n # Compute the forward pass\r\n scores = None\r\n #############################################################################\r\n # TODO: Perform the forward pass, computing the class scores for the input. #\r\n # Store the result in the scores variable, which should be an array of #\r\n # shape (N, C). 
#\r\n #############################################################################\r\n A1 = X.dot(W1) + b1\r\n H1 = np.maximum(A1, 0)\r\n scores = H1.dot(W2) + b2\r\n #############################################################################\r\n # END OF YOUR CODE #\r\n #############################################################################\r\n \r\n # If the targets are not given then jump out, we're done\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n loss = None\r\n #############################################################################\r\n # TODO: Finish the forward pass, and compute the loss. This should include #\r\n # both the data loss and L2 regularization for W1 and W2. Store the result #\r\n # in the variable loss, which should be a scalar. Use the Softmax #\r\n # classifier loss. #\r\n #############################################################################\r\n scores_exp = np.exp(scores)\r\n p = scores_exp / (np.sum(scores_exp, axis=1, keepdims=True))\r\n loss = -np.mean(np.log(p[np.arange(N), y]))\r\n loss += reg * (np.sum(W1 ** 2) + np.sum(W2 ** 2))\r\n\r\n #############################################################################\r\n # END OF YOUR CODE #\r\n #############################################################################\r\n \r\n # Backward pass: compute gradients\r\n grads = {}\r\n #############################################################################\r\n # TODO: Compute the backward pass, computing the derivatives of the weights #\r\n # and biases. Store the results in the grads dictionary. For example, #\r\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\r\n #############################################################################\r\n d_scores = p\r\n d_scores[range(N), y] -= 1\r\n d_scores = d_scores / N\r\n grads['W2'] = H1.T.dot(d_scores)\r\n grads['b2'] = np.sum(d_scores, axis=0)\r\n d_H1 = d_scores.dot(W2.T)\r\n d_H1[A1 <= 0] = 0\r\n\r\n grads['W1'] = X.T.dot(d_H1)\r\n grads['b1'] = np.sum(d_H1, axis=0)\r\n\r\n grads['W2'] += reg * W2\r\n grads['W1'] += reg * W1\r\n #############################################################################\r\n # END OF YOUR CODE #\r\n #############################################################################\r\n\r\n return loss, grads\r\n\r\n def train(self, X, y, X_val, y_val,\r\n learning_rate=1e-3, learning_rate_decay=0.95,\r\n reg=5e-6, num_iters=100,\r\n batch_size=200, verbose=False):\r\n \"\"\"\r\n Train this neural network using stochastic gradient descent.\r\n\r\n Inputs:\r\n - X: A numpy array of shape (N, D) giving training data.\r\n - y: A numpy array f shape (N,) giving training labels; y[i] = c means that\r\n X[i] has label c, where 0 <= c < C.\r\n - X_val: A numpy array of shape (N_val, D) giving validation data.\r\n - y_val: A numpy array of shape (N_val,) giving validation labels.\r\n - learning_rate: Scalar giving learning rate for optimization.\r\n - learning_rate_decay: Scalar giving factor used to decay the learning rate\r\n after each epoch.\r\n - reg: Scalar giving regularization strength.\r\n - num_iters: Number of steps to take when optimizing.\r\n - batch_size: Number of training examples to use per step.\r\n - verbose: boolean; if true print progress during optimization.\r\n \"\"\"\r\n num_train = X.shape[0]\r\n iterations_per_epoch = max(num_train / batch_size, 1)\r\n\r\n # Use SGD to optimize the parameters in self.model\r\n loss_history = []\r\n train_acc_history = []\r\n val_acc_history = []\r\n\r\n 
for it in xrange(num_iters):\r\n X_batch = None\r\n y_batch = None\r\n\r\n #########################################################################\r\n # TODO: Create a random minibatch of training data and labels, storing #\r\n # them in X_batch and y_batch respectively. #\r\n #########################################################################\r\n samples = np.random.choice(num_train, batch_size)\r\n X_batch = X[samples]\r\n y_batch = y[samples]\r\n #########################################################################\r\n # END OF YOUR CODE #\r\n #########################################################################\r\n\r\n # Compute loss and gradients using the current minibatch\r\n loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\r\n loss_history.append(loss)\r\n\r\n #########################################################################\r\n # TODO: Use the gradients in the grads dictionary to update the #\r\n # parameters of the network (stored in the dictionary self.params) #\r\n # using stochastic gradient descent. You'll need to use the gradients #\r\n # stored in the grads dictionary defined above. #\r\n #########################################################################\r\n self.params['W1'] += -learning_rate * grads['W1']\r\n self.params['W2'] += -learning_rate * grads['W2']\r\n self.params['b1'] += -learning_rate * grads['b1']\r\n self.params['b2'] += -learning_rate * grads['b2']\r\n #########################################################################\r\n # END OF YOUR CODE #\r\n #########################################################################\r\n\r\n if verbose and it % 100 == 0:\r\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\r\n\r\n # Every epoch, check train and val accuracy and decay learning rate.\r\n if it % iterations_per_epoch == 0:\r\n # Check accuracy\r\n train_acc = (self.predict(X_batch) == y_batch).mean()\r\n val_acc = (self.predict(X_val) == y_val).mean()\r\n train_acc_history.append(train_acc)\r\n val_acc_history.append(val_acc)\r\n\r\n # Decay learning rate\r\n learning_rate *= learning_rate_decay\r\n\r\n return {\r\n 'loss_history': loss_history,\r\n 'train_acc_history': train_acc_history,\r\n 'val_acc_history': val_acc_history,\r\n }\r\n\r\n def predict(self, X):\r\n \"\"\"\r\n Use the trained weights of this two-layer network to predict labels for\r\n data points. For each data point we predict scores for each of the C\r\n classes, and assign each data point to the class with the highest score.\r\n\r\n Inputs:\r\n - X: A numpy array of shape (N, D) giving N D-dimensional data points to\r\n classify.\r\n\r\n Returns:\r\n - y_pred: A numpy array of shape (N,) giving predicted labels for each of\r\n the elements of X. For all i, y_pred[i] = c means that X[i] is predicted\r\n to have class c, where 0 <= c < C.\r\n \"\"\"\r\n y_pred = None\r\n\r\n ###########################################################################\r\n # TODO: Implement this function; it should be VERY simple! #\r\n ###########################################################################\r\n z1 = X.dot(self.params['W1']) + self.params['b1']\r\n a1 = np.maximum(z1, 0)\r\n scores = a1.dot(self.params['W2']) + self.params['b2']\r\n y_pred = np.argmax(scores, axis=1)\r\n ###########################################################################\r\n # END OF YOUR CODE #\r\n ###########################################################################\r\n\r\n return y_pred\r\n\r\n\r\n" ]
[ [ "numpy.maximum", "numpy.random.choice", "numpy.arange", "numpy.argmax", "numpy.random.randn", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
gzdx-chenghui/qlib-lihua
[ "d5a0ccca059509007b3ea8d734fdbbc77ea915f8" ]
[ "qlib1/data/ops.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.stats import percentileofscore\n\nfrom .base import Expression, ExpressionOps\nfrom ..log import get_module_logger\n\ntry:\n from ._libs.rolling import rolling_slope, rolling_rsquare, rolling_resi\n from ._libs.expanding import expanding_slope, expanding_rsquare, expanding_resi\nexcept ImportError as err:\n print(\"Do not import qlib1 package in the repository directory!\")\n raise\n\n__all__ = (\n \"Ref\",\n \"Max\",\n \"Min\",\n \"Sum\",\n \"Mean\",\n \"Std\",\n \"Var\",\n \"Skew\",\n \"Kurt\",\n \"Med\",\n \"Mad\",\n \"Slope\",\n \"Rsquare\",\n \"Resi\",\n \"Rank\",\n \"Quantile\",\n \"Count\",\n \"EMA\",\n \"WMA\",\n \"Corr\",\n \"Cov\",\n \"Delta\",\n \"Abs\",\n \"Sign\",\n \"Log\",\n \"Power\",\n \"Add\",\n \"Sub\",\n \"Mul\",\n \"Div\",\n \"Greater\",\n \"Less\",\n \"And\",\n \"Or\",\n \"Not\",\n \"Gt\",\n \"Ge\",\n \"Lt\",\n \"Le\",\n \"Eq\",\n \"Ne\",\n \"Mask\",\n \"IdxMax\",\n \"IdxMin\",\n \"If\",\n)\n\nnp.seterr(invalid=\"ignore\")\n\n#################### Element-Wise Operator ####################\n\n\nclass ElemOperator(ExpressionOps):\n \"\"\"Element-wise Operator\n\n Parameters\n ----------\n feature : Expression\n feature instance\n func : str\n feature operation method\n\n Returns\n ----------\n Expression\n feature operation output\n \"\"\"\n\n def __init__(self, feature, func):\n self.feature = feature\n self.func = func\n\n def __str__(self):\n return \"{}({})\".format(type(self).__name__, self.feature)\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n return getattr(np, self.func)(series)\n\n def get_longest_back_rolling(self):\n return self.feature.get_longest_back_rolling()\n\n def get_extended_window_size(self):\n return self.feature.get_extended_window_size()\n\n\nclass Abs(ElemOperator):\n \"\"\"Feature Absolute Value\n\n Parameters\n ----------\n feature : Expression\n feature instance\n\n Returns\n ----------\n Expression\n a feature instance with absolute output\n \"\"\"\n\n def __init__(self, feature):\n super(Abs, self).__init__(feature, \"abs\")\n\n\nclass Sign(ElemOperator):\n \"\"\"Feature Sign\n\n Parameters\n ----------\n feature : Expression\n feature instance\n\n Returns\n ----------\n Expression\n a feature instance with sign\n \"\"\"\n\n def __init__(self, feature):\n super(Sign, self).__init__(feature, \"sign\")\n\n\nclass Log(ElemOperator):\n \"\"\"Feature Log\n\n Parameters\n ----------\n feature : Expression\n feature instance\n\n Returns\n ----------\n Expression\n a feature instance with log\n \"\"\"\n\n def __init__(self, feature):\n super(Log, self).__init__(feature, \"log\")\n\n\nclass Power(ElemOperator):\n \"\"\"Feature Power\n\n Parameters\n ----------\n feature : Expression\n feature instance\n\n Returns\n ----------\n Expression\n a feature instance with power\n \"\"\"\n\n def __init__(self, feature, exponent):\n super(Power, self).__init__(feature, \"power\")\n self.exponent = exponent\n\n def __str__(self):\n return \"{}({},{})\".format(type(self).__name__, self.feature, self.exponent)\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n return getattr(np, self.func)(series, self.exponent)\n\n\nclass Mask(ElemOperator):\n 
\"\"\"Feature Mask\n\n Parameters\n ----------\n feature : Expression\n feature instance\n instrument : str\n instrument mask\n\n Returns\n ----------\n Expression\n a feature instance with masked instrument\n \"\"\"\n\n def __init__(self, feature, instrument):\n super(Mask, self).__init__(feature, \"mask\")\n self.instrument = instrument\n\n def __str__(self):\n return \"{}({},{})\".format(type(self).__name__, self.feature, self.instrument.lower())\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n return self.feature.load(self.instrument, start_index, end_index, freq)\n\n\nclass Not(ElemOperator):\n \"\"\"Not Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n feature elementwise not output\n \"\"\"\n\n def __init__(self, feature):\n super(Not, self).__init__(feature, \"bitwise_not\")\n\n\n#################### Pair-Wise Operator ####################\nclass PairOperator(ExpressionOps):\n \"\"\"Pair-wise operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance or numeric value\n feature_right : Expression\n feature instance or numeric value\n func : str\n operator function\n\n Returns\n ----------\n Feature:\n two features' operation output\n \"\"\"\n\n def __init__(self, feature_left, feature_right, func):\n self.feature_left = feature_left\n self.feature_right = feature_right\n self.func = func\n\n def __str__(self):\n return \"{}({},{})\".format(type(self).__name__, self.feature_left, self.feature_right)\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n assert any(\n [isinstance(self.feature_left, Expression), self.feature_right, Expression]\n ), \"at least one of two inputs is Expression instance\"\n if isinstance(self.feature_left, Expression):\n series_left = self.feature_left.load(instrument, start_index, end_index, freq)\n else:\n series_left = self.feature_left # numeric value\n if isinstance(self.feature_right, Expression):\n series_right = self.feature_right.load(instrument, start_index, end_index, freq)\n else:\n series_right = self.feature_right\n return getattr(np, self.func)(series_left, series_right)\n\n def get_longest_back_rolling(self):\n if isinstance(self.feature_left, Expression):\n left_br = self.feature_left.get_longest_back_rolling()\n else:\n left_br = 0\n\n if isinstance(self.feature_right, Expression):\n right_br = self.feature_right.get_longest_back_rolling()\n else:\n right_br = 0\n return max(left_br, right_br)\n\n def get_extended_window_size(self):\n if isinstance(self.feature_left, Expression):\n ll, lr = self.feature_left.get_extended_window_size()\n else:\n ll, lr = 0, 0\n\n if isinstance(self.feature_right, Expression):\n rl, rr = self.feature_right.get_extended_window_size()\n else:\n rl, rr = 0, 0\n return max(ll, rl), max(lr, rr)\n\n\nclass Add(PairOperator):\n \"\"\"Add Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n two features' sum\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Add, self).__init__(feature_left, feature_right, \"add\")\n\n\nclass Sub(PairOperator):\n \"\"\"Subtract Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n two features' subtraction\n \"\"\"\n\n def __init__(self, 
feature_left, feature_right):\n super(Sub, self).__init__(feature_left, feature_right, \"subtract\")\n\n\nclass Mul(PairOperator):\n \"\"\"Multiply Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n two features' product\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Mul, self).__init__(feature_left, feature_right, \"multiply\")\n\n\nclass Div(PairOperator):\n \"\"\"Division Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n two features' division\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Div, self).__init__(feature_left, feature_right, \"divide\")\n\n\nclass Greater(PairOperator):\n \"\"\"Greater Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n greater elements taken from the input two features\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Greater, self).__init__(feature_left, feature_right, \"maximum\")\n\n\nclass Less(PairOperator):\n \"\"\"Less Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n smaller elements taken from the input two features\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Less, self).__init__(feature_left, feature_right, \"minimum\")\n\n\nclass Gt(PairOperator):\n \"\"\"Greater Than Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n bool series indicate `left > right`\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Gt, self).__init__(feature_left, feature_right, \"greater\")\n\n\nclass Ge(PairOperator):\n \"\"\"Greater Equal Than Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n bool series indicate `left >= right`\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Ge, self).__init__(feature_left, feature_right, \"greater_equal\")\n\n\nclass Lt(PairOperator):\n \"\"\"Less Than Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n bool series indicate `left < right`\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Lt, self).__init__(feature_left, feature_right, \"less\")\n\n\nclass Le(PairOperator):\n \"\"\"Less Equal Than Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n bool series indicate `left <= right`\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Le, self).__init__(feature_left, feature_right, \"less_equal\")\n\n\nclass Eq(PairOperator):\n \"\"\"Equal Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n bool series indicate `left == right`\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Eq, 
self).__init__(feature_left, feature_right, \"equal\")\n\n\nclass Ne(PairOperator):\n \"\"\"Not Equal Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n bool series indicate `left != right`\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Ne, self).__init__(feature_left, feature_right, \"not_equal\")\n\n\nclass And(PairOperator):\n \"\"\"And Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n two features' row by row & output\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(And, self).__init__(feature_left, feature_right, \"bitwise_and\")\n\n\nclass Or(PairOperator):\n \"\"\"Or Operator\n\n Parameters\n ----------\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n\n Returns\n ----------\n Feature:\n two features' row by row | outputs\n \"\"\"\n\n def __init__(self, feature_left, feature_right):\n super(Or, self).__init__(feature_left, feature_right, \"bitwise_or\")\n\n\n#################### Triple-wise Operator ####################\nclass If(ExpressionOps):\n \"\"\"If Operator\n\n Parameters\n ----------\n condition : Expression\n feature instance with bool values as condition\n feature_left : Expression\n feature instance\n feature_right : Expression\n feature instance\n \"\"\"\n\n def __init__(self, condition, feature_left, feature_right):\n self.condition = condition\n self.feature_left = feature_left\n self.feature_right = feature_right\n\n def __str__(self):\n return \"If({},{},{})\".format(self.condition, self.feature_left, self.feature_right)\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series_cond = self.condition.load(instrument, start_index, end_index, freq)\n if isinstance(self.feature_left, Expression):\n series_left = self.feature_left.load(instrument, start_index, end_index, freq)\n else:\n series_left = self.feature_left\n if isinstance(self.feature_right, Expression):\n series_right = self.feature_right.load(instrument, start_index, end_index, freq)\n else:\n series_right = self.feature_right\n series = pd.Series(np.where(series_cond, series_left, series_right), index=series_cond.index)\n return series\n\n def get_longest_back_rolling(self):\n if isinstance(self.feature_left, Expression):\n left_br = self.feature_left.get_longest_back_rolling()\n else:\n left_br = 0\n\n if isinstance(self.feature_right, Expression):\n right_br = self.feature_right.get_longest_back_rolling()\n else:\n right_br = 0\n\n if isinstance(self.condition, Expression):\n c_br = self.condition.get_longest_back_rolling()\n else:\n c_br = 0\n return max(left_br, right_br, c_br)\n\n def get_extended_window_size(self):\n if isinstance(self.feature_left, Expression):\n ll, lr = self.feature_left.get_extended_window_size()\n else:\n ll, lr = 0, 0\n\n if isinstance(self.feature_right, Expression):\n rl, rr = self.feature_right.get_extended_window_size()\n else:\n rl, rr = 0, 0\n\n if isinstance(self.condition, Expression):\n cl, cr = self.condition.get_extended_window_size()\n else:\n cl, cr = 0, 0\n return max(ll, rl, cl), max(lr, rr, cr)\n\n\n#################### Rolling ####################\n# NOTE: methods like `rolling.mean` are optimized with cython,\n# and are super faster than `rolling.apply(np.mean)`\n\n\nclass Rolling(ExpressionOps):\n \"\"\"Rolling 
Operator\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n func : str\n rolling method\n\n Returns\n ----------\n Expression\n rolling outputs\n \"\"\"\n\n def __init__(self, feature, N, func):\n self.feature = feature\n self.N = N\n self.func = func\n\n def __str__(self):\n return \"{}({},{})\".format(type(self).__name__, self.feature, self.N)\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n # NOTE: remove all null check,\n # now it's user's responsibility to decide whether use features in null days\n # isnull = series.isnull() # NOTE: isnull = NaN, inf is not null\n if self.N == 0:\n series = getattr(series.expanding(min_periods=1), self.func)()\n elif 0 < self.N < 1:\n series = series.ewm(alpha=self.N, min_periods=1).mean()\n else:\n series = getattr(series.rolling(self.N, min_periods=1), self.func)()\n # series.iloc[:self.N-1] = np.nan\n # series[isnull] = np.nan\n return series\n\n def get_longest_back_rolling(self):\n if self.N == 0:\n return np.inf\n if 0 < self.N < 1:\n return int(np.log(1e-6) / np.log(1 - self.N)) # (1 - N)**window == 1e-6\n return self.feature.get_longest_back_rolling() + self.N - 1\n\n def get_extended_window_size(self):\n if self.N == 0:\n # FIXME: How to make this accurate and efficiently? Or should we\n # remove such support for N == 0?\n get_module_logger(self.__class__.__name__).warning(\"The Rolling(ATTR, 0) will not be accurately calculated\")\n return self.feature.get_extended_window_size()\n elif 0 < self.N < 1:\n lft_etd, rght_etd = self.feature.get_extended_window_size()\n size = int(np.log(1e-6) / np.log(1 - self.N))\n lft_etd = max(lft_etd + size - 1, lft_etd)\n return lft_etd, rght_etd\n else:\n lft_etd, rght_etd = self.feature.get_extended_window_size()\n lft_etd = max(lft_etd + self.N - 1, lft_etd)\n return lft_etd, rght_etd\n\n\nclass Ref(Rolling):\n \"\"\"Feature Reference\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n N = 0, retrieve the first data; N > 0, retrieve data of N periods ago; N < 0, future data\n\n Returns\n ----------\n Expression\n a feature instance with target reference\n \"\"\"\n\n def __init__(self, feature, N):\n super(Ref, self).__init__(feature, N, \"ref\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n # N = 0, return first day\n if series.empty:\n return series # Pandas bug, see: https://github.com/pandas-dev/pandas/issues/21049\n elif self.N == 0:\n series = pd.Series(series.iloc[0], index=series.index)\n else:\n series = series.shift(self.N) # copy\n return series\n\n def get_longest_back_rolling(self):\n if self.N == 0:\n return np.inf\n return self.feature.get_longest_back_rolling() + self.N\n\n def get_extended_window_size(self):\n if self.N == 0:\n get_module_logger(self.__class__.__name__).warning(\"The Ref(ATTR, 0) will not be accurately calculated\")\n return self.feature.get_extended_window_size()\n else:\n lft_etd, rght_etd = self.feature.get_extended_window_size()\n lft_etd = max(lft_etd + self.N, lft_etd)\n rght_etd = max(rght_etd - self.N, rght_etd)\n return lft_etd, rght_etd\n\n\nclass Mean(Rolling):\n \"\"\"Rolling Mean (MA)\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with rolling average\n \"\"\"\n\n def 
__init__(self, feature, N):\n        super(Mean, self).__init__(feature, N, \"mean\")\n\n\nclass Sum(Rolling):\n    \"\"\"Rolling Sum\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling sum\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(Sum, self).__init__(feature, N, \"sum\")\n\n\nclass Std(Rolling):\n    \"\"\"Rolling Std\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling std\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(Std, self).__init__(feature, N, \"std\")\n\n\nclass Var(Rolling):\n    \"\"\"Rolling Variance\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling variance\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(Var, self).__init__(feature, N, \"var\")\n\n\nclass Skew(Rolling):\n    \"\"\"Rolling Skewness\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling skewness\n    \"\"\"\n\n    def __init__(self, feature, N):\n        if N != 0 and N < 3:\n            raise ValueError(\"The rolling window size of Skewness operation should >= 3\")\n        super(Skew, self).__init__(feature, N, \"skew\")\n\n\nclass Kurt(Rolling):\n    \"\"\"Rolling Kurtosis\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling kurtosis\n    \"\"\"\n\n    def __init__(self, feature, N):\n        if N != 0 and N < 4:\n            raise ValueError(\"The rolling window size of Kurtosis operation should >= 4\")\n        super(Kurt, self).__init__(feature, N, \"kurt\")\n\n\nclass Max(Rolling):\n    \"\"\"Rolling Max\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling max\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(Max, self).__init__(feature, N, \"max\")\n\n\nclass IdxMax(Rolling):\n    \"\"\"Rolling Max Index\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling max index\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(IdxMax, self).__init__(feature, N, \"idxmax\")\n\n    def _load_internal(self, instrument, start_index, end_index, freq):\n        series = self.feature.load(instrument, start_index, end_index, freq)\n        if self.N == 0:\n            series = series.expanding(min_periods=1).apply(lambda x: x.argmax() + 1, raw=True)\n        else:\n            series = series.rolling(self.N, min_periods=1).apply(lambda x: x.argmax() + 1, raw=True)\n        return series\n\n\nclass Min(Rolling):\n    \"\"\"Rolling Min\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling min\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(Min, self).__init__(feature, N, \"min\")\n\n\nclass IdxMin(Rolling):\n    \"\"\"Rolling Min Index\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling min index\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(IdxMin, 
self).__init__(feature, N, \"idxmin\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n if self.N == 0:\n series = series.expanding(min_periods=1).apply(lambda x: x.argmin() + 1, raw=True)\n else:\n series = series.rolling(self.N, min_periods=1).apply(lambda x: x.argmin() + 1, raw=True)\n return series\n\n\nclass Quantile(Rolling):\n \"\"\"Rolling Quantile\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with rolling quantile\n \"\"\"\n\n def __init__(self, feature, N, qscore):\n super(Quantile, self).__init__(feature, N, \"quantile\")\n self.qscore = qscore\n\n def __str__(self):\n return \"{}({},{},{})\".format(type(self).__name__, self.feature, self.N, self.qscore)\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n if self.N == 0:\n series = series.expanding(min_periods=1).quantile(self.qscore)\n else:\n series = series.rolling(self.N, min_periods=1).quantile(self.qscore)\n return series\n\n\nclass Med(Rolling):\n \"\"\"Rolling Median\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with rolling median\n \"\"\"\n\n def __init__(self, feature, N):\n super(Med, self).__init__(feature, N, \"median\")\n\n\nclass Mad(Rolling):\n \"\"\"Rolling Mean Absolute Deviation\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with rolling mean absolute deviation\n \"\"\"\n\n def __init__(self, feature, N):\n super(Mad, self).__init__(feature, N, \"mad\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n # TODO: implement in Cython\n\n def mad(x):\n x1 = x[~np.isnan(x)]\n return np.mean(np.abs(x1 - x1.mean()))\n\n if self.N == 0:\n series = series.expanding(min_periods=1).apply(mad, raw=True)\n else:\n series = series.rolling(self.N, min_periods=1).apply(mad, raw=True)\n return series\n\n\nclass Rank(Rolling):\n \"\"\"Rolling Rank (Percentile)\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with rolling rank\n \"\"\"\n\n def __init__(self, feature, N):\n super(Rank, self).__init__(feature, N, \"rank\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n # TODO: implement in Cython\n\n def rank(x):\n if np.isnan(x[-1]):\n return np.nan\n x1 = x[~np.isnan(x)]\n if x1.shape[0] == 0:\n return np.nan\n return percentileofscore(x1, x1[-1]) / len(x1)\n\n if self.N == 0:\n series = series.expanding(min_periods=1).apply(rank, raw=True)\n else:\n series = series.rolling(self.N, min_periods=1).apply(rank, raw=True)\n return series\n\n\nclass Count(Rolling):\n \"\"\"Rolling Count\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with rolling count of number of non-NaN elements\n \"\"\"\n\n def __init__(self, feature, N):\n super(Count, self).__init__(feature, N, \"count\")\n\n\nclass 
Delta(Rolling):\n \"\"\"Rolling Delta\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with end minus start in rolling window\n \"\"\"\n\n def __init__(self, feature, N):\n super(Delta, self).__init__(feature, N, \"delta\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n if self.N == 0:\n series = series - series.iloc[0]\n else:\n series = series - series.shift(self.N)\n return series\n\n\n# TODO:\n# support pair-wise rolling like `Slope(A, B, N)`\nclass Slope(Rolling):\n \"\"\"Rolling Slope\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with regression slope of given window\n \"\"\"\n\n def __init__(self, feature, N):\n super(Slope, self).__init__(feature, N, \"slope\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n if self.N == 0:\n series = pd.Series(expanding_slope(series.values), index=series.index)\n else:\n series = pd.Series(rolling_slope(series.values, self.N), index=series.index)\n return series\n\n\nclass Rsquare(Rolling):\n \"\"\"Rolling R-value Square\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with regression r-value square of given window\n \"\"\"\n\n def __init__(self, feature, N):\n super(Rsquare, self).__init__(feature, N, \"rsquare\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n _series = self.feature.load(instrument, start_index, end_index, freq)\n if self.N == 0:\n series = pd.Series(expanding_rsquare(_series.values), index=_series.index)\n else:\n series = pd.Series(rolling_rsquare(_series.values, self.N), index=_series.index)\n series.loc[np.isclose(_series.rolling(self.N, min_periods=1).std(), 0, atol=2e-05)] = np.nan\n return series\n\n\nclass Resi(Rolling):\n \"\"\"Rolling Regression Residuals\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with regression residuals of given window\n \"\"\"\n\n def __init__(self, feature, N):\n super(Resi, self).__init__(feature, N, \"resi\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n if self.N == 0:\n series = pd.Series(expanding_resi(series.values), index=series.index)\n else:\n series = pd.Series(rolling_resi(series.values, self.N), index=series.index)\n return series\n\n\nclass WMA(Rolling):\n \"\"\"Rolling WMA\n\n Parameters\n ----------\n feature : Expression\n feature instance\n N : int\n rolling window size\n\n Returns\n ----------\n Expression\n a feature instance with weighted moving average output\n \"\"\"\n\n def __init__(self, feature, N):\n super(WMA, self).__init__(feature, N, \"wma\")\n\n def _load_internal(self, instrument, start_index, end_index, freq):\n series = self.feature.load(instrument, start_index, end_index, freq)\n # TODO: implement in Cython\n\n def weighted_mean(x):\n w = np.arange(len(x))\n w = w / w.sum()\n return np.nanmean(w * x)\n\n if self.N == 0:\n series = series.expanding(min_periods=1).apply(weighted_mean, raw=True)\n 
else:\n            series = series.rolling(self.N, min_periods=1).apply(weighted_mean, raw=True)\n        return series\n\n\nclass EMA(Rolling):\n    \"\"\"Rolling Exponential Mean (EMA)\n\n    Parameters\n    ----------\n    feature : Expression\n        feature instance\n    N : int, float\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with exponential moving average of given window\n    \"\"\"\n\n    def __init__(self, feature, N):\n        super(EMA, self).__init__(feature, N, \"ema\")\n\n    def _load_internal(self, instrument, start_index, end_index, freq):\n        series = self.feature.load(instrument, start_index, end_index, freq)\n\n        def exp_weighted_mean(x):\n            a = 1 - 2 / (1 + len(x))\n            w = a ** np.arange(len(x))[::-1]\n            w /= w.sum()\n            return np.nansum(w * x)\n\n        if self.N == 0:\n            series = series.expanding(min_periods=1).apply(exp_weighted_mean, raw=True)\n        elif 0 < self.N < 1:\n            series = series.ewm(alpha=self.N, min_periods=1).mean()\n        else:\n            series = series.ewm(span=self.N, min_periods=1).mean()\n        return series\n\n\n#################### Pair-Wise Rolling ####################\nclass PairRolling(ExpressionOps):\n    \"\"\"Pair Rolling Operator\n\n    Parameters\n    ----------\n    feature_left : Expression\n        feature instance\n    feature_right : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling output of two input features\n    \"\"\"\n\n    def __init__(self, feature_left, feature_right, N, func):\n        self.feature_left = feature_left\n        self.feature_right = feature_right\n        self.N = N\n        self.func = func\n\n    def __str__(self):\n        return \"{}({},{},{})\".format(type(self).__name__, self.feature_left, self.feature_right, self.N)\n\n    def _load_internal(self, instrument, start_index, end_index, freq):\n        series_left = self.feature_left.load(instrument, start_index, end_index, freq)\n        series_right = self.feature_right.load(instrument, start_index, end_index, freq)\n        if self.N == 0:\n            series = getattr(series_left.expanding(min_periods=1), self.func)(series_right)\n        else:\n            series = getattr(series_left.rolling(self.N, min_periods=1), self.func)(series_right)\n        return series\n\n    def get_longest_back_rolling(self):\n        if self.N == 0:\n            return np.inf\n        return (\n            max(self.feature_left.get_longest_back_rolling(), self.feature_right.get_longest_back_rolling())\n            + self.N\n            - 1\n        )\n\n    def get_extended_window_size(self):\n        if self.N == 0:\n            get_module_logger(self.__class__.__name__).warning(\n                \"The PairRolling(ATTR, 0) will not be accurately calculated\"\n            )\n            # NOTE: PairRolling has no `self.feature`; fall back to the union of both inputs' windows\n            ll, lr = self.feature_left.get_extended_window_size()\n            rl, rr = self.feature_right.get_extended_window_size()\n            return max(ll, rl), max(lr, rr)\n        else:\n            ll, lr = self.feature_left.get_extended_window_size()\n            rl, rr = self.feature_right.get_extended_window_size()\n            return max(ll, rl) + self.N - 1, max(lr, rr)\n\n\nclass Corr(PairRolling):\n    \"\"\"Rolling Correlation\n\n    Parameters\n    ----------\n    feature_left : Expression\n        feature instance\n    feature_right : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling correlation of two input features\n    \"\"\"\n\n    def __init__(self, feature_left, feature_right, N):\n        super(Corr, self).__init__(feature_left, feature_right, N, \"corr\")\n\n    def _load_internal(self, instrument, start_index, end_index, freq):\n        res = super(Corr, self)._load_internal(instrument, start_index, end_index, freq)\n\n        # NOTE: Load uses MemCache, so calling load again will not cause performance degradation\n        series_left = self.feature_left.load(instrument, start_index, end_index, freq)\n        series_right = 
self.feature_right.load(instrument, start_index, end_index, freq)\n        res.loc[\n            np.isclose(series_left.rolling(self.N, min_periods=1).std(), 0, atol=2e-05)\n            | np.isclose(series_right.rolling(self.N, min_periods=1).std(), 0, atol=2e-05)\n        ] = np.nan\n        return res\n\n\nclass Cov(PairRolling):\n    \"\"\"Rolling Covariance\n\n    Parameters\n    ----------\n    feature_left : Expression\n        feature instance\n    feature_right : Expression\n        feature instance\n    N : int\n        rolling window size\n\n    Returns\n    ----------\n    Expression\n        a feature instance with rolling covariance of two input features\n    \"\"\"\n\n    def __init__(self, feature_left, feature_right, N):\n        super(Cov, self).__init__(feature_left, feature_right, N, \"cov\")\n" ]
[ [ "numpy.log", "pandas.Series", "numpy.isnan", "scipy.stats.percentileofscore", "numpy.seterr", "numpy.nansum", "numpy.nanmean", "numpy.where" ] ]
sourcery-ai-bot/hpc-in-a-day
[ "51f5fafb04ed3cc0fa681055fba38593fc891f53" ]
[ "_episodes/code/03_parallel_jobs/generate_scrambled_data.py" ]
[ "#!/usr/bin/env python3\nimport sys\nimport numpy as np\n\nnp.random.seed(2017)\n\ndef inside_circle(total_count):\n\n x = np.float32(np.random.uniform(size=total_count))\n y = np.float32(np.random.uniform(size=total_count))\n\n radii = np.sqrt(x*x + y*y)\n\n count = len(radii[np.where(radii<=1.0)])\n\n return count, x, y\n\nif __name__=='__main__':\n\n n_samples = 4*1024*1024\n\n file_name = \"pi_estimate.log\"\n\n if \"help\" in \" \".join(sys.argv):\n print(\"usage: generate_scrambled_data.py <optional:file_name>\")\n print(\"\"\"\\n script generates file <file_name> of 0.5 GB\n that contains blocks of random bytes followed\n by a newline and an estimate of pi\"\"\")\n sys.exit(0)\n\n if len(sys.argv) > 1:\n file_name = sys.argv[1]\n\n sizeof = np.dtype(np.float32).itemsize\n targetsize_byte = .5*1024*1024*1024\n string_to_write = \"\"\n loop_count = 0\n\n while len(string_to_write) < targetsize_byte :\n count, data, more = inside_circle(n_samples)\n string_to_write += str(data.tostring())\n string_to_write += str(more.tostring())\n pi_estimate = (4.0 * count / n_samples)\n string_to_write += (\"\\n%f\\n\" % pi_estimate)\n if loop_count % 10 == 0:\n print(\">> %f GB generated\" % (len(string_to_write)/(1024*1024*1024.)))\n loop_count += 1\n\n print(\">> storing %f GB to %s\" % (len(string_to_write)/(1024*1024*1024.),file_name))\n with open(file_name,\"w\") as fh:\n fh.writelines(string_to_write)\n sys.exit(0)\n" ]
[ [ "numpy.sqrt", "numpy.random.seed", "numpy.dtype", "numpy.random.uniform", "numpy.where" ] ]
salesforce/CASPI
[ "3e4cd23f4f3d1fa7132ba89805366472c9fe5983" ]
[ "RewardLearning.py" ]
[ "from gensim.models.keyedvectors import KeyedVectors\nimport json\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.metrics import *\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.optimizers import *\nfrom tensorflow.keras import backend as K\nimport os\nfrom random import shuffle\nimport re\nimport time\nfrom tqdm import tqdm\nimport traceback\n\nimport numpy as np\nimport pandas as pd\nfrom argparse import ArgumentParser\nimport random\n\n\nclass RewardLearning():\n \n def __init__(self, fold, seed, action_space, metric):\n self.reward_report_template = 'reward_report_{}_{}_.*.csv'\n word_embed_file_path='./damd_multiwoz/data/embeddings/glove.6B.100d.w2v.txt'\n \n self.train_val_fraction=0.8\n self.EMBED_DIM=100\n self.HIDDEN_DIM=100\n self.MAX_POP=10\n self.MAX_TIME_STEP=30\n self.MAX_GOAL_LEN=50\n self.MAX_STATE_LEN=50\n self.MAX_ACT_LEN=50\n self.reduce_lr_patience = 10\n self.es_patience = 25\n self.train_reward_split=[0.8,0.9][1]\n \n self.batch_size = 50\n self.num_epoch = 100\n \n self.fold = fold\n self.metric = metric\n self.TRAIN_ON=action_space\n \n self.root_path = './damd_multiwoz'\n self.dataset=json.loads(open(os.path.join(self.root_path,'data/multi-woz-processed/data_for_damd_reward_{}.json'.format(self.fold)),'r').read())\n self.glove_kv = KeyedVectors.load_word2vec_format(word_embed_file_path, binary=False, unicode_errors='ignore')\n\n self.reward_folder_path= os.path.join(self.root_path,'data/multi-woz-oppe/reward')\n self.data_for_damd = json.loads(open(os.path.join(self.root_path,'data/multi-woz-processed/data_for_damd.json'), 'r').read())\n \n self.processed_reward_rollouts = None\n self.embed_cache = {}\n\n\n\n def metric_score(self, sucess,match,bleu):\n return sucess+match+2*bleu/100\n \n def load_reward_rollouts(self):\n reward_record_file_prefix = self.reward_report_template.format(self.fold, self.metric)\n print('reward_record_file_prefix:',reward_record_file_prefix)\n rollouts_processed = {}\n for file in os.listdir(self.reward_folder_path):\n if re.search(reward_record_file_prefix,file):\n print('file:',file)\n reward_record_path = os.path.join(self.reward_folder_path,file)\n df = pd.read_csv(reward_record_path)\n for _,row in df.iterrows():\n dial_id = row['dial_id']\n rollout = json.loads(row['rollout'])\n turn_nums = [int(z) for z in rollout.keys()]\n turn_nums = sorted(turn_nums)\n \n if dial_id not in rollouts_processed:\n rollouts_processed[dial_id]={}\n rollouts_processed[dial_id]['gen']=[]\n \n dia_rollout={}\n rollouts_processed[dial_id]['gen'].append(dia_rollout)\n dia_rollout['score'] = self.metric_score(row['success'],row['match'],row['bleu'])\n \n dia_rollout['rollout']=[]\n for turn_num in turn_nums:\n true_act_prob = [1.]\n if 'aspn_prob' in rollout[str(turn_num)]:\n true_act_prob = np.exp(rollout[str(turn_num)]['aspn_prob']).tolist()\n dia_rollout['rollout'].append({\n 'tn':turn_num,\n 'act':rollout[str(turn_num)]['aspn_gen'],\n 'true_act':rollout[str(turn_num)]['aspn'],\n 'resp':rollout[str(turn_num)]['resp_gen'],\n 'true_act_prob':true_act_prob\n })\n \n \n if 'gt' not in rollouts_processed[dial_id]:\n rollouts_processed[dial_id]['gt']={}\n rollouts_processed[dial_id]['gt']['score']=4\n rollouts_processed[dial_id]['gt']['rollout']=[]\n for turn_num in turn_nums:\n rollouts_processed[dial_id]['gt']['rollout'].append({\n 'tn':turn_num,\n 'act':rollout[str(turn_num)]['aspn'],\n 
'resp':rollout[str(turn_num)]['resp'],\n 'true_act':rollout[str(turn_num)]['aspn'],\n 'true_act_prob':[1]\n })\n \n self.processed_reward_rollouts = rollouts_processed\n self.dial_ids = list(self.processed_reward_rollouts.keys())\n self.load_gt_dia_logs(self.dial_ids)\n return rollouts_processed\n\n def load_gt_dia_logs(self, dial_ids):\n gt_dia_logs={}\n for dial_id in dial_ids:\n goal = self.goal_as_st(self.dataset[dial_id]['goal'])\n gt_dia_log={\n 'goal':goal\n }\n gt_dia_logs[dial_id]=gt_dia_log\n for turn in self.dataset[dial_id]['log']:\n gt_dia_log[turn['turn_num']]={}\n gt_dia_log[turn['turn_num']]['state']='begin '+turn['cons_delex']+' end'\n \n self.gt_dia_logs = gt_dia_logs \n \n def pad_sentence(self, token_embeds,max_seq_len):\n token_embeds = token_embeds.copy()\n token_embeds = token_embeds[:max_seq_len].tolist()\n \n for i in range(max_seq_len-len(token_embeds)):\n token_embeds.append(np.zeros(self.EMBED_DIM))\n \n token_embeds = np.array(token_embeds)\n return token_embeds\n \n def pad_time_step(self, sentence_embeds,max_seq_len):\n sentence_embeds = sentence_embeds[:self.MAX_TIME_STEP]\n time_padded_sentences = np.array(sentence_embeds)\n if self.MAX_TIME_STEP>len(sentence_embeds):\n pad = np.zeros((self.MAX_TIME_STEP-len(sentence_embeds),max_seq_len,self.EMBED_DIM))\n time_padded_sentences = np.concatenate([sentence_embeds,pad])\n return time_padded_sentences\n \n def get_embedding(self, token):\n token = token.lower()\n token = token.replace('reqt','request')\\\n .replace('arriveby','arrive_by')\\\n .replace('towninfo','town_info')\\\n .replace('pricerange','price_range')\\\n .replace('leaveat','leave_at')\\\n .replace('mutliple','multiple')\\\n .replace('dontcare','dont_care')\\\n .replace('-','')\\\n .replace('addres','address')\\\n .replace('addressss','address')\\\n .replace('addresss','address')\n token = token.strip()\n if token in self.embed_cache:\n return self.embed_cache[token]\n if token in self.glove_kv:\n embedding = self.glove_kv[token]\n else:\n if '_' in token:\n embeds = []\n for sub_token in token.split('_'):\n embeds.append(self.get_embedding(sub_token))\n embedding = np.mean(embeds,axis=0)\n else:\n #print('token not in embed:',token)\n embedding = self.glove_kv['unk']\n self.embed_cache[token]=embedding\n return embedding\n \n def tokens_to_embeddings(self, tokens):\n embeddings = []\n for token in tokens:\n embeddings.append(self.get_embedding(token))\n return np.array(embeddings)\n \n def tokenize(self, sentence):\n sentence=sentence.lower()\n sentence = sentence.replace('[',' ').replace(']',' ').replace(':','').replace(' ',' ')\n \n return sentence.split()\n \n def goal_as_st(self, goal):\n return str(goal).replace(\"'\",' ')\\\n .replace(',',' , ').replace('{',' ')\\\n .replace('}',' ').replace(' ',' ')\n \n def sample_roll_out(self, dial_id):\n start = time.time()\n gen_rollouts_info = self.processed_reward_rollouts[dial_id]['gen']\n gt_rollout_info = self.processed_reward_rollouts[dial_id]['gt']\n rollout_infos = np.random.choice(gen_rollouts_info+[gt_rollout_info], size=2, replace=False)\n #print(rollout_infos)\n \n dia_log= self.gt_dia_logs[dial_id]\n goal = dia_log['goal']\n \n goal = self.tokenize(goal)\n goal = self.tokens_to_embeddings(goal)\n goal = self.pad_sentence(goal, self.MAX_GOAL_LEN)\n \n rollout_pairs = []\n for rollout_info in rollout_infos:\n acts = []\n states = []\n for turn in rollout_info['rollout']:\n tn = turn['tn']\n act = turn[self.TRAIN_ON]#turn['act']\n \n if tn not in self.gt_dia_logs[dial_id]:\n break\n \n state 
= self.gt_dia_logs[dial_id][tn]['state']\n \n# if random.uniform(0,1)>0.95:\n# print('act:',act)\n# print('state:',state)\n act = self.tokenize(act)\n state = self.tokenize(state)\n \n act = self.tokens_to_embeddings(act)\n state = self.tokens_to_embeddings(state)\n \n act = self.pad_sentence(act,self.MAX_ACT_LEN)\n state = self.pad_sentence(state,self.MAX_STATE_LEN)\n \n acts.append(act)\n states.append(state)\n \n acts=self.pad_time_step(acts,self.MAX_ACT_LEN)\n states=self.pad_time_step(states,self.MAX_STATE_LEN)\n \n score=rollout_info['score']\n rollout_pairs.append([goal,states,acts,score])\n prob = rollout_pairs[0][-1]/(rollout_pairs[0][-1]+rollout_pairs[1][-1]+1e-20)\n rollout_pairs[0][-1]=prob\n rollout_pairs[1][-1]=1-prob\n \n return rollout_pairs\n \n def get_data_gen(self, sample_roll_out):\n def data_gen(dial_ids,batch_size):\n try:\n s1s = []\n a1s = []\n g1s = []\n \n s2s = []\n a2s = []\n g2s = []\n \n probs = []\n while True:\n shuffle(dial_ids)\n for dial_id in dial_ids:\n rollout_pair = sample_roll_out(dial_id)\n g1,s1,a1,p1=rollout_pair[0]\n g2,s2,a2,p2=rollout_pair[1]\n \n s1s.append(s1)\n a1s.append(a1)\n g1s.append(g1)\n s2s.append(s2)\n a2s.append(a2)\n g2s.append(g2)\n \n probs.append([p1,p2])\n \n if len(s1s)>=batch_size:\n s1s = np.array(s1s)\n a1s = np.array(a1s)\n g1s = np.array(g1s)\n \n s2s = np.array(s2s)\n a2s = np.array(a2s)\n g2s = np.array(g2s)\n \n #print('as:',np.sum(a1s-a2s))\n \n probs = np.array(probs)\n yield [s1s,a1s,g1s,s2s,a2s,g2s],probs\n s1s = []\n a1s = []\n g1s = []\n \n s2s = []\n a2s = []\n g2s = []\n \n probs = []\n \n except Exception as e:\n print(traceback.format_exc())\n raise e\n \n return data_gen\n \n \n def build_reward_model(self):\n s_bilstm = Bidirectional(LSTM(self.HIDDEN_DIM)) \n a_bilstms = [Conv1D(self.HIDDEN_DIM,1,activation='tanh'),\n Conv1D(self.HIDDEN_DIM,1,activation='tanh'),\n Lambda(lambda z:K.mean(z,axis=-2))]\n a_bilstms=[Bidirectional(LSTM(self.HIDDEN_DIM))] \n g_bilstm = Bidirectional(LSTM(self.HIDDEN_DIM)) \n \n \n reward_convs=[]\n reward_convs.append(Dense(self.HIDDEN_DIM,activation='tanh'))\n reward_convs.append(Dense(self.HIDDEN_DIM,activation='tanh'))\n reward_convs.append(Dense(self.HIDDEN_DIM,activation='tanh'))\n reward_convs.append(Dense(1,activation='sigmoid'))\n \n s = Input(shape=(self.MAX_STATE_LEN, self.EMBED_DIM))\n a = Input(shape=(self.MAX_ACT_LEN, self.EMBED_DIM))\n g = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM))\n \n s_h = s_bilstm(s)\n a_h = a\n for layer in a_bilstms:\n a_h = layer(a_h)\n g_h = g_bilstm(g)\n \n #s_h = Lambda(lambda z:z*1e-20)(s_h)\n #g_h = Lambda(lambda z:z*1e-20)(g_h)\n \n reward = Concatenate(axis=-1)([s_h,a_h,g_h])\n for reward_conv in reward_convs:\n reward = reward_conv(reward)\n reward = Lambda(lambda z:K.squeeze(z,axis=-1))(reward)\n \n model_reward = Model(inputs=[s,a,g],outputs=reward)\n model_reward.summary()\n return model_reward\n \n def _build_reward_flatten_model(self):\n x = Input(shape=(self.MAX_STATE_LEN + self.MAX_ACT_LEN + self.MAX_GOAL_LEN, self.EMBED_DIM))\n s=Lambda(lambda z:z[:,:self.MAX_STATE_LEN])(x)\n a=Lambda(lambda z:z[:,self.MAX_STATE_LEN : self.MAX_STATE_LEN + self.MAX_ACT_LEN])(x)\n g=Lambda(lambda z:z[:,self.MAX_STATE_LEN + self.MAX_ACT_LEN:])(x)\n \n reward = self.model_reward([s,a,g])\n model_reward_flatten = Model(x,reward)\n model_reward_flatten.summary()\n return model_reward_flatten\n \n def _build_cummulative_reward_model(self):\n \n model_reward_flatten = self._build_reward_flatten_model()\n \n s = Input(shape=(self.MAX_TIME_STEP, 
self.MAX_STATE_LEN, self.EMBED_DIM))\n a = Input(shape=(self.MAX_TIME_STEP, self.MAX_ACT_LEN, self.EMBED_DIM))\n g = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM))\n \n g_padded = Lambda(lambda z:K.expand_dims(z,axis=1))(g)\n g_padded = Lambda(lambda z:K.repeat_elements(z, self.MAX_TIME_STEP,axis=1))(g_padded)\n \n comb_inp = Concatenate(axis=2)([s,a,g_padded])\n \n rewards = TimeDistributed(model_reward_flatten)(comb_inp)\n \n \n returns = Lambda(lambda z:K.sum(z,axis=1,keepdims=True))(rewards)\n \n model_cummulative_reward = Model([s,a,g],returns)\n model_cummulative_reward.summary()\n return model_cummulative_reward\n \n def _build_preferential_model(self):\n \n model_cummulative_reward = self._build_cummulative_reward_model()\n \n s_1 = Input(shape=(self.MAX_TIME_STEP, self.MAX_STATE_LEN, self.EMBED_DIM))\n a_1 = Input(shape=(self.MAX_TIME_STEP, self.MAX_ACT_LEN, self.EMBED_DIM))\n g_1 = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM))\n \n s_2 = Input(shape=(self.MAX_TIME_STEP, self.MAX_STATE_LEN, self.EMBED_DIM))\n a_2 = Input(shape=(self.MAX_TIME_STEP, self.MAX_ACT_LEN, self.EMBED_DIM))\n g_2 = Input(shape=(self.MAX_GOAL_LEN, self.EMBED_DIM))\n \n chi_1 = model_cummulative_reward([s_1,a_1,g_1])\n chi_2 = model_cummulative_reward([s_2,a_2,g_2])\n \n chi = Concatenate()([chi_1,chi_2])\n #Pref = Activation('softmax')(chi)\n Pref = Lambda(lambda z:z/K.sum(z,axis=-1,keepdims=True))(chi)\n \n model_preferential = Model([s_1,a_1,g_1,s_2,a_2,g_2],Pref)\n model_preferential.summary()\n return model_preferential\n \n \n \n def get_reward(self, input_seq):\n g = []\n s = []\n a = []\n for goal,state, aspn, resp in input_seq:\n \n state_tokens = self.tokenize(state)\n state_token_embeds = self.tokens_to_embeddings(state_tokens)\n state_token_embeds = self.pad_sentence(state_token_embeds, self.MAX_STATE_LEN)\n s.append(state_token_embeds)\n \n if self.TRAIN_ON=='act':\n action_tokens = self.tokenize(aspn)\n elif self.TRAIN_ON=='resp':\n action_tokens = self.tokenize(resp)\n else:\n raise Exception('Invalid TRAIN_ON selection')\n action_token_embeds = self.tokens_to_embeddings(action_tokens)\n action_token_embeds = self.pad_sentence(action_token_embeds, self.MAX_ACT_LEN)\n a.append(action_token_embeds)\n \n goal_tokens = self.tokenize(goal)\n goal_token_embeds = self.tokens_to_embeddings(goal_tokens)\n goal_token_embeds = self.pad_sentence(goal_token_embeds, self.MAX_GOAL_LEN)\n g.append(goal_token_embeds)\n \n rewards = self.model_reward.predict([np.array(s),np.array(a),np.array(g)])\n #print('aspn:',aspn,':',reward)\n \n return rewards\n \n def get_Gs(self, gamma=0.9):\n fn_Gs = {}\n num_fns = len(self.data_for_damd.keys())\n for ex_num,fn in enumerate(tqdm(reversed(list(self.data_for_damd.keys())),total=num_fns)):\n #print('%:{0.2f}'.format(ex_num/num_fns),end='')\n next_state=None\n \n fn_Gs[fn] = {}\n goal = self.goal_as_st(self.data_for_damd[fn]['goal'])\n \n turn_num_inp_seq = {}\n for turn in self.data_for_damd[fn]['log']:\n turn_num = turn['turn_num']\n resp = turn['resp']\n state = 'begin '+turn['cons_delex']+' end'#turn['cons_delex']\n aspn = turn['sys_act']\n \n turn_num_inp_seq[turn_num]=[goal,state,aspn,resp]\n \n reverse_turn_nums = sorted(list(turn_num_inp_seq.keys()),reverse=True)\n inp_seq = []\n for turn_num in reverse_turn_nums:\n inp_seq.append(turn_num_inp_seq[turn_num])\n \n rewards = self.get_reward(inp_seq)\n G = 0\n for turn_num,reward in zip(reverse_turn_nums,rewards):\n G = reward + gamma*G\n fn_Gs[fn][turn_num] = {\n 'G':G,\n 'gamma':gamma\n }\n return fn_Gs\n\n def 
compile_models(self):\n self.model_reward = self.build_reward_model()\n self.model_preferential = self._build_preferential_model()\n self.model_preferential.compile(loss='categorical_crossentropy', optimizer='adam')\n\n def train_model(self):\n shuffle(self.dial_ids)\n train_dial_ids = self.dial_ids[:int(len(self.dial_ids) * self.train_val_fraction)]\n val_dial_ids = self.dial_ids[int(len(self.dial_ids) * self.train_val_fraction):]\n \n train_num_examples = len(train_dial_ids)\n valid_num_examples = len(val_dial_ids)\n\n print('train_num_examples:',train_num_examples)\n print('valid_num_examples:',valid_num_examples)\n \n train_num_examples_per_epoch = max(3,int((train_num_examples/self.batch_size)/10))\n \n train_data_gen = self.get_data_gen(self.sample_roll_out)(train_dial_ids, self.batch_size)\n val_data_gen = self.get_data_gen(self.sample_roll_out)(val_dial_ids, self.batch_size)\n \n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=self.reduce_lr_patience, min_lr=0.000001,verbose=1)\n early_stopping = EarlyStopping(monitor='val_loss', patience=self.es_patience, verbose=1, restore_best_weights=True)\n\n self.model_preferential.fit_generator(train_data_gen,\n steps_per_epoch = train_num_examples_per_epoch, \n validation_data = val_data_gen,\n validation_steps = max(1,int(valid_num_examples/(self.batch_size))),\n callbacks = [reduce_lr,early_stopping],\n epochs = self.num_epoch, \n )\n\n def save_returns(self, gamma=0.):\n num_fns = len(self.data_for_damd.keys())\n fn_Gs = self.get_Gs(gamma=gamma)\n fn_G_file_name = 'fn_Gs_{}_{}_{}_{}.json'.format(self.fold, gamma, self.TRAIN_ON, self.metric)\n \n print(fn_G_file_name)\n fn_Gs_file_path = os.path.join(self.root_path,'data','multi-woz-oppe',fn_G_file_name)\n print('fn_Gs_file_path:',fn_Gs_file_path)\n with open(fn_Gs_file_path,'w') as f:\n json.dump(fn_Gs,f)\n\n \nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"-s\", \"--seed\", dest=\"seed\",\n default=11,\n type=int,\n help=\"seed\")\n parser.add_argument(\"-K\", \"--folds\",\n dest=\"folds\", default=10,\n type=int,\n help=\"Number of folds\")\n parser.add_argument(\"-a\", \"--action_space\",\n dest=\"action_space\",\n choices={\"act\", \"resp\"},\n default='act',\n help=\"action space. can either be act or resp\")\n parser.add_argument(\"-m\", \"--metric\",\n dest=\"metric\",\n choices={\"hard\", \"soft\"},\n default='soft',\n help=\"metric used for pairwise reward candidate generation\")\n parser.add_argument(\"-g\", \"--gamma\",\n dest=\"gamma\",\n default=0.0,\n type=float,\n help=\"The discount factor used in reward learning\")\n args = parser.parse_args()\n \n print('param:',args) \n rewardLearning = RewardLearning(args.folds, args.seed, args.action_space, args.metric)\n rewardLearning.load_reward_rollouts()\n rewardLearning.compile_models()\n rewardLearning.train_model()\n rewardLearning.save_returns(args.gamma)\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.keras.backend.repeat_elements", "pandas.read_csv", "tensorflow.keras.models.Model", "numpy.random.choice", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.backend.sum", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.backend.squeeze", "numpy.concatenate", "numpy.mean", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.backend.mean", "numpy.array", "numpy.zeros" ] ]
ChangminWu/ExpanderGNN
[ "a88bea8ee15d902be2881ec59ec37a8b092a0cd8" ]
[ "nets/citation_node_classification/gin_net.py" ]
[ "import torch.nn as nn\n\nfrom dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling\n\nfrom layers.gin_layer import GINLayer\nfrom layers.expander.expander_layer import LinearLayer, MultiLinearLayer\nfrom utils import activations\n\n\nclass GINNet(nn.Module):\n def __init__(self, net_params):\n super(GINNet, self).__init__()\n indim = net_params[\"in_dim\"]\n hiddim = net_params[\"hidden_dim\"]\n\n n_classes = net_params[\"n_classes\"]\n dropout = net_params[\"dropout\"]\n self.n_layers = net_params[\"L\"]\n\n self.graph_pool = net_params[\"graph_pool\"]\n self.neighbor_pool = net_params[\"neighbor_pool\"]\n\n self.residual = net_params[\"residual\"]\n self.batch_norm = net_params[\"batch_norm\"]\n self.n_mlp_layer = net_params[\"mlp_layers\"]\n\n self.activation = activations(net_params[\"activation\"])\n self.linear_type = net_params[\"linear_type\"]\n self.density = net_params[\"density\"]\n self.sampler = net_params[\"sampler\"]\n self.bias = net_params[\"bias\"]\n self.learn_eps = net_params[\"learn_eps\"]\n\n linear_params = {\"density\": self.density, \"sampler\": self.sampler}\n\n self.layers = nn.ModuleList()\n linear_transform = \\\n MultiLinearLayer(indim, hiddim,\n activation=self.activation,\n batch_norm=self.batch_norm,\n num_layers=self.n_mlp_layer,\n hiddim=hiddim,\n bias=self.bias,\n linear_type=self.linear_type,\n **linear_params)\n self.layers.append(GINLayer(linear_transform,\n aggr_type=self.neighbor_pool,\n activation=self.activation,\n dropout=dropout,\n batch_norm=self.batch_norm,\n residual=self.residual,\n learn_eps=self.learn_eps))\n\n for i in range(self.n_layers-1):\n linear_transform = \\\n MultiLinearLayer(hiddim, hiddim,\n activation=self.activation,\n batch_norm=self.batch_norm,\n num_layers=self.n_mlp_layer,\n hiddim=hiddim,\n bias=self.bias,\n linear_type=self.linear_type,\n **linear_params)\n self.layers.append(GINLayer(linear_transform,\n aggr_type=self.neighbor_pool,\n activation=self.activation,\n dropout=dropout,\n batch_norm=self.batch_norm,\n residual=self.residual,\n learn_eps=self.learn_eps))\n\n\n\n self.linear_predictions = nn.ModuleList()\n self.linear_predictions.append(\n LinearLayer(indim,\n n_classes, bias=self.bias,\n linear_type=\"regular\",\n **linear_params))\n\n for _ in range(self.n_layers):\n self.linear_predictions.append(\n LinearLayer(hiddim,\n n_classes, bias=self.bias,\n linear_type=\"regular\",\n **linear_params))\n\n def forward(self, g, h, e):\n with g.local_scope():\n g = g.to(h.device)\n\n hidden_rep = [h]\n\n for i in range(self.n_layers):\n h = self.layers[i](g, h)\n hidden_rep.append(h)\n\n score_over_layer = 0\n for i, h in enumerate(hidden_rep):\n score_over_layer += self.linear_predictions[i](h)\n\n return score_over_layer\n\n def loss(self, pred, label):\n criterion = nn.CrossEntropyLoss()\n loss = criterion(pred, label)\n return loss" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.ModuleList" ] ]
raafatzahran/Udacity-DataScience
[ "3e875838cb602865d8b9786fbe940d0704771fca" ]
[ "Lesson7-ensemble_methods/ensemnle-methods/venv/lib/python3.6/site-packages/imblearn/utils/tests/test_validation.py" ]
[ "\"\"\"Test for the validation helper\"\"\"\n# Authors: Guillaume Lemaitre <[email protected]>\n# Christos Aridas\n# License: MIT\n\nfrom collections import Counter\n\nfrom pytest import raises\nimport numpy as np\n\nfrom sklearn.neighbors.base import KNeighborsMixin\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom imblearn.utils.testing import warns\n\nfrom imblearn.utils import check_neighbors_object\nfrom imblearn.utils import check_ratio\n\n\ndef test_check_neighbors_object():\n name = 'n_neighbors'\n n_neighbors = 1\n estimator = check_neighbors_object(name, n_neighbors)\n assert issubclass(type(estimator), KNeighborsMixin)\n assert estimator.n_neighbors == 1\n estimator = check_neighbors_object(name, n_neighbors, 1)\n assert issubclass(type(estimator), KNeighborsMixin)\n assert estimator.n_neighbors == 2\n estimator = NearestNeighbors(n_neighbors)\n assert estimator is check_neighbors_object(name, estimator)\n n_neighbors = 'rnd'\n with raises(ValueError, match=\"has to be one of\"):\n check_neighbors_object(name, n_neighbors)\n\n\ndef test_check_ratio_error():\n with raises(ValueError, match=\"'sampling_type' should be one of\"):\n check_ratio('auto', np.array([1, 2, 3]), 'rnd')\n\n error_regex = \"The target 'y' needs to have more than 1 class.\"\n with raises(ValueError, match=error_regex):\n check_ratio('auto', np.ones((10, )), 'over-sampling')\n\n error_regex = \"When 'ratio' is a string, it needs to be one of\"\n with raises(ValueError, match=error_regex):\n check_ratio('rnd', np.array([1, 2, 3]), 'over-sampling')\n\n\ndef test_ratio_all_over_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n for each in ('all', 'auto'):\n assert check_ratio(each, y, 'over-sampling') == {1: 50, 2: 0, 3: 75}\n\n\ndef test_ratio_all_under_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = check_ratio('all', y, 'under-sampling')\n assert ratio == {1: 25, 2: 25, 3: 25}\n\n\ndef test_ratio_majority_over_sampling():\n error_regex = \"'ratio'='majority' cannot be used with over-sampler.\"\n with raises(ValueError, match=error_regex):\n check_ratio('majority', np.array([1, 2, 3]), 'over-sampling')\n\n\ndef test_ratio_majority_under_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = check_ratio('majority', y, 'under-sampling')\n assert ratio == {2: 25}\n\n\ndef test_ratio_not_minority_over_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = check_ratio('not minority', y, 'over-sampling')\n assert ratio == {1: 50, 2: 0}\n\n\ndef test_ratio_not_minority_under_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = check_ratio('not minority', y, 'under-sampling')\n assert ratio == {1: 25, 2: 25}\n ratio = check_ratio('auto', y, 'under-sampling')\n assert ratio == {1: 25, 2: 25}\n\n\ndef test_ratio_minority_over_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = check_ratio('minority', y, 'over-sampling')\n assert ratio == {3: 75}\n\n\ndef test_ratio_minority_under_sampling():\n error_regex = \"'ratio'='minority' cannot be used with under-sampler.\"\n with raises(ValueError, match=error_regex):\n check_ratio('minority', np.array([1, 2, 3]), 'under-sampling')\n\n\ndef test_ratio_dict_error():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = {1: -100, 2: 50, 3: 25}\n with raises(ValueError, match=\"in a class cannot be negative.\"):\n check_ratio(ratio, y, 'under-sampling')\n ratio = {10: 10}\n with raises(ValueError, match=\"are not present in the data.\"):\n check_ratio(ratio, y, 
'over-sampling')\n ratio = {1: 45, 2: 100, 3: 70}\n error_regex = (\"With over-sampling methods, the number of samples in a\"\n \" class should be greater or equal to the original number\"\n \" of samples. Originally, there is 50 samples and 45\"\n \" samples are asked.\")\n with raises(ValueError, match=error_regex):\n check_ratio(ratio, y, 'over-sampling')\n\n error_regex = (\"With under-sampling methods, the number of samples in a\"\n \" class should be less or equal to the original number of\"\n \" samples. Originally, there is 25 samples and 70 samples\"\n \" are asked.\")\n with raises(ValueError, match=error_regex):\n check_ratio(ratio, y, 'under-sampling')\n\n\ndef test_ratio_dict_over_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = {1: 70, 2: 100, 3: 70}\n ratio_ = check_ratio(ratio, y, 'over-sampling')\n assert ratio_ == {1: 20, 2: 0, 3: 45}\n ratio = {1: 70, 2: 140, 3: 70}\n expected_msg = (\"After over-sampling, the number of samples \\(140\\) in\"\n \" class 2 will be larger than the number of samples in the\"\n \" majority class \\(class #2 -> 100\\)\")\n with warns(UserWarning, expected_msg):\n check_ratio(ratio, y, 'over-sampling')\n\n\ndef test_ratio_dict_under_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = {1: 30, 2: 45, 3: 25}\n ratio_ = check_ratio(ratio, y, 'under-sampling')\n assert ratio_ == ratio\n\n\ndef test_ratio_float_error():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = -10\n error_regex = \"When 'ratio' is a float, it should in the range\"\n with raises(ValueError, match=error_regex):\n check_ratio(ratio, y, 'under-sampling')\n ratio = 10\n with raises(ValueError, match=error_regex):\n check_ratio(ratio, y, 'under-sampling')\n\n\ndef test_ratio_float_over_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = 0.5\n ratio_ = check_ratio(ratio, y, 'over-sampling')\n assert ratio_ == {1: 0, 3: 25}\n\n\ndef test_ratio_float_under_sampling():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n ratio = 0.5\n ratio_ = check_ratio(ratio, y, 'under-sampling')\n assert ratio_ == {1: 50, 2: 50}\n\n\ndef test_ratio_callable():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n\n def ratio_func(y):\n # this function could create an equal number of samples\n target_stats = Counter(y)\n n_samples = max(target_stats.values())\n return {key: int(n_samples)\n for key in target_stats.keys()}\n\n ratio_ = check_ratio(ratio_func, y, 'over-sampling')\n assert ratio_ == {1: 50, 2: 0, 3: 75}\n\n\ndef test_ratio_callable_args():\n y = np.array([1] * 50 + [2] * 100 + [3] * 25)\n multiplier = {1: 1.5, 2: 1, 3: 3}\n\n def ratio_func(y, multiplier):\n \"\"\"samples such that each class will be affected by the multiplier.\"\"\"\n target_stats = Counter(y)\n return {key: int(values * multiplier[key])\n for key, values in target_stats.items()}\n\n ratio_ = check_ratio(ratio_func, y, 'over-sampling',\n multiplier=multiplier)\n assert ratio_ == {1: 25, 2: 0, 3: 50}\n" ]
[ [ "numpy.array", "sklearn.neighbors.NearestNeighbors", "numpy.ones" ] ]
AndreeaDRacovita/face-recognition
[ "cd1ae58b785034d4e1fdf2c01bc4e32086f099e4" ]
[ "face-recognition.py" ]
[ "import numpy as np\nimport cv2 as cv\nimport face_recognition\n\nvideo_capture = cv.VideoCapture(0, cv.CAP_DSHOW)\n\n# Haar cascade face recognition\nhaar_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')\nface_recognizer = cv.face.LBPHFaceRecognizer_create()\nface_recognizer.read('face_trained.yml')\npeople = ['Andreea', 'Black Widow', 'Peter Parker', 'Tony Stark']\n\n# dlib Face Recognition\nwidow_image = face_recognition.load_image_file('known_people/Black Widow.png')\nwidow_face_encoding = face_recognition.face_encodings(widow_image)[0]\n\nparker_image = face_recognition.load_image_file('known_people/Peter Parker.jpg')\nparker_face_encoding = face_recognition.face_encodings(parker_image)[0]\n\nstark_image = face_recognition.load_image_file('known_people/Tony Stark.jpg')\nstark_face_encoding = face_recognition.face_encodings(stark_image)[0]\n\nandreea_image = face_recognition.load_image_file('known_people/Andreea.png')\nandreea_face_encoding = face_recognition.face_encodings(andreea_image)[0]\n\nknown_face_encodings = [\n    widow_face_encoding,\n    parker_face_encoding,\n    stark_face_encoding,\n    andreea_face_encoding\n]\n\nknown_face_names = [\n    \"Black Widow\",\n    \"Peter Parker\",\n    \"Tony Stark\",\n    \"Andreea\"\n]\n\nface_locations = []\nface_encodings = []\nface_names = []\n\n\ndef haar_cascade_recognition(img):\n    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n    # Detect the face in the image\n    faces_rect = haar_cascade.detectMultiScale(gray, 1.1, 5)\n    for (x, y, w, h) in faces_rect:\n        faces_roi = gray[y:y + h, x:x + w]\n\n        label, confidence = face_recognizer.predict(faces_roi)\n\n        # Draw a box around the face\n        cv.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 1)\n\n        # Draw a label with a name below the face\n        cv.rectangle(img, (x, y+h), (x+w, y+h+20), (0, 255, 0), -1)\n        font = cv.FONT_HERSHEY_DUPLEX\n        cv.putText(img, str(people[label]), (x+2, y+h+15), font, 0.5, (255, 255, 255), 1)\n\n    cv.imshow('Haar Cascade Recognition', img)\n\n\ndef dlib_face_recognition(img):\n    rgb_frame = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n    face_locations = face_recognition.face_locations(rgb_frame)\n    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)\n\n    face_names = []\n    for face_encoding in face_encodings:\n        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n        name = \"Unknown\"\n\n        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n        best_match_index = np.argmin(face_distances)\n        if matches[best_match_index]:\n            name = known_face_names[best_match_index]\n\n        face_names.append(name)\n\n    for (top, right, bottom, left), name in zip(face_locations, face_names):\n        cv.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 1)\n\n        cv.rectangle(img, (left, bottom), (right, bottom+20), (0, 255, 0), -1)\n        font = cv.FONT_HERSHEY_DUPLEX\n        cv.putText(img, name, (left+6, bottom+15), font, 0.5, (255, 255, 255), 1)\n\n    cv.imshow('dlib Face Recognition', img)\n\n\nwhile True:\n    ret, frame = video_capture.read()\n    if not ret:\n        break\n\n    ratio = frame.shape[0]/frame.shape[1]\n    width = 400\n    height = int(width*ratio)\n    frame = cv.resize(frame, (width, height))\n\n    haar_cascade_recognition(frame.copy())\n    dlib_face_recognition(frame.copy())\n\n    if cv.waitKey(1) & 0xFF == ord('q'):\n        break\n\nvideo_capture.release()\ncv.destroyAllWindows()\n" ]
[ [ "numpy.argmin" ] ]
EvelynYihuiYang/MCMOT
[ "8ea20b57d836cc8f8efe1b13dead3e5d8511c16d" ]
[ "src/lib/utils/image.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport random\n\n\ndef flip(img):\n return img[:, :, ::-1].copy()\n\n\ndef transform_preds(coords, center, scale, output_size):\n \"\"\"\n :param coords:\n :param center:\n :param scale:\n :param output_size:\n :return:\n \"\"\"\n target_coords = np.zeros(coords.shape)\n\n # affine matrix: only scale and translation(no rotation)\n trans = get_affine_transform(center=center, scale=scale, rot=0, output_size=output_size, inv=1)\n\n for p in range(coords.shape[0]):\n target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)\n\n return target_coords\n\n\ndef get_affine_transform(center,\n scale,\n rot,\n output_size,\n shift=np.array([0, 0], dtype=np.float32),\n inv=0):\n \"\"\"\n :param center:\n :param scale:\n :param rot:\n :param output_size:\n :param shift:\n :param inv:\n :return:\n \"\"\"\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n scale = np.array([scale, scale], dtype=np.float32)\n\n scale_tmp = scale\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180.0\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans\n\n\ndef affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]\n\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n\n\ndef crop(img, center, scale, output_size, rot=0):\n trans = get_affine_transform(center, scale, rot, output_size)\n\n dst_img = cv2.warpAffine(img,\n trans,\n (int(output_size[0]), int(output_size[1])),\n flags=cv2.INTER_LINEAR)\n\n return dst_img\n\n\ndef gaussian_radius(det_size, min_overlap=0.7):\n height, width = det_size\n\n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 + sq1) / 2\n\n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n r2 = (b2 + sq2) / 2\n\n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * (height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n r3 = (b3 + sq3) / 2\n return min(r1, r2, r3)\n\n\ndef gaussian2D(shape, sigma=1):\n \"\"\"\n 
:param shape:\n    :param sigma:\n    :return:\n    \"\"\"\n    m, n = [(ss - 1.) / 2. for ss in shape]\n    y, x = np.ogrid[-m:m + 1, -n:n + 1]\n\n    h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n    h[h < np.finfo(h.dtype).eps * h.max()] = 0\n\n    return h\n\n\ndef draw_umich_gaussian(heatmap, center, radius, k=1):\n    \"\"\"\n    :param heatmap:\n    :param center:\n    :param radius:\n    :param k:\n    :return:\n    \"\"\"\n    diameter = 2 * radius + 1\n    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n\n    x, y = int(center[0]), int(center[1])\n\n    height, width = heatmap.shape[0:2]\n    left, right = min(x, radius), min(width - x, radius + 1)\n    top, bottom = min(y, radius), min(height - y, radius + 1)\n\n    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n    masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]\n    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug\n        np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n    return heatmap\n\n\ndef draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):\n    diameter = 2 * radius + 1\n    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n    value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)\n    dim = value.shape[0]\n    reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value\n    if is_offset and dim == 2:\n        delta = np.arange(diameter * 2 + 1) - radius\n        reg[0] = reg[0] - delta.reshape(1, -1)\n        reg[1] = reg[1] - delta.reshape(-1, 1)\n\n    x, y = int(center[0]), int(center[1])\n\n    height, width = heatmap.shape[0:2]\n\n    left, right = min(x, radius), min(width - x, radius + 1)\n    top, bottom = min(y, radius), min(height - y, radius + 1)\n\n    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n    masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]\n    masked_gaussian = gaussian[radius - top:radius + bottom,\n                      radius - left:radius + right]\n    masked_reg = reg[:, radius - top:radius + bottom,\n                 radius - left:radius + right]\n    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug\n        idx = (masked_gaussian >= masked_heatmap).reshape(\n            1, masked_gaussian.shape[0], masked_gaussian.shape[1])\n        masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg\n        regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap\n    return regmap\n\n\ndef draw_msra_gaussian(heatmap, center, sigma):\n    \"\"\"\n    :param heatmap:\n    :param center:\n    :param sigma:\n    :return:\n    \"\"\"\n    tmp_size = sigma * 3\n    mu_x = int(center[0] + 0.5)\n    mu_y = int(center[1] + 0.5)\n    w, h = heatmap.shape[0], heatmap.shape[1]\n    ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)] # upper left\n    br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)] # bottom right\n\n    if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:\n        return heatmap\n\n    size = 2 * tmp_size + 1\n    x = np.arange(0, size, 1, np.float32)\n    y = x[:, np.newaxis]\n    x0 = y0 = size // 2\n    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n    g_x = max(0, -ul[0]), min(br[0], h) - ul[0]\n    g_y = max(0, -ul[1]), min(br[1], w) - ul[1]\n    img_x = max(0, ul[0]), min(br[0], h)\n    img_y = max(0, ul[1]), min(br[1], w)\n    heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(\n        heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],\n        g[g_y[0]:g_y[1], g_x[0]:g_x[1]])\n\n    return heatmap\n\n\ndef grayscale(image):\n    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\ndef lighting_(data_rng, image, alphastd, eigval, eigvec):\n    alpha = data_rng.normal(scale=alphastd, 
size=(3,))\n image += np.dot(eigvec, eigval * alpha)\n\n\ndef blend_(alpha, image1, image2):\n image1 *= alpha\n image2 *= (1 - alpha)\n image1 += image2\n\n\ndef saturation_(data_rng, image, gs, gs_mean, var):\n alpha = 1. + data_rng.uniform(low=-var, high=var)\n blend_(alpha, image, gs[:, :, None])\n\n\ndef brightness_(data_rng, image, gs, gs_mean, var):\n alpha = 1. + data_rng.uniform(low=-var, high=var)\n image *= alpha\n\n\ndef contrast_(data_rng, image, gs, gs_mean, var):\n alpha = 1. + data_rng.uniform(low=-var, high=var)\n blend_(alpha, image, gs_mean)\n\n\ndef color_aug(data_rng, image, eig_val, eig_vec):\n functions = [brightness_, contrast_, saturation_]\n random.shuffle(functions)\n\n gs = grayscale(image)\n gs_mean = gs.mean()\n for f in functions:\n f(data_rng, image, gs, gs_mean, 0.4)\n lighting_(data_rng, image, 0.1, eig_val, eig_vec)\n" ]
[ [ "numpy.dot", "numpy.maximum", "numpy.sqrt", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.ones", "numpy.finfo", "numpy.float32", "numpy.array", "numpy.exp", "numpy.zeros" ] ]
denred0/mediapipe
[ "e9f4c7b54e68ccd8ea3bca100420d21b2ae6bbe1" ]
[ "denred0_src/inference.py" ]
[ "import os\nimport cv2\nimport sys\nimport time\nimport torch\n\nimport numpy as np\n\nimport albumentations as A\n\nfrom pathlib import Path\n\nfrom hand_detection import get_hands_rects\nfrom face_detection import get_face_rect\n\nfrom model import Model\nfrom albumentations.pytorch import ToTensorV2\n\ntransforms = A.Compose([\n    A.Normalize(mean=[0.485, 0.456, 0.406],\n                std=[0.229, 0.224, 0.225]), ToTensorV2()])\n\n\ndef get_model(checkpoint):\n    model = Model.load_from_checkpoint(checkpoint_path=checkpoint)\n    model = model.to(\"cuda\")\n    model.eval()\n    model.freeze()\n\n    return model\n\n\ndef inference(video_src_dir, video_dst_dir, video_name, checkpoint, image_size_model, frame_rate=18):\n    cap = cv2.VideoCapture(str(Path(video_src_dir).joinpath(video_name)))\n    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n    fps = int(cap.get(cv2.CAP_PROP_FPS))\n    print('fps', fps)\n\n    time_total = 0\n\n    print('frames_count', length)\n\n    if not cap.isOpened():\n        print(\"Error opening the video file. Please double check your \"\n              \"file path for typos. Or move the movie file to the same location as this script/notebook\")\n        sys.exit()\n\n    img_array = []\n    size_shape = ()\n\n    model = get_model(checkpoint=checkpoint)\n\n    while cap.isOpened():\n        # Read the video file.\n        ret, image = cap.read()\n\n        # If we got frames, show them.\n        if ret:\n\n            start = time.time()\n            # time.sleep(1 / fps)\n\n            # decrease resolution of video\n            scale_percent = 50 # percent of original size\n            width = int(image.shape[1] * scale_percent / 100)\n            height = int(image.shape[0] * scale_percent / 100)\n            dim = (width, height)\n            image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n\n            crops_for_prediction = {}\n            # get hands rects if hands found\n            hands_rects = get_hands_rects(image, min_detection_confidence=0.4)\n            for i, rect in enumerate(hands_rects):\n                crops_for_prediction['hand_' + str(i)] = rect\n                # crops_for_prediction.append(rect)\n\n            if crops_for_prediction:\n                print()\n            face_rect = get_face_rect(image, min_detection_confidence=0.4)\n            if face_rect:\n                crops_for_prediction['face'] = face_rect\n                # image = cv2.rectangle(image, (face_rect[0], face_rect[1]), (face_rect[2], face_rect[3]), (255, 255, 255), 6)\n\n                # crops_for_prediction.append(face_rect)\n\n            if face_rect:\n                print()\n                print('face_rect', face_rect)\n\n            for key, rect in crops_for_prediction.items():\n\n                # for rect in crops_for_prediction:\n\n                x = rect[0]\n                y = rect[1]\n                x2 = rect[2]\n                y2 = rect[3]\n\n                hand_img = image[y:y2, x:x2]\n\n                hand_img_resized = cv2.resize(hand_img, (image_size_model, image_size_model),\n                                              interpolation=cv2.INTER_AREA)\n\n                hand_img_resized = transforms(image=hand_img_resized)\n                hand_img_resized = torch.unsqueeze(hand_img_resized.get('image'), 0)\n\n                y_hat = model(hand_img_resized.to('cuda'))\n\n                y_hat = y_hat.cpu().detach().numpy()[0]\n\n                y_hat_hand = y_hat[0:2]\n                y_hat_face = y_hat[2:4]\n\n                if key.startswith('hand'):\n                    if np.argmax(y_hat_hand) == 1:\n                        image = cv2.rectangle(image, (x, y), (x2, y2), (0, 0, 255), 6)\n\n                if key.startswith('face'):\n                    if np.argmax(y_hat_face) == 1:\n                        image = cv2.rectangle(image, (x, y), (x2, y2), (255, 0, 255), 6)\n\n            # show frame\n            cv2.imshow('image', image)\n\n            (H, W) = image.shape[:2]\n            size_shape = (W, H)\n\n            # for video creation\n            # img_array.append(image)\n\n            end = time.time()\n\n            print(str(end - start))\n            time_total += end - start\n\n            if cv2.waitKey(int(1000./float(fps))) & 0xFF == ord('q'):\n                break\n\n        else:\n            break\n\n    print((time_total) / length)\n    # write video\n    print(\"Saving video...\")\n    out = 
cv2.VideoWriter(str(Path(video_dst_dir).joinpath(video_name + '_result.avi')),\n cv2.VideoWriter_fourcc(*'DIVX'), frame_rate,\n size_shape)\n for i in range(len(img_array)):\n out.write(img_array[i])\n out.release()\n\n cap.release()\n # Closes all the frames\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n video_src_dir = os.path.join('denred0_data', 'inference', 'source')\n video_dst_dir = os.path.join('denred0_data', 'inference', 'result')\n\n video_name = 'tst_bio.avi' # 'tst_bio.avi'\n\n best_checkpoint = os.path.join('denred0_model', 'best_checkpoint',\n 'senet154_hands_epoch=6_val_loss=0.015_val_acc=0.996_val_f1_epoch=0.996.ckpt')\n image_size_model = 224\n\n frame_rate = 36 # frame rate in resulting video\n\n inference(video_src_dir=video_src_dir,\n video_dst_dir=video_dst_dir,\n video_name=video_name,\n checkpoint=best_checkpoint,\n image_size_model=image_size_model,\n frame_rate=frame_rate)\n" ]
[ [ "numpy.argmax" ] ]
pixelink-support/PixelinkPythonWrapper
[ "ad7ba24c550825eb12f16e1eaf9c66f35db52a8f" ]
[ "samples/Linux/callbackUsingNumPy.py" ]
[ "\"\"\"\r\ncallbackUsingNumPy.py\r\n\r\nDemonstrates how to use callbacks with Callback.PREVIEW, using a NumPy image\r\nThe callback function will modify the preview buffer supplied by the API.\r\n\"\"\"\r\n\r\nfrom pixelinkWrapper import *\r\nfrom ctypes import *\r\nimport time\r\nimport threading\r\nimport numpy as np\r\n\r\n\r\ndef get_pixel_format_as_string(dataFormat):\r\n    switcher = {\r\n        PxLApi.PixelFormat.MONO8: \"MONO8\",\r\n        PxLApi.PixelFormat.MONO16: \"MONO16\",\r\n        PxLApi.PixelFormat.YUV422: \"YUV422\",\r\n        PxLApi.PixelFormat.BAYER8_GRBG: \"BAYER8_GRBG\",\r\n        PxLApi.PixelFormat.BAYER16_GRBG: \"BAYER16_GRBG\",\r\n        PxLApi.PixelFormat.RGB24: \"RGB24\",\r\n        PxLApi.PixelFormat.RGB48: \"RGB48\",\r\n        PxLApi.PixelFormat.BAYER8_RGGB: \"BAYER8_RGGB\",\r\n        PxLApi.PixelFormat.BAYER8_GBRG: \"BAYER8_GBRG\",\r\n        PxLApi.PixelFormat.BAYER8_BGGR: \"BAYER8_BGGR\",\r\n        PxLApi.PixelFormat.BAYER16_RGGB: \"BAYER16_RGGB\",\r\n        PxLApi.PixelFormat.BAYER16_GBRG: \"BAYER16_GBRG\",\r\n        PxLApi.PixelFormat.BAYER16_BGGR: \"BAYER16_BGGR\",\r\n        PxLApi.PixelFormat.MONO12_PACKED: \"MONO12_PACKED\",\r\n        PxLApi.PixelFormat.BAYER12_GRBG_PACKED: \"BAYER12_GRBG_PACKED\",\r\n        PxLApi.PixelFormat.BAYER12_RGGB_PACKED: \"BAYER12_RGGB_PACKED\",\r\n        PxLApi.PixelFormat.BAYER12_GBRG_PACKED: \"BAYER12_GBRG_PACKED\",\r\n        PxLApi.PixelFormat.BAYER12_BGGR_PACKED: \"BAYER12_BGGR_PACKED\",\r\n        PxLApi.PixelFormat.RGB24_NON_DIB: \"RGB24_NON_DIB\",\r\n        PxLApi.PixelFormat.RGB48_DIB: \"RGB48_DIB\",\r\n        PxLApi.PixelFormat.MONO12_PACKED_MSFIRST: \"MONO12_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER12_GRBG_PACKED_MSFIRST: \"BAYER12_GRBG_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER12_RGGB_PACKED_MSFIRST: \"BAYER12_RGGB_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER12_GBRG_PACKED_MSFIRST: \"BAYER12_GBRG_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER12_BGGR_PACKED_MSFIRST: \"BAYER12_BGGR_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.MONO10_PACKED_MSFIRST: \"MONO10_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER10_GRBG_PACKED_MSFIRST: \"BAYER10_GRBG_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER10_RGGB_PACKED_MSFIRST: \"BAYER10_RGGB_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER10_GBRG_PACKED_MSFIRST: \"BAYER10_GBRG_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.BAYER10_BGGR_PACKED_MSFIRST: \"BAYER10_BGGR_PACKED_MSFIRST\",\r\n        PxLApi.PixelFormat.STOKES4_12: \"STOKES4_12\",\r\n        PxLApi.PixelFormat.POLAR4_12: \"POLAR4_12\",\r\n        PxLApi.PixelFormat.POLAR_RAW4_12: \"POLAR_RAW4_12\",\r\n        PxLApi.PixelFormat.HSV4_12: \"HSV4_12\",\r\n        PxLApi.PixelFormat.BGR24_NON_DIB: \"BGR24_NON_DIB\" \r\n    }\r\n    return switcher.get(dataFormat, \"Unknown data format\")\r\n\r\n\"\"\"\r\nCreates a NumPy 2D array representation of a byte pointer used by the Pixelink API.\r\n    frameData: Byte pointer to the image provided by the Pixelink API\r\n    width: Width of the image (in pixels)\r\n    height: Height of the image (in pixels)\r\n    bytesPerPixel: the number of bytes per pixel\r\n\"\"\" \r\ndef numPy_image (frameData, width, height, bytesPerPixel):\r\n    buffer_from_memory = pythonapi.PyMemoryView_FromMemory\r\n    buffer_from_memory.restype = py_object\r\n    pBuffer = buffer_from_memory(frameData, width * height * bytesPerPixel, 0x200) # 0x200 == writable\r\n    return np.frombuffer(pBuffer, np.uint8).reshape(height, width * bytesPerPixel)\r\n    \r\n\"\"\"\r\nCallback function called by the API just before an image is displayed in the preview window. \r\nN.B. 
This is called by the API on a thread created in the API.\r\n\"\"\"\r\n@PxLApi._dataProcessFunction\r\ndef callback_format_preview(hCamera, frameData, dataFormat, frameDesc, userData):\r\n # Copy frame descriptor information\r\n frameDescriptor = frameDesc.contents\r\n # Find image dimensions\r\n width = int(frameDescriptor.Roi.fWidth / frameDescriptor.PixelAddressingValue.fHorizontal)\r\n height = int(frameDescriptor.Roi.fHeight / frameDescriptor.PixelAddressingValue.fVertical)\r\n bytesPerPixel = PxLApi.getBytesPerPixel(dataFormat)\r\n\r\n\r\n # Recast the returned image as a NumPy 2-Darray, that we can modify\r\n npFrame = numPy_image (frameData, width, height, bytesPerPixel)\r\n\r\n print(\"callback_format_image: hCamera = {0}, frameData = {1}\".format(hex(hCamera),\r\n hex(id(frameData))))\r\n print(\" dataFormat = {0} {1}, FrameDesc = {2}\".format(dataFormat,\r\n get_pixel_format_as_string(dataFormat),\r\n hex(id(frameDesc))))\r\n print(\" userData = {0}, threadId = {1}\".format(hex(userData), hex(id(threading.current_thread()))))\r\n print(\" imageData = {0} {1} {2} {3} {4} {5} {6} {7}\\n\".format(hex(frameData[0]), hex(frameData[1]), hex(frameData[2]),\r\n hex(frameData[3]), hex(frameData[4]), hex(frameData[5]),\r\n hex(frameData[6]), hex(frameData[7])))\r\n \r\n # Just to see the effect of the callback, increase intensity of the middle 20% of the pixels, to 100%\r\n startRow = int((height/5)*2)\r\n endRow = int((height/5)*3)\r\n startCol = int(((width*bytesPerPixel)/5)*2)\r\n endCol = int(((width*bytesPerPixel)/5)*3)\r\n\r\n npFrame[startRow:endRow,startCol:endCol] = 0xff\r\n\r\n return 0\r\n\r\n\r\ndef do_callback_on_preview(hCamera):\r\n # Set the callback function\r\n print(\"=====================================================\\n\")\r\n print(\"do_callback_on_preview\\n\")\r\n userData = 3735928559\r\n print(\"Registering PREVIEW callback with userData {0}\\n\".format(hex(userData)))\r\n ret = PxLApi.setCallback(hCamera, PxLApi.Callback.PREVIEW, userData, callback_format_preview)\r\n if(not(PxLApi.apiSuccess(ret[0]))):\r\n print(\"ERROR setting callback function: {0}\".format(ret[0]))\r\n return\r\n \r\n ret = PxLApi.setStreamState(hCamera, PxLApi.StreamState.START)\r\n if(not(PxLApi.apiSuccess(ret[0]))):\r\n print(\"ERROR setting stream state function: {0}\".format(ret[0]))\r\n return\r\n\r\n # We will start getting our callback called after we start previewing\r\n ret = PxLApi.setPreviewState(hCamera, PxLApi.PreviewState.START)\r\n if(not(PxLApi.apiSuccess(ret[0]))):\r\n print(\"ERROR setting preview state function: {0}\".format(ret[0]))\r\n return\r\n\r\n time.sleep(10) # Sleep 10 seconds\r\n\r\n # Stop previewing\r\n ret = PxLApi.setPreviewState(hCamera, PxLApi.PreviewState.STOP)\r\n if(not(PxLApi.apiSuccess(ret[0]))):\r\n print(\"ERROR setting preview state function: {0}\".format(ret[0]))\r\n return\r\n\r\n ret = PxLApi.setStreamState(hCamera, PxLApi.StreamState.STOP)\r\n\r\n # Disable callback on preview by setting the callback function to 0 or None\r\n ret = PxLApi.setCallback(hCamera, PxLApi.Callback.PREVIEW, userData, 0)\r\n\r\n\r\ndef main():\r\n \r\n ret = PxLApi.initialize(0)\r\n if(not(PxLApi.apiSuccess(ret[0]))):\r\n print(\"ERROR: {0}\\n\".format(ret[0]))\r\n return 1\r\n hCamera = ret[1]\r\n print(\"\\nMain thread id = {}\\n\".format(hex(id(threading.current_thread()))))\r\n \r\n # do_callback_on_format_image(hCamera) /* Callback.FORMAT_IMAGE is not supported */\r\n do_callback_on_preview(hCamera)\r\n\r\n PxLApi.uninitialize(hCamera)\r\n return 0\r\n\r\nif 
__name__ == \"__main__\":\r\n main()\r\n" ]
[ [ "numpy.frombuffer" ] ]
emosy/scarphase
[ "98734fe149dacccd28b00232deeb15ed43af1031", "98734fe149dacccd28b00232deeb15ed43af1031" ]
[ "pyscarphase/plot/perfctrs.py", "pyscarphase/plot/signature.py" ]
[ "# Copyright (c) 2011-2013 Andreas Sembrant\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# - Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Authors: Andreas Sembrant\n\nimport matplotlib.pyplot as plt\n\nfrom pyscarphase.plot.util import phasebar\n\ndef plot_time(phase_list, cycles):\n\n #\n fig=plt.figure()\n\n # Plot top phase bar\n ax = plt.axes([0.1, 0.9, 0.8, 0.025])\n phasebar.plot(ax, phase_list)\n \n # Plot counter values\n ax2 = plt.axes([0.1, 0.1, 0.8, 0.75])\n ax2.plot(cycles)\n\n # Set same width \n ax.set_xlim(ax2.get_xlim())\n\n # \n plt.show()\n", "# Copyright (c) 2011-2013 Andreas Sembrant\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# - Neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Authors: Andreas Sembrant\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cmx\n\nfrom pyscarphase.plot.util import phasebar\n\ndef plot_time(phase_list, signatures):\n\n #\n fig=plt.figure()\n\n # Plot top phase bar\n ax = plt.axes([0.1, 0.9, 0.8, 0.025])\n phasebar.plot(ax, phase_list)\n \n # Plot counter values\n ax2 = plt.axes([0.1, 0.1, 0.8, 0.75])\n ax2.imshow(signatures, aspect='auto', interpolation='nearest', cmap=cmx.binary)\n\n # Set same width \n ax.set_xlim(ax2.get_xlim())\n\n # \n plt.show()\n" ]
[ [ "matplotlib.pyplot.axes", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.axes", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
TRex22/picamerax
[ "2e7b05c92331b315533835596862a643ba55b021" ]
[ "docs/examples/yuv_capture4.py" ]
[ "import time\nimport picamerax\nimport picamerax.array\nimport numpy as np\n\nwith picamerax.PiCamera() as camera:\n    camera.resolution = (100, 100)\n    time.sleep(2)\n    # The camera pads frames: width is rounded up to a multiple of 32 and height\n    # to a multiple of 16, so a 100x100 capture needs a 112x128 (rows x cols) buffer.\n    y_data = np.empty((112, 128), dtype=np.uint8)\n    try:\n        camera.capture(y_data, 'yuv')\n    except IOError:\n        pass\n    y_data = y_data[:100, :100]\n    # y_data now contains the Y-plane only\n" ]
[ [ "numpy.empty" ] ]
smtnkc/bert4epi
[ "e45198916eba6e716be3e1e02c8c2a51de8c9891" ]
[ "combine_results.py" ]
[ "import os\nimport pandas as pd\n\n\nfiles = []\ntrain_cell_lines = []\ntest_cell_lines = []\nf1_scores = []\ntest_times = []\nconfusions = []\n\nfor file in os.listdir(\"results\"):\n if file.endswith(\".txt\") and not file.startswith('training'):\n files.append(file)\n\nfiles = sorted(files)\n\nfor filename in files:\n f_name = 'results/' + filename\n f = open(f_name, \"r\")\n print(f_name)\n content = f.read()\n f1 = content.split('F1 = ')[1].split('\\n')[0]\n print('F1 =', f1)\n test_time = content.split('TIME = ')[1]\n print('TIME =', test_time)\n confusion = content.split('CONFUSION = ')[1].split('\\n')[0]\n f.close()\n\n train_cell_lines.append(filename.split('_')[0])\n test_cell_lines.append(filename.split('_')[1])\n f1_scores.append(float(f1))\n test_times.append(float(test_time))\n confusions.append(confusion)\n\n\ndata = {\n 'train_cell_line': train_cell_lines,\n 'test_cell_line': test_cell_lines,\n 'f1_scores': f1_scores,\n 'test_time': test_times,\n 'confusion': confusions\n}\n\ndf = pd.DataFrame(data)\ndf.to_csv('results/results.csv', index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
rghosh8/capstone-2
[ "b7e16642a1f16123f3059db32497ba1c3c815f6f", "b7e16642a1f16123f3059db32497ba1c3c815f6f" ]
[ "notebooks/preprocessing.py", "notebooks/NLP_emb_lstm.py" ]
[ "import pandas as pd \n\n\nclass Preprocessing(object):\n    def __init__(self, train_datafile, test_datafile):\n        self.train_df, self.test_df = pd.read_csv(train_datafile), pd.read_csv(test_datafile)\n        self.train_df_dis = self.train_df[(self.train_df['target']==1)]\n        self.train_df_nodis = self.train_df[(self.train_df['target']==0)]\n    \n    def null_treatment(self):\n        train_df_keyword = self.train_df['keyword'].fillna('uns_keyword')\n        self.train_df['modified_keyword'] = train_df_keyword\n        train_df_loc = self.train_df['location'].fillna('uns_location')\n        self.train_df['modified_location'] = train_df_loc\n        self.train_df = self.train_df.drop(['location', 'keyword'], axis=1)\n        return self.train_df ", "import pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM, Activation, Embedding, GlobalAveragePooling1D\nfrom sklearn_pipeline import *\n\nclass NLP_emb_lstm(object):\n    def __init__(self, att, target, embedding_dim):\n        self.att = att\n        self.target = target\n        model = Sequential() \n        model.add(Embedding(5000, embedding_dim))\n        model.add(LSTM(50)) \n#         model.add(GlobalAveragePooling1D())\n#         model.add(Dense(500, activation='relu'))\n        model.add(Dense(100, activation='relu'))\n#         model.add(Dense(50, activation='relu'))\n        model.add(Dense(1, activation='sigmoid')) \n        \n        print(model.summary())\n\n        METRICS = [tf.metrics.BinaryAccuracy(name='ACCURACY'), tf.metrics.BinaryAccuracy(name='PRECISION'), tf.metrics.BinaryAccuracy(name='RECALL'), \\\n            tf.metrics.BinaryAccuracy(name='F1Score')]\n        model.compile(loss=tf.keras.losses.BinaryCrossentropy(),\n              optimizer=tf.keras.optimizers.Adam(1e-4),\n              metrics=METRICS) \n\n        self.model = model\n    \n    def fit(self, size_batch, no_epoch):\n        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=\"./logs\")\n        history_augmented = self.model.fit(self.att, self.target, verbose=1, batch_size = size_batch, epochs = no_epoch, validation_split=0.2,\n                    shuffle=True, callbacks=[tensorboard_callback])\n        \n        return history_augmented\n\n\nif __name__ == \"__main__\":\n    train_df = pd.read_csv('../data/processed_train.csv')\n    test_df = pd.read_csv('../data/processed_test.csv')\n    word_max_features = 5000\n    keyword_max_features = 100\n    location_max_features = 50\n    url_max_features = 50\n    embedding_dim = 2048\n    train_X_augmented, test_X_augmented, train_target = train_test_augmented(train_df, \\\n        test_df, word_max_features, keyword_max_features, location_max_features, \\\n        url_max_features) \n\n    embedding_model = NLP_emb_lstm(train_X_augmented, train_target.values, embedding_dim) \n\n    embedding_model.fit(100, 100)\n" ]
[ [ "pandas.read_csv" ], [ "pandas.read_csv", "tensorflow.metrics.BinaryAccuracy", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TensorBoard" ] ]
somhathai/Bringing-Old-Photos-Back-to-Life-master
[ "2a2e79af1fdd1b6dc66c4dc91197f757cd3ebfd1" ]
[ "Global/detection.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport os\nimport numpy as np\nimport argparse\nimport time\n\nimport torch\nimport torchvision as tv\nimport torch.nn.functional as F\nfrom detection_util.util import *\nfrom detection_models import networks\nfrom PIL import Image, ImageFile\nimport json\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\ndef data_transforms(img, full_size, method=Image.BICUBIC):\n if full_size == \"full_size\":\n ow, oh = img.size\n h = int(round(oh / 16) * 16)\n w = int(round(ow / 16) * 16)\n if (h == oh) and (w == ow):\n return img\n return img.resize((w, h), method)\n\n if full_size == \"resize_256\":\n return img.resize((config.image_size, config.image_size), method)\n\n if full_size == \"scale_256\":\n\n ow, oh = img.size\n pw, ph = ow, oh\n if ow < oh:\n ow = 256\n oh = ph / pw * 256\n else:\n oh = 256\n ow = pw / ph * 256\n\n h = int(round(oh / 16) * 16)\n w = int(round(ow / 16) * 16)\n if (h == ph) and (w == pw):\n return img\n return img.resize((w, h), method)\n\n\ndef blend_mask(img, mask):\n\n np_img = np.array(img).astype(\"float\")\n\n return Image.fromarray((np_img * (1 - mask) + mask * 255.0).astype(\"uint8\")).convert(\"RGB\")\n\n\ndef main(config):\n print(\"initializing the dataloader\")\n\n model = networks.UNet(\n in_channels=1,\n out_channels=1,\n depth=4,\n conv_num=2,\n wf=6,\n padding=True,\n batch_norm=True,\n up_mode=\"upsample\",\n with_tanh=False,\n sync_bn=True,\n antialiasing=True,\n )\n\n ## load model\n checkpoint_path = \"./checkpoints/detection/FT_Epoch_latest.pt\"\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n model.load_state_dict(checkpoint[\"model_state\"])\n print(\"model weights loaded\")\n model.to(config.GPU)\n model.eval()\n\n ## dataloader and transformation\n print(\"directory of testing image: \" + config.test_path)\n imagelist = os.listdir(config.test_path)\n imagelist.sort()\n total_iter = 0\n\n P_matrix = {}\n save_url = os.path.join(config.output_dir)\n mkdir_if_not(save_url)\n\n input_dir = os.path.join(save_url, \"input\")\n output_dir = os.path.join(save_url, \"mask\")\n # blend_output_dir=os.path.join(save_url, 'blend_output')\n mkdir_if_not(input_dir)\n mkdir_if_not(output_dir)\n # mkdir_if_not(blend_output_dir)\n\n idx = 0\n\n for image_name in imagelist:\n\n idx += 1\n\n print(\"processing\", image_name)\n\n results = []\n scratch_file = os.path.join(config.test_path, image_name)\n if not os.path.isfile(scratch_file):\n print(\"Skipping non-file %s\" % image_name)\n continue\n scratch_image = Image.open(scratch_file).convert(\"RGB\")\n\n w, h = scratch_image.size\n\n transformed_image_PIL = data_transforms(scratch_image, config.input_size)\n\n scratch_image = transformed_image_PIL.convert(\"L\")\n scratch_image = tv.transforms.ToTensor()(scratch_image)\n\n scratch_image = tv.transforms.Normalize([0.5], [0.5])(scratch_image)\n\n scratch_image = torch.unsqueeze(scratch_image, 0)\n scratch_image = scratch_image.to(config.GPU)\n \n\n P = torch.sigmoid(model(scratch_image))\n\n P = P.data.cpu()\n\n tv.utils.save_image(\n (P >= 0.4).float(),\n os.path.join(output_dir, image_name[:-4] + \".png\",),\n nrow=1,\n padding=0,\n normalize=True,\n )\n transformed_image_PIL.save(os.path.join(input_dir, image_name[:-4] + \".png\"))\n # single_mask=np.array((P>=0.4).float())[0,0,:,:]\n # RGB_mask=np.stack([single_mask,single_mask,single_mask],axis=2)\n # blend_output=blend_mask(transformed_image_PIL,RGB_mask)\n # 
blend_output.save(os.path.join(blend_output_dir,image_name[:-4]+'.png'))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # parser.add_argument('--checkpoint_name', type=str, default=\"FT_Epoch_latest.pt\", help='Checkpoint Name')\n\n parser.add_argument(\"--GPU\", type=int, default=0)\n parser.add_argument(\"--test_path\", type=str, default=\".\")\n parser.add_argument(\"--output_dir\", type=str, default=\".\")\n parser.add_argument(\"--input_size\", type=str, default=\"scale_256\", help=\"resize_256|full_size|scale_256\")\n config = parser.parse_args()\n\n main(config)\n" ]
[ [ "numpy.array", "torch.unsqueeze", "torch.load" ] ]
SgtMilk/stock-prediction
[ "2fa9cf851b536ea1cd4fbcf9f767581b36ee38ad" ]
[ "src/predict.py" ]
[ "# Copyright (c) 2021 Alix Routhier-Lalonde. Licence included in root of package.\n\nfrom src.hyperparameters import Train\nfrom src.data import Dataset\nfrom src.model import Net\nfrom src.utils import get_base_path\nimport numpy as np\nimport datetime\nimport os\nimport torch\n\n\ndef predict_stock(code, mode: int, overwrite: bool = False):\n \"\"\"\n predict callback\n :param overwrite: overwrite flag\n :param code: the code to train to\n :param mode: Mode.daily, Mode.weekly, Mode.monthly\n :return: predicted data\n \"\"\"\n # getting the right file path\n destination_folder = os.path.abspath(\n os.path.join(get_base_path(), 'src/model/models'))\n filepath = os.path.join(\n destination_folder, f\"model-{str(mode)}.hdf5\")\n\n # getting the data\n dataset = Dataset(code, mode=mode, y_flag=True)\n dataset.transform_to_torch()\n\n gpu = torch.cuda.is_available()\n\n # getting our model and net\n model = Train.model(\n dataset.x.shape[-1], Train.hidden_dim, Train.num_dim, Train.dropout, mode)\n\n if gpu:\n model.to('cuda')\n\n if not os.path.exists(filepath) or overwrite:\n net = Net(Train.optimizer(model.parameters(), lr=Train.learning_rate), Train.loss(reduction='mean'), model,\n dataset)\n net.train(Train.epochs, dataset,\n Train.validation_split, Train.patience)\n model = net.model\n else:\n model.load_state_dict(torch.load(filepath))\n\n data = torch.from_numpy(np.array(dataset.prediction_data)).float()\n if gpu:\n data = data.to(device='cuda')\n predicted = model(data)\n\n # re-transforming to numpy\n predicted = predicted.detach().cpu().numpy()\n\n return dataset.normalizer.inverse_transform(predicted)\n" ]
[ [ "numpy.array", "torch.cuda.is_available", "torch.load" ] ]
stancld/adapter-transformers
[ "9a6bf1757b684a4c627c5a35a56e61ea706dccee" ]
[ "src/transformers/adapters/model_mixin.py" ]
[ "import logging\nimport os\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom os.path import join\nfrom typing import List, Optional, Union\n\nimport torch\nfrom torch import nn\n\nfrom .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition\nfrom .configuration import AdapterConfig, AdapterFusionConfig, ModelAdaptersConfig, get_adapter_config_hash\nfrom .hub_mixin import PushAdapterToHubMixin\nfrom .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader\nfrom .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock\nfrom .utils import EMBEDDING_FILE, TOKENIZER_PATH, inherit_doc\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InvertibleAdaptersMixin:\n \"\"\"Mixin for Transformer models adding invertible adapters.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.invertible_adapters = nn.ModuleDict(dict())\n\n def add_invertible_adapter(self, adapter_name: str):\n \"\"\"\n Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an\n invertible adapter config, this method does nothing.\n\n Args:\n adapter_name (str): The name of the adapter for which to add an invertible adapter module.\n \"\"\"\n if adapter_name in self.invertible_adapters:\n raise ValueError(f\"Model already contains an adapter module for '{adapter_name}'.\")\n adapter_config = self.config.adapters.get(adapter_name)\n if adapter_config and adapter_config[\"inv_adapter\"]:\n if adapter_config[\"inv_adapter\"] == \"nice\":\n inv_adap = NICECouplingBlock(\n [[self.config.hidden_size]],\n non_linearity=adapter_config[\"non_linearity\"],\n reduction_factor=adapter_config[\"inv_adapter_reduction_factor\"],\n )\n elif adapter_config[\"inv_adapter\"] == \"glow\":\n inv_adap = GLOWCouplingBlock(\n [[self.config.hidden_size]],\n non_linearity=adapter_config[\"non_linearity\"],\n reduction_factor=adapter_config[\"inv_adapter_reduction_factor\"],\n )\n else:\n raise ValueError(f\"Invalid invertible adapter type '{adapter_config['inv_adapter']}'.\")\n self.invertible_adapters[adapter_name] = inv_adap\n self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)\n\n def delete_invertible_adapter(self, adapter_name: str):\n if adapter_name in self.invertible_adapters:\n del self.invertible_adapters[adapter_name]\n\n def get_invertible_adapter(self):\n # TODO: Currently no fusion over invertible adapters, takes only very first language adapter position\n if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:\n first_adapter = self.config.adapters.active_setup.first()\n if first_adapter in self.invertible_adapters:\n return self.invertible_adapters[first_adapter]\n return None\n\n def enable_invertible_adapters(self, adapter_names):\n for adapter_name in adapter_names:\n if adapter_name in self.invertible_adapters:\n for param in self.invertible_adapters[adapter_name].parameters():\n param.requires_grad = True\n\n def invertible_adapters_forward(self, hidden_states, rev=False):\n # TODO: Currently no fusion over invertible adapters, takes only very first language adapter position\n if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:\n first_adapter = self.config.adapters.active_setup.first()\n if first_adapter in self.invertible_adapters:\n hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)\n\n return hidden_states\n\n\nclass 
ModelConfigAdaptersMixin(ABC):\n    \"\"\"\n    Mixin for model config classes, adding support for adapters.\n\n    Besides adding this mixin to the config class of a model supporting adapters, make sure the following attributes/\n    properties are present: hidden_dropout_prob, attention_probs_dropout_prob.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # adapter configuration\n        adapter_config_dict = kwargs.pop(\"adapters\", None)\n        if adapter_config_dict:\n            self.adapters = ModelAdaptersConfig(**adapter_config_dict)\n        else:\n            self.adapters = ModelAdaptersConfig()\n        # Convert AdapterFusions from old format for backwards compatibility\n        fusion_models = kwargs.pop(\"adapter_fusion_models\", [])\n        fusion_config = kwargs.pop(\"adapter_fusion\", None)\n        for fusion_adapter_names in fusion_models:\n            self.adapters.add_fusion(fusion_adapter_names, config=fusion_config)\n\n\nclass ModelAdaptersMixin(PushAdapterToHubMixin, ABC):\n    \"\"\"Mixin for transformer models adding support for loading/saving adapters.\"\"\"\n\n    def __init__(self, config, *args, **kwargs):\n        super().__init__(config, *args, **kwargs)\n        self.model_name = None\n        self.loaded_embeddings = {}\n        self._active_embedding = \"default\"\n\n        # In some cases, the config is not an instance of a directly supported config class such as BertConfig.\n        # Thus, we check the adapters config here to make sure everything is correct.\n        if not hasattr(config, \"adapters\"):\n            config.adapters = ModelAdaptersConfig()\n        elif config.adapters is not None and not isinstance(config.adapters, ModelAdaptersConfig):\n            config.adapters = ModelAdaptersConfig(**config.adapters)\n\n    def _init_adapter_modules(self):\n        \"\"\"\n        This method initializes adapter modules and fusion modules from the model config.\n        \"\"\"\n        # Initialize adapters from config\n        for adapter_name in self.config.adapters:\n            self._add_adapter(adapter_name)\n        # Initialize fusion from config\n        for fusion_name in self.config.adapters.fusions:\n            self._add_fusion_layer(fusion_name)\n\n        self.loaded_embeddings[\"default\"] = self.get_input_embeddings()\n\n    # These methods have to be implemented by every deriving class:\n\n    @abstractmethod\n    def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n        \"\"\"Sets the model into mode for training the given adapters.\"\"\"\n        pass\n\n    def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n        \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n        warnings.warn(\n            \"train_fusion() has been deprecated in favor of train_adapter_fusion(). 
Please use the newer method instead.\",\n FutureWarning,\n )\n self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n @abstractmethod\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n pass\n\n @abstractmethod\n def _add_adapter(self, adapter_name):\n pass\n\n @abstractmethod\n def _add_fusion_layer(self, adapter_names):\n pass\n\n def has_adapters(self):\n return len(self.config.adapters.adapters) > 0\n\n @property\n def has_parallel_adapters(self) -> bool:\n if self.config.adapters.active_setup:\n return self.config.adapters.active_setup.parallel_channels > 1\n else:\n return False\n\n @property\n def active_adapters(self) -> AdapterCompositionBlock:\n return self.config.adapters.active_setup\n\n @active_adapters.setter\n def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):\n self.set_active_adapters(adapter_setup)\n\n def set_active_adapters(\n self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None\n ):\n \"\"\"\n Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is\n found, no module of the respective type will be activated.\n\n Args:\n adapter_setup (list): The list of adapters to be activated by default. Can be a fusion or stacking configuration.\n \"\"\"\n adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)\n if adapter_setup:\n for adapter_name in adapter_setup.flatten():\n if adapter_name not in self.config.adapters.adapters:\n raise ValueError(\n f\"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded.\"\n )\n\n self.config.adapters.active_setup = adapter_setup\n self.config.adapters.skip_layers = skip_layers\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n\n adapter_name (str): The name of the adapter module to be added.\n config (str or dict or AdapterConfig, optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.\n set_active (bool, optional): Set the adapter to be the active one. By default (False), the adapter is added but not activated.\n \"\"\"\n if isinstance(config, dict):\n config = AdapterConfig.from_dict(config) # ensure config is ok and up-to-date\n # In case adapter already exists and we allow overwriting, explicitly delete the existing one first\n if overwrite_ok and adapter_name in self.config.adapters:\n self.delete_adapter(adapter_name)\n self.config.adapters.add(adapter_name, config=config)\n self.base_model._add_adapter(adapter_name)\n if set_active:\n self.set_active_adapters(adapter_name)\n\n def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):\n warnings.warn(\n \"add_fusion() has been deprecated in favor of add_adapter_fusion(). 
Please use the newer method instead.\",\n            FutureWarning,\n        )\n        adapter_fusion_config = AdapterFusionConfig.from_dict(adapter_fusion_config).replace(**override_kwargs)\n        self.add_adapter_fusion(adapter_names, adapter_fusion_config)\n\n    def add_adapter_fusion(\n        self,\n        adapter_names: Union[Fuse, list, str],\n        config=None,\n        overwrite_ok: bool = False,\n        set_active: bool = False,\n    ):\n        \"\"\"\n        Adds AdapterFusion to the model with all the necessary configurations and weight initializations\n\n        Args:\n            adapter_names (Fuse or list or str): AdapterFusion layer to add. Can be either:\n\n                - a ``Fuse`` composition block\n                - a list of adapter names to fuse\n                - a comma-separated string of adapter names to fuse\n            config (str or dict): adapter fusion configuration, can be either:\n\n                - a string identifying a pre-defined adapter fusion configuration\n                - a dictionary representing the adapter fusion configuration\n                - the path to a file containing the adapter fusion configuration\n            overwrite_ok (bool, optional): Overwrite an AdapterFusion layer with the same name if it exists. By default (False), an exception is thrown.\n            set_active (bool, optional): Activate the added AdapterFusion. By default (False), the AdapterFusion is added but not activated.\n        \"\"\"\n        if isinstance(adapter_names, Fuse):\n            adapter_names = adapter_names.children\n        elif isinstance(adapter_names, str):\n            adapter_names = adapter_names.split(\",\")\n\n        if isinstance(config, dict):\n            config = AdapterFusionConfig.from_dict(config) # ensure config is ok and up-to-date\n        # In case adapter already exists and we allow overwriting, explicitly delete the existing one first\n        if overwrite_ok and self.config.adapters.get_fusion(adapter_names) is not None:\n            self.delete_adapter_fusion(adapter_names)\n        self.config.adapters.add_fusion(adapter_names, config=config)\n        self.base_model._add_fusion_layer(adapter_names)\n        if set_active:\n            if not isinstance(adapter_names, list):\n                adapter_names = adapter_names.split(\",\")\n            self.set_active_adapters(Fuse(*adapter_names))\n\n    def delete_adapter(self, adapter_name: str):\n        \"\"\"\n        Deletes the adapter with the specified name from the model.\n\n        Args:\n            adapter_name (str): The name of the adapter.\n        \"\"\"\n        if adapter_name not in self.config.adapters:\n            logger.info(\"No adapter '%s' found for deletion. Skipping.\", adapter_name)\n            return\n        del self.config.adapters.adapters[adapter_name]\n        self.base_model._delete_adapter(adapter_name)\n        # Reset active adapters if this was the only active adapter\n        if self.active_adapters == Stack(adapter_name):\n            self.active_adapters = None\n\n    def delete_adapter_fusion(self, adapter_names: Union[Fuse, list, str]):\n        \"\"\"\n        Deletes the AdapterFusion layer of the specified adapters.\n\n        Args:\n            adapter_names (Union[Fuse, list, str]): AdapterFusion layer to delete.\n        \"\"\"\n        if isinstance(adapter_names, Fuse):\n            adapter_fusion_name = \",\".join(adapter_names.children)\n        elif isinstance(adapter_names, list):\n            adapter_fusion_name = \",\".join(adapter_names)\n        elif isinstance(adapter_names, str):\n            adapter_fusion_name = adapter_names\n        else:\n            raise ValueError(\"Invalid AdapterFusion definition: {}\".format(adapter_names))\n\n        if adapter_fusion_name not in self.config.adapters.fusions:\n            logger.info(\"No AdapterFusion '%s' found for deletion. 
Skipping.\", adapter_fusion_name)\n return\n del self.config.adapters.fusions[adapter_fusion_name]\n self.base_model._delete_fusion_layer(adapter_fusion_name)\n # Reset active adapters if this was the active setup\n if self.active_adapters == adapter_names:\n self.active_adapters = None\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using\n `load_adapter()`.\n\n Args:\n save_directory (str): Path to a directory where the adapter should be saved.\n adapter_name (str): Name of the adapter to be saved.\n\n Raises:\n ValueError: If the given adapter name is invalid.\n \"\"\"\n loader = AdapterLoader(self)\n loader.save(save_directory, adapter_name, meta_dict)\n # save additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.save(save_directory, adapter_name)\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n if isinstance(adapter_names, Fuse):\n adapter_fusion_name = \",\".join(adapter_names.children)\n elif isinstance(adapter_names, list):\n adapter_fusion_name = \",\".join(adapter_names)\n elif isinstance(adapter_names, str):\n adapter_fusion_name = adapter_names\n else:\n raise ValueError(\"Invalid AdapterFusion definition: {}\".format(adapter_names))\n\n loader = AdapterFusionLoader(self)\n loader.save(save_directory, adapter_fusion_name, meta_dict)\n # save additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.save(save_directory, adapter_fusion_name)\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = \"ah\",\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n \"\"\"\n Loads a pre-trained pytorch adapter module from the local file system or a remote location.\n\n Args:\n adapter_name_or_path (str): can be either:\n\n - the identifier of a pre-trained task adapter to be loaded from Adapter Hub\n - a path to a directory containing adapter weights saved using `model.saved_adapter()`\n - a URL pointing to a zip folder containing a saved adapter module\n config (dict or str, optional): The requested configuration of the adapter.\n If not specified, will be either: - the default adapter config for the requested adapter if specified -\n the global default adapter config\n version (str, optional): The version of the adapter to be loaded.\n model_name (str, optional): The string identifier of the pre-trained model.\n load_as (str, optional): Load the adapter using this name. 
By default, the name with which the adapter was\n saved will be used.\n source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:\n\n - \"ah\" (default): search on AdapterHub.\n - \"hf\": search on HuggingFace model hub.\n - None: only search on local file system\n leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.\n set_active (bool, optional): Set the loaded adapter to be the active one. By default (False), the adapter is loaded but not activated.\n\n Returns:\n str: The name with which the adapter was added to the model.\n \"\"\"\n loader = AdapterLoader(self)\n load_dir, load_name = loader.load(\n adapter_name_or_path,\n config,\n version,\n model_name,\n load_as,\n source=source,\n leave_out=leave_out,\n set_active=set_active,\n **kwargs,\n )\n # load additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.load(\n load_dir,\n load_as=load_as,\n loading_info=kwargs.get(\"loading_info\", None),\n main_load_name=load_name,\n id2label=id2label,\n set_active=set_active,\n )\n return load_name\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n \"\"\"\n Loads a pre-trained AdapterFusion layer from the local file system.\n\n Args:\n adapter_fusion_name_or_path (str): a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`.\n load_as (str, optional): Load the AdapterFusion using this name.\n By default, the name with which the AdapterFusion layer was saved will be used.\n set_active (bool, optional): Activate the loaded AdapterFusion. 
By default (False), the AdapterFusion is loaded but not activated.\n\n Returns:\n str: The name with which the AdapterFusion was added to the model.\n \"\"\"\n\n loader = AdapterFusionLoader(self)\n load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as, set_active=set_active)\n # load additional custom weights\n if custom_weights_loaders:\n for weights_loader in custom_weights_loaders:\n weights_loader.load(\n load_dir,\n load_as=load_as,\n loading_info=kwargs.get(\"loading_info\", None),\n main_load_name=load_name,\n set_active=set_active,\n )\n return load_name\n\n def save_all_adapters(\n self,\n save_directory: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves all adapters of this model together with their configuration to subfolders of the given location.\n\n Args:\n save_directory (str): Path to a directory where the adapters should be saved.\n \"\"\"\n for name in self.config.adapters:\n adapter_config = self.config.adapters.get(name)\n h = get_adapter_config_hash(adapter_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)\n\n def save_all_adapter_fusions(\n self,\n save_directory: str,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n \"\"\"\n Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given\n location.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion layers should be saved.\n \"\"\"\n for name in self.config.adapters.fusions:\n adapter_fusion_config = self.config.adapters.get_fusion(name)\n h = get_adapter_config_hash(adapter_fusion_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter_fusion(\n save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders\n )\n\n def freeze_model(self, freeze=True):\n \"\"\"Freezes all weights of the model.\"\"\"\n # first freeze/ unfreeze all model weights\n for param in self.base_model.parameters():\n param.requires_grad = not freeze\n self.model_freezed = freeze\n\n def pre_transformer_forward(self, **kwargs):\n \"\"\"\n This method should be called by every adapter-implementing model at the very beginning of the forward() method.\n \"\"\"\n # some warnings if we don't use available adapters\n active_adapters = self.active_adapters or kwargs.get(\"adapter_names\", None)\n if not active_adapters and self.has_adapters():\n logger.warning(\"There are adapters available but none are activated for the forward pass.\")\n\n self.config.adapters.is_parallelized = False\n\n def load_embeddings(self, path: str, name: str):\n \"\"\"\n Load a saved embedding from the given path. 
If the embedding was saved with a tokenizer it is returned\n\n Args:\n path: the path to the saved embedding\n name: the name the embedding should be loaded as\n\n Returns: a tokenizer if it ws saved with the embedding otherwise None\n\n \"\"\"\n from ..models.auto.tokenization_auto import AutoTokenizer\n\n if name in self.loaded_embeddings:\n raise ValueError(\"An embedding with the name {} already exists\".format(name))\n tokenizer = None\n tokenizer_path = os.path.join(path, TOKENIZER_PATH)\n if os.path.isdir(tokenizer_path):\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n\n embedding_path = os.path.join(path, EMBEDDING_FILE)\n if not os.path.isfile(embedding_path):\n raise FileNotFoundError(\"No embeddings found at {}\".format(embedding_path))\n weights = torch.load(embedding_path)\n\n self.loaded_embeddings[name] = nn.Embedding.from_pretrained(weights)\n self.set_active_embeddings(name)\n return tokenizer\n\n def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):\n \"\"\"\n Add a new embedding to the model. If a reference embedding and reference tokenizer are provided tokens in the\n present in both tokenizers are initialized to the embedding in the reference_embedding.\n\n Args:\n name: the name of the embedding\n tokenizer: the tokenizer determining the vocab of the embedding\n reference_embedding: the reference embedding to use for initializing the embeddings of tokens present in the newly created embedding\n reference_tokenizer: the tokenizer providing the vocab for the reference embedding\n embedding_dim: the dimension of the embeddings (if None the hidden_size from the config is used)\n\n \"\"\"\n if name in self.loaded_embeddings:\n raise ValueError(\"An embedding with the name {} already exists\".format(name))\n if embedding_dim is None:\n embedding_dim = self.config.hidden_size\n embedding = nn.Embedding(tokenizer.vocab_size, embedding_dim)\n embedding.requires_grad_(False)\n if (reference_embedding is not None and reference_tokenizer is None) or (\n reference_tokenizer is not None and reference_embedding is None\n ):\n raise KeyError(\n \"Reference embedding and reference tokenizer are required to use initialize embeddings from reference embedding\"\n )\n if reference_embedding is not None and reference_tokenizer is not None:\n tokens = set(tokenizer.get_vocab().keys()) & set(reference_tokenizer.get_vocab().keys())\n reference_vocab = reference_tokenizer.get_vocab()\n vocab = tokenizer.get_vocab()\n for t in tokens:\n idx_reference = reference_vocab[t]\n idx = vocab[t]\n embedding.weight[idx] = self.loaded_embeddings[reference_embedding].weight[idx_reference].clone()\n embedding.train(False)\n self.loaded_embeddings[name] = embedding\n self.set_active_embeddings(name)\n\n def delete_embeddings(self, name):\n \"\"\"\n Deletes the embedding with the given name\n\n Args:\n name: The name of the embedding that should be deleted\n\n \"\"\"\n if name not in self.loaded_embeddings:\n raise ValueError(\"No embedding with name {}\".format(name))\n if self.active_embeddings == name:\n logger.warning(\"The active embedding is deleted. Setting the default embedding as active.\")\n self.set_active_embeddings(\"default\")\n del self.loaded_embeddings[name]\n\n def save_embeddings(self, path, name, tokenizer=None):\n \"\"\"\n Saves the embedding with the given name. 
If a tokenizer is passed as well the tokenizer is saved together with\n the embedding.\n\n Args:\n path: The path where the embedding should be saved\n name: The name of the embedding that should be saved\n tokenizer: optionally a tokenizer to save with the embedding (default is None)\n\n \"\"\"\n if self.active_embeddings == name:\n self.loaded_embeddings[name] = self.get_input_embeddings()\n os.makedirs(path, exist_ok=True)\n embedding_path = os.path.join(path, EMBEDDING_FILE)\n torch.save(self.loaded_embeddings[name].weight, embedding_path)\n if tokenizer:\n tokenizer_path = os.path.join(path, TOKENIZER_PATH)\n tokenizer.save_pretrained(tokenizer_path)\n\n def set_active_embeddings(self, name):\n \"\"\"\n Sets the active embedding for the forward pass of the model\n\n Args:\n name: The name of the embedding that should be used\n\n \"\"\"\n self.loaded_embeddings[self.active_embeddings] = self.get_input_embeddings()\n self.set_input_embeddings(self.loaded_embeddings[name])\n self._active_embedding = name\n\n @property\n def active_embeddings(self):\n return self._active_embedding\n\n\n@inherit_doc\nclass ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):\n \"\"\"\n Mixin adding support for loading/ saving adapters to transformer models with head(s).\n \"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n self._convert_to_flex_head = False\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n adapter_name (str): The name of the adapter module to be added.\n config (str or dict, optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.\n set_active (bool, optional): Set the adapter to be the active one. By default (False), the adapter is added but not activated.\n\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n else:\n self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"\n Sets the model into mode for training the given adapters. If self.base_model is self, must inherit from a class\n that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter(adapter_setup, train_embeddings)\n else:\n self.base_model.train_adapter(adapter_setup, train_embeddings)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"\n Sets the model into mode for training of adapter fusion determined by a list of adapter names. 
If\n self.base_model is self, must inherit from a class that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n else:\n self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def _add_adapter(self, adapter_name):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super()._add_adapter(adapter_name)\n else:\n self.base_model._add_adapter(adapter_name)\n\n def _add_fusion_layer(self, adapter_names):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super()._add_fusion_layer(adapter_names)\n else:\n self.base_model._add_fusion_layer(adapter_names)\n\n def save_head(self, save_directory: str, head_name: str = None):\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, name=head_name)\n\n def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):\n loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)\n return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_adapter(\n save_directory,\n adapter_name,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = \"ah\",\n with_head: bool = True,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(\n PredictionHeadLoader(\n self,\n error_on_missing=False,\n convert_to_flex_head=self._convert_to_flex_head,\n )\n )\n # Support passing a num_labels for compatibility reasons. 
Convert to label map here.\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None:\n id2label = {i: \"LABEL_\" + str(i) for i in range(num_labels)}\n return super().load_adapter(\n adapter_name_or_path,\n config=config,\n version=version,\n model_name=model_name,\n load_as=load_as,\n source=source,\n custom_weights_loaders=custom_weights_loaders,\n leave_out=leave_out,\n id2label=id2label,\n set_active=set_active,\n **kwargs,\n )\n\n def save_all_adapters(\n self,\n save_directory: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_all_adapters(\n save_directory,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n with_head: Union[bool, str] = False,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n with_head (Union[bool, str]): If True, will save a head with the same name as the AdapterFusionLayer. If a string,\n this will be used as the name of the head to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)\n\n if with_head:\n # Make sure to cover the different options for adapter_names\n if isinstance(with_head, str):\n head_name = with_head\n elif isinstance(adapter_names, Fuse):\n head_name = adapter_names.name\n elif isinstance(adapter_names, list):\n head_name = \",\".join(adapter_names)\n else:\n head_name = adapter_names\n if head_name not in self.heads:\n raise ValueError(\"No head with name {} found\".format(head_name))\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, head_name)\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n with_head: bool = True,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)\n\n def save_all_heads(self, save_directory):\n for head_name in self.heads:\n save_path = join(save_directory, head_name)\n self.save_head(save_path, head_name)\n\n def get_labels(self):\n return list(self.config.id2label.values())\n\n def get_labels_dict(self):\n return self.config.id2label\n\n def get_adapter(self, name):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n return super().get_adapter(name)\n else:\n return self.base_model.get_adapter(name)\n\n def load_embeddings(self, path: str, name: str):\n if self.base_model is self:\n return super().load_embeddings(path, name)\n 
else:\n return self.base_model.load_embeddings(path, name)\n\n def save_embeddings(self, path, name, tokenizer=None):\n if self.base_model is self:\n return super().save_embeddings(path, name, tokenizer)\n else:\n return self.base_model.save_embeddings(path, name, tokenizer)\n\n def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):\n if self.base_model is None:\n return super().add_embeddings(name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim)\n else:\n return self.base_model.add_embeddings(\n name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim\n )\n\n def set_active_embeddings(self, name):\n if self.base_model is None:\n return super().set_active_embeddings(name)\n else:\n return self.base_model.set_active_embeddings(name)\n\n def delete_embeddings(self, name):\n if self.base_model is None:\n return super().delete_embeddings(name)\n else:\n return self.base_model.delete_embeddings(name)\n" ]
[ [ "torch.save", "torch.nn.Embedding.from_pretrained", "torch.nn.Embedding", "torch.load" ] ]
vishalbelsare/tanda
[ "83ffe22e3ecd4061e9d96e90d8135fd44cddddce" ]
[ "tanda/discriminator/dcnn.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.rnn as rnn\n\nfrom .discriminator import Discriminator\nfrom functools import partial\n\n\nD_H = 2\nD_W = 2\n\n\nclass DCNN(Discriminator):\n \"\"\"\n Discriminator from DCGAN paper\n From https://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py\n \"\"\"\n def __init__(self, dims=[28, 28, 1], df_dim=64):\n super(DCNN, self).__init__(dims=(dims if len(dims) == 3 else dims+[1]))\n self.df_dim = df_dim\n self.out_dim = self.last_layer_size\n\n def _get_logits_op(self, X, n_classes=1, train=True, reuse=False,\n get_layers=False, **kwargs):\n \"\"\"Returns logits\"\"\"\n batch_norm = partial(batch_norm_op, \n bn_vars_collection=self.bn_vars_collection)\n n_batch = tf.shape(X)[0]\n # Apply convolutional layers\n h0 = conv2d(X, self.dims[-1], self.df_dim, name='d_h0_conv')\n h0_a = lrelu(h0)\n h1 = conv2d(h0_a, self.df_dim, self.df_dim * 2, name='d_h1_conv') \n h1_a = lrelu(batch_norm(h1, name='bn_1', train=train, reuse=reuse))\n h2 = conv2d(h1_a, self.df_dim * 2, self.df_dim * 4, name='d_h2_conv') \n h2_a = lrelu(batch_norm(h2, name='bn_2', train=train, reuse=reuse))\n h3 = conv2d(h2_a, self.df_dim * 4, self.df_dim * 8, name='d_h3_conv') \n h3_a = lrelu(batch_norm(h3, name='bn_3', train=train, reuse=reuse))\n h_out = tf.reshape(h3_a, [n_batch, self.out_dim])\n h4 = linear(h_out, self.out_dim, n_classes, scope='d_h3_lin')\n # Check for get_layers\n if get_layers:\n layers = [tf.reshape(z, (n_batch, -1)) for z in [h0, h1, h2, h3]]\n return h4, layers\n return h4\n\n @property\n def last_layer_size(self):\n n_convs, h, w = 4, D_H, D_W\n z1, z2 = self.dims[0], self.dims[1]\n for _ in range(n_convs):\n z1, z2 = int(np.ceil(float(z1) / h)), int(np.ceil(float(z2) / w))\n return int(z1 * z2 * self.df_dim * (2. ** (n_convs - 1)))\n\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\n\ndef conv2d(X, in_dim, out_dim, k_h=5, k_w=5, d_h=D_H, d_w=D_W, stddev=0.02,\n name=\"conv2d\"):\n # Note: dims is (h, w, n_channels)\n with tf.variable_scope(name):\n w = tf.get_variable('w', [k_h, k_w, in_dim, out_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n # Standard convolution\n conv = tf.nn.conv2d(X, w, strides=[1, d_h, d_w, 1], padding='SAME')\n # Add biases\n biases = tf.get_variable('biases', [out_dim],\n initializer=tf.constant_initializer(0.0))\n conv = tf.nn.bias_add(conv, biases)\n return conv\n\n\ndef batch_norm_op(x, bn_vars_collection=\"BN_vars\", train=True, reuse=False,\n epsilon=1e-5, momentum=0.9, name=\"batch_norm\"):\n return tf.contrib.layers.batch_norm(\n x, \n decay=momentum,\n scale=True,\n epsilon=epsilon, \n variables_collections=[bn_vars_collection],\n is_training=train,\n reuse=reuse,\n scope=name\n )\n\n\ndef linear(X, in_dim, out_size, scope=None, stddev=0.02, bias_start=0.0):\n with tf.variable_scope(scope or \"linear\"):\n w = tf.get_variable(\"w\", [in_dim, out_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n b = tf.get_variable(\"bias\", [out_size],\n initializer=tf.constant_initializer(bias_start))\n return tf.matmul(X, w) + b\n" ]
[ [ "tensorflow.nn.bias_add", "tensorflow.matmul", "tensorflow.shape", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.truncated_normal_initializer", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.contrib.layers.batch_norm", "tensorflow.random_normal_initializer", "tensorflow.nn.conv2d" ] ]
nknetsky/ABASliceDownloader
[ "d5dc0623eafd1fbb8437239b4b1877c89709ee27" ]
[ "function.py" ]
[ "# %%\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nimport pandas as pd\nimport sys\n\nfrom allensdk.api.queries.image_download_api import ImageDownloadApi\nfrom allensdk.config.manifest import Manifest\n\nimport logging\nimport os\nfrom tqdm import tqdm\n\n\n# %%\ndef get_gene_by_id(results_df, ExperimentID):\n gene_name = results_df[\"Gene Symbol\"][\n results_df[\"ExperimentID\"] == ExperimentID\n ].iloc[0]\n print(\n \"You are requesting for downloading brain lices of \"\n + gene_name\n + \" (\"\n + ExperimentID\n + \")\",\n file=sys.stderr,\n flush=True,\n )\n print(\n 'The downloaded brain lices will be placed in the dir \"' + gene_name + '\".',\n file=sys.stderr,\n flush=True,\n )\n return gene_name\n\n\n# %%\ndef search_by_keywords(keywords, outfile):\n\n # create a browser\n driver = webdriver.Chrome()\n\n # create a result DataFrame to store results\n result = pd.DataFrame()\n \n # the index of necessary columns in the table\n column_index = [1, 2, 3, 6]\n\n for ii, keyword in enumerate(keywords):\n\n url = \"https://mouse.brain-map.org/search/show?search_term=\" + keyword\n driver.get(url)\n\n # make sure the page is correcly loaded using explict wait\n try:\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"slick-column-name\"))\n )\n except:\n print(\n \"An exception occurred: an element could not be found.\\nThe Internet speed may be too slow.\"\n )\n driver.quit()\n exit()\n\n # get header at the first loop\n# if ii == 0:\n # use selenium to find the header\n elements = driver.find_elements_by_class_name(\"slick-column-name\")\n header = []\n for element in elements:\n header.append(element.text)\n if len(header) == 8:\n header = [header[i] for i in column_index]\n else:\n raise Exception(\"Something went wrong when accessing the header.\")\n\n # user selenium to find the search results in the cells of the table\n elements = driver.find_elements_by_tag_name(\"div[row]\")\n rows = []\n for element in elements:\n if element.text:\n rows.append([element.text.split(\"\\n\")[i - 1] for i in column_index])\n\n # If the search result is present, make it a dataframe\n if rows:\n table = pd.DataFrame(rows, columns=header)\n table.insert(0, \"Keyword\", keyword)\n # If no search result, make an empty dataframe\n else:\n table = pd.DataFrame([keyword], columns=[\"Keyword\"])\n\n # concatenate the search results of each keyword\n result = pd.concat([result, table], ignore_index=True)\n\n # print the search results\n print(result)\n\n driver.quit()\n\n result.to_csv(outfile)\n\n return result\n\n\n# %%\ndef download_brain_slice(df):\n\n # create an image download API\n image_api = ImageDownloadApi()\n format_str = \".jpg\"\n\n # You have probably noticed that the AllenSDK has a logger which notifies you of file downloads.\n # Since we are downloading ~300 images, we don't want to see messages for each one.\n # The following line will temporarily disable the download logger.\n logging.getLogger(\"allensdk.api.api.retrieve_file_over_http\").disabled = True\n\n # get parameters\n path, downsample, indices = ask_parameters_for_downloading(df)\n\n print(\n \"Downloads initiated\", end=\"...\", file=sys.stderr, flush=True,\n )\n\n for index in indices:\n\n # from indices, get experiment id and gene symbol from df\n exp_id = df[\"Experiment\"][index]\n\n # set the dirname as the gene symbol\n 
dirname = df[\"Gene Symbol\"][index]\n\n plane = df[\"Plane\"][index]\n section_data_set_id = exp_id\n section_image_directory = os.path.join(path, dirname)\n\n # get the image ids for all of the images in this data set\n section_images = image_api.section_image_query(\n section_data_set_id\n ) # Should be a dicionary of the features of section images\n section_image_ids = [\n si[\"id\"] for si in section_images\n ] # Take value of 'id' from the dictionary\n\n # Create a progress bar\n pbar_image = tqdm(total=len(section_image_ids), desc=dirname + \" \" + plane)\n\n for section_image_id in section_image_ids:\n\n file_name = str(section_image_id) + format_str\n file_path = os.path.join(section_image_directory, file_name)\n\n Manifest.safe_make_parent_dirs(file_path)\n\n # Check if the file is already downloaded, which happens if the downloads have been interrupted.\n saved_file_names = os.listdir(section_image_directory)\n if file_name in saved_file_names:\n pass\n else:\n image_api.download_section_image(\n section_image_id, file_path=file_path, downsample=downsample\n )\n\n pbar_image.update()\n\n pbar_image.close()\n\n # re-enable the logger\n logging.getLogger(\"allensdk.api.api.retrieve_file_over_http\").disabled = False\n print(\n \"Downloads completed.\", file=sys.stderr, flush=True,\n )\n\n\n# %%\ndef read_previous_results(infile):\n \n result = pd.read_csv(infile, index_col=0)\n print(result)\n\n return result\n\n\n# %%\ndef customized_settings():\n\n path = \".\"\n downsample = 0\n downsample_limit = 7\n\n # make a prompt for path\n while True:\n print(\n \"Enter a path to store brain slices (default: \\033[1m[current directory]\\033[0;0m).\"\n )\n path = input(\">\")\n\n # assess the input\n\n # q\n if path == \"q\":\n sys.exit(\"The program has been stopped by the user.\")\n # if no input, set default\n elif not path:\n break\n # user input\n else:\n # expanduser, \"~\" is usable\n path = os.path.expanduser(path)\n # correct path\n if os.path.isdir(path):\n break\n # unvalid input\n else:\n print('Error: the path \"' + path + '\" is not present.')\n\n # make a prompt for downsample\n while True:\n print(\n \"Downsample number? 0 indicates full size of slices. ([0]-\"\n + str(downsample_limit - 1)\n + \") \"\n )\n downsample = input(\">\")\n\n # assess the input\n\n # q\n if downsample == \"q\":\n sys.exit(\"The program has been stopped by the user.\")\n # if no input, set default\n elif not downsample:\n break\n # user input\n else:\n # convert input to integer\n try:\n downsample = int(downsample)\n except ValueError:\n print(\"Error: the input should be an integer.\")\n continue\n # within downsample_limit\n if downsample in range(downsample_limit):\n break\n else:\n print(\"Error: the input is out of downsample limit.\")\n\n return path, downsample\n\n\ndef ask_parameters_for_downloading(df):\n\n print(\n \"Before downloading brain slices, some parameters are need to be set.\\nIf defaults settings are preferable, press \\033[1mEnter\\033[0;0m key.\\nTo stop the program, enter \\033[1mq\\033[0;0m .\"\n )\n\n # make a prompt for default settings\n while True:\n print(\"Use default settings? 
([y]/n)\")\n default = input(\">\")\n\n # assess the input\n\n # q\n if default == \"q\":\n sys.exit(\"The program has been stopped by the user.\")\n # # if no input or input y\n elif not default or default == \"y\":\n path = \".\"\n downsample = 0\n break\n # n\n elif default == \"n\":\n path, downsample = customized_settings()\n break\n # unvalid input\n else:\n print(\"Error: the input should be y or n.\")\n\n # make a prompt for index\n while True:\n print(\n \"Enter index of the experiments to be downloaded. A list is allowed. For example, enter \\033[1m0\\033[0;0m or \\033[1m0 1 2\\033[0;0m, in which every index should be separated by at least one space (No default).\"\n )\n index = input(\">\")\n\n # assess the input\n\n # q\n if index == \"q\":\n sys.exit(\"The program has been stopped by the user.\")\n # if no input, set default\n elif not index:\n continue\n # user input\n else:\n # separate indices\n index = index.split()\n # convert indices from string to interger\n try:\n index = [int(n) for n in index]\n except ValueError:\n print(\"Error: the input should be integers.\")\n continue\n # remove repeated numbers\n index = list(set(index))\n # sort the input\n index.sort()\n # check if all the numbers in index is within df.index\n if all(n in df.index for n in index):\n break\n else:\n print(\"Error: the input is out of index.\")\n\n return path, downsample, index\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame" ] ]
FeixLiu/abnet
[ "046feafa9280bc77f61f8ef5f76241e8e0f3ecbc" ]
[ "bert/modeling.py" ]
[ " # coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport shutil\nimport tarfile\nimport tempfile\nimport sys\nfrom io import open\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss\n\nfrom .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME\n\nlogger = logging.getLogger(__name__)\n\nPRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz\",\n 'bert-base-german-cased': \"https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased.tar.gz\",\n}\nBERT_CONFIG_NAME = 'bert_config.json'\nTF_WEIGHTS_NAME = 'model.ckpt'\n\ndef load_tf_weights_in_bert(model, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'squad':\n pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\nclass BertConfig(object):\n \"\"\"Configuration class to store the configuration of a `BertModel`.\n \"\"\"\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. 
If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n layer_norm_eps: The epsilon used by LayerNorm.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path):\n \"\"\" Save this instance to a json file.\"\"\"\n with open(json_file_path, \"w\", encoding='utf-8') as writer:\n writer.write(self.to_json_string())\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept ImportError:\n logger.info(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\")\n class BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x 
- u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = 
self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n attention_output = self.output(self_output, input_tensor)\n return attention_output\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n # print(hidden_states.size(), input_tensor.size())\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])\n\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):\n all_encoder_layers = []\n for layer_module in self.layer:\n hidden_states = layer_module(hidden_states, attention_mask)\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers\n\n\nclass BertPooler(nn.Module):\n def 
__init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(bert_model_embedding_weights.size(1),\n bert_model_embedding_weights.size(0),\n bias=False)\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(nn.Module):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n def __init__(self, config, *inputs, **kwargs):\n super(BertPreTrainedModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
\"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):\n \"\"\"\n Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name_or_path: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-large-cased`\n . `bert-base-multilingual-uncased`\n . `bert-base-multilingual-cased`\n . `bert-base-chinese`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `model.chkpt` a TensorFlow checkpoint\n from_tf: should we load the weights from a locally saved TensorFlow checkpoint\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)\n \"\"\"\n state_dict = kwargs.get('state_dict', None)\n kwargs.pop('state_dict', None)\n cache_dir = kwargs.get('cache_dir', None)\n kwargs.pop('cache_dir', None)\n from_tf = kwargs.get('from_tf', False)\n kwargs.pop('from_tf', None)\n from_scratch = kwargs.get('from_scratch', False)\n kwargs.pop('from_scratch', None)\n\n if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path,\n ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),\n archive_file))\n return None\n if resolved_archive_file == archive_file:\n logger.info(\"loading archive file {}\".format(archive_file))\n else:\n logger.info(\"loading archive file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n tempdir = None\n if os.path.isdir(resolved_archive_file) or from_tf:\n serialization_dir = resolved_archive_file\n else:\n # Extract archive to temp dir\n tempdir = tempfile.mkdtemp()\n logger.info(\"extracting archive file {} to temp dir {}\".format(\n resolved_archive_file, tempdir))\n print(resolved_archive_file)\n with tarfile.open(resolved_archive_file, 'r:gz') as archive:\n archive.extractall(tempdir)\n serialization_dir = tempdir\n # Load config\n config_file = os.path.join(serialization_dir, CONFIG_NAME)\n if not os.path.exists(config_file):\n # Backward compatibility with old naming format\n config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)\n config = BertConfig.from_json_file(config_file)\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n if from_scratch:\n return model\n if state_dict is None and not from_tf:\n weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)\n state_dict = torch.load(weights_path, map_location='cpu')\n if tempdir:\n # Clean up temp dir\n shutil.rmtree(tempdir)\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)\n return load_tf_weights_in_bert(model, weights_path)\n # Load from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n start_prefix = ''\n if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):\n start_prefix = 'bert.'\n load(model, prefix=start_prefix)\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n logger.info(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if len(error_msgs) > 0:\n raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n return model\n\n\nclass BertModel(BertPreTrainedModel):\n \"\"\"BERT model (\"Bidirectional Embedding Representations from a Transformer\").\n\n Params:\n config: a BertConfig 
class instance with the configuration to build a new model\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.\n\n Outputs: Tuple of (encoded_layers, pooled_output)\n `encoded_layers`: controled by `output_all_encoded_layers` argument:\n - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end\n of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each\n encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],\n - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding\n to the last attention block of shape [batch_size, sequence_length, hidden_size],\n `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a\n classifier pretrained on top of the hidden state associated to the first character of the\n input (`CLS`) to train on the Next-Sentence task (see BERT's paper).\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = modeling.BertModel(config=config)\n all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n self.hidden_size = config.hidden_size\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor 
which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(input_ids, token_type_ids)\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output\n\nclass BertLayerWithAdapter(BertLayer):\n def __init__(self, config, adapter_dimension=2048, layer_num=0, top_layer_adapter=-1):\n super(BertLayerWithAdapter, self).__init__(config)\n self.layer_num = layer_num\n self.top_layer_adapter = top_layer_adapter\n if layer_num >= top_layer_adapter:\n self.adapter_ln = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.adapter_w1 = Linear(config.hidden_size, adapter_dimension, bias=False)\n self.adapter_w2 = Linear(adapter_dimension, config.hidden_size, bias=False)\n\n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if self.layer_num>=self.top_layer_adapter:\n adapter_input = self.adapter_ln(layer_output)\n adapter_inter = F.relu(self.adapter_w1(adapter_input))\n adapter_output = self.adapter_w2(adapter_inter) + layer_output\n return adapter_output\n else:\n return layer_output\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n\nclass BertEncoderWithAdapter(BertEncoder):\n def __init__(self, config, adapter_dimension=2048, top_layer_adapter=-1):\n super(BertEncoderWithAdapter, self).__init__(config)\n self.layer = nn.ModuleList([copy.deepcopy(BertLayerWithAdapter(config, adapter_dimension, i, top_layer_adapter)) for i in range(config.num_hidden_layers)])\n\nclass BertModelWithAdapter(BertModel):\n def __init__(self, config, adapter_dimension=2048, top_layer_adapter=-1):\n super(BertModelWithAdapter, self).__init__(config)\n self.encoder = BertEncoderWithAdapter(config, adapter_dimension, top_layer_adapter)\n self.embed_lengths = nn.Embedding(1024, self.hidden_size)\n self.top_layer_adapter = top_layer_adapter\n nn.init.normal_(self.embed_lengths.weight, mean=0, std=0.02)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n\n embedding_output = self.embeddings(input_ids, token_type_ids)\n len_tokens = self.embed_lengths(input_ids.new(input_ids.size(0), 1).fill_(0))\n embedding_output = torch.cat([len_tokens, embedding_output], dim=1)\n attention_mask = torch.cat([attention_mask.new(input_ids.size(0), 1).fill_(1), attention_mask], dim=1)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, 
to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=False)[-1]\n predicted_lengths_logits = torch.matmul(encoded_layers[:, 0, :], self.embed_lengths.weight.transpose(0, 1)).float()\n predicted_lengths_logits[:, 0] += float('-inf') # Cannot predict the len_token\n predicted_lengths = F.log_softmax(predicted_lengths_logits, dim=-1)\n encoded_layers = encoded_layers[:, 1:, :]\n\n sequence_output = encoded_layers\n pooled_output = self.pooler(sequence_output)\n\n return encoded_layers, pooled_output, predicted_lengths\n\n\nclass BertForPreTraining(BertPreTrainedModel):\n \"\"\"BERT model with pre-training heads.\n This module comprises the BERT model followed by the two pre-training heads:\n - the masked language modeling head, and\n - the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. 
All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `masked_lm_labels` and `next_sentence_label` are not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `masked_lm_labels` or `next_sentence_label` is `None`:\n Outputs a tuple comprising\n - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and\n - the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForPreTraining(config)\n masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForPreTraining, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):\n sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n if masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n return total_loss\n else:\n return prediction_scores, seq_relationship_score\n\n\nclass BertForMaskedLM(BertPreTrainedModel):\n \"\"\"BERT model with the masked language modeling head.\n This module comprises the BERT model followed by the masked language modeling head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n\n Outputs:\n if `masked_lm_labels` is not `None`:\n Outputs the masked language modeling loss.\n if `masked_lm_labels` is `None`:\n Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForMaskedLM(config)\n masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForMaskedLM, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n prediction_scores = self.cls(sequence_output)\n\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n return masked_lm_loss\n else:\n return prediction_scores\n\n\nclass BertForNextSentencePrediction(BertPreTrainedModel):\n \"\"\"BERT model with next sentence prediction head.\n This module comprises the BERT model followed by the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `next_sentence_label` is not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `next_sentence_label` is `None`:\n Outputs the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForNextSentencePrediction(config)\n seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForNextSentencePrediction, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n seq_relationship_score = self.cls( pooled_output)\n\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n return next_sentence_loss\n else:\n return seq_relationship_score\n\n\nclass BertForSequenceClassification(BertPreTrainedModel):\n \"\"\"BERT model for classification.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary. Items in the batch should begin with the special \"CLS\" token. (see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForSequenceClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForSequenceClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForMultipleChoice(BertPreTrainedModel):\n \"\"\"BERT model for multiple choice tasks.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_choices`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`\n and type 1 corresponds to a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_choices].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])\n input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])\n token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_choices = 2\n\n model = BertForMultipleChoice(config, num_choices)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_choices=2):\n super(BertForMultipleChoice, self).__init__(config)\n self.num_choices = num_choices\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n flat_input_ids = input_ids.view(-1, input_ids.size(-1))\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, self.num_choices)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n return loss\n else:\n return reshaped_logits\n\n\nclass BertForTokenClassification(BertPreTrainedModel):\n \"\"\"BERT model for token-level classification.\n This module is composed of the BERT model with a linear layer on top of\n the full hidden state of the last layer.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, sequence_length, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForTokenClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForTokenClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForQuestionAnswering(BertPreTrainedModel):\n \"\"\"BERT model for Question Answering (span extraction).\n This module is composed of the BERT model with a linear layer on top of\n the sequence output that computes start_logits and end_logits\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n\n Outputs:\n if `start_positions` and `end_positions` are not `None`:\n Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.\n if `start_positions` or `end_positions` is `None`:\n Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end\n position tokens of shape [batch_size, sequence_length].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForQuestionAnswering(config)\n start_logits, end_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForQuestionAnswering, self).__init__(config)\n self.bert = BertModel(config)\n # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version\n # self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.qa_outputs = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n return total_loss\n else:\n return start_logits, end_logits\n" ]
[ [ "torch.nn.Softmax", "torch.cat", "torch.load", "torch.zeros", "torch.nn.Embedding", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.sqrt", "torch.from_numpy", "torch.arange", "tensorflow.train.list_variables", "torch.ones_like", "torch.sigmoid", "torch.nn.init.constant_", "torch.zeros_like", "tensorflow.train.load_variable", "torch.nn.Linear", "torch.nn.init.normal_", "numpy.transpose", "torch.nn.functional.log_softmax", "torch.nn.Tanh", "torch.matmul", "torch.nn.init.xavier_uniform_" ] ]
liangliannie/hht-spectrum
[ "81820901ea1fdf223c69204e26d2a815b92c2b0b" ]
[ "source/hht.py" ]
[ "from netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom PyEMD import EEMD\n\n\ndef plot_imfs(signal, imfs, time_samples=None, fig=None):\n \"\"\"\n plot_imfs function for the Hilbert Huang Transform is adopted from pyhht.\n Author: jaidevd https://github.com/jaidevd/pyhht/blob/dev/pyhht/visualization.py\n Plot the signal, IMFs.\n\n Parameters\n ----------\n signal : array-like, shape (n_samples,)\n The input signal.\n imfs : array-like, shape (n_imfs, n_samples)\n Matrix of IMFs as generated with the `EMD.decompose` method.\n time_samples : array-like, shape (n_samples), optional\n Time instants of the signal samples.\n (defaults to `np.arange(1, len(signal))`)\n -------\n `matplotlib.figure.Figure`\n The figure (new or existing) in which the decomposition is plotted.\n \"\"\"\n n_imfs = imfs.shape[0]\n ax = plt.subplot(n_imfs + 1, 1, 1)\n ax.plot(time_samples, signal)\n ax.axis([time_samples[0], time_samples[-1], signal.min(), signal.max()])\n ax.tick_params(which='both', left=False, bottom=False, labelleft=False,\n labelbottom=False)\n ax.grid(False)\n ax.set_ylabel('Signal')\n ax.set_title('Empirical Mode Decomposition')\n\n # Plot the IMFs\n for i in range(n_imfs - 1):\n # print(i + 2)\n ax = plt.subplot(n_imfs + 1, 1, i + 2)\n ax.plot(time_samples, imfs[i, :])\n # ax.axis([time_samples[0], time_samples[-1], -axis_extent, axis_extent])\n ax.tick_params(which='both', left=False, bottom=False, labelleft=False,\n labelbottom=False)\n ax.grid(False)\n ax.set_ylabel('imf' + str(i + 1))\n\n # Plot the residue\n ax = plt.subplot(n_imfs + 1, 1, n_imfs + 1)\n ax.plot(time_samples, imfs[-1, :], 'r')\n ax.axis('tight')\n ax.tick_params(which='both', left=False, bottom=False, labelleft=False,\n labelbottom=False)\n ax.grid(False)\n ax.set_ylabel('res.')\n return ax\n\n\ndef plot_frequency(signal, imfs, time_samples=None, fig=None):\n \"\"\"\n plot_imfs function for the Hilbert Huang Transform is adopted from pyhht.\n Author: jaidevd https://github.com/jaidevd/pyhht/blob/dev/pyhht/visualization.py\n Plot the signal, IMFs.\n\n Parameters\n ----------\n signal : array-like, shape (n_samples,)\n The input signal.\n imfs : array-like, shape (n_imfs, n_samples)\n Matrix of IMFs as generated with the `EMD.decompose` method.\n time_samples : array-like, shape (n_samples), optional\n Time instants of the signal samples.\n (defaults to `np.arange(1, len(signal))`)\n -------\n `matplotlib.figure.Figure`\n The figure (new or existing) in which the instance frequency is plotted.\n \"\"\"\n n_imfs = imfs.shape[0]\n # print(np.abs(imfs[:-1, :]))\n # axis_extent = max(np.max(np.abs(imfs[:-1, :]), axis=0))\n # Plot original signal\n ax = plt.subplot(n_imfs + 1, 1, 1)\n ax.plot(time_samples, signal)\n ax.axis([time_samples[0], time_samples[-1], signal.min(), signal.max()])\n ax.tick_params(which='both', left=False, bottom=False, labelleft=False,\n labelbottom=False)\n ax.grid(False)\n ax.set_ylabel('Signal')\n ax.set_title('Instantaneous frequency of IMFs')\n\n # Plot the IMFs\n for i in range(n_imfs - 1):\n # print(i + 2)\n ax = plt.subplot(n_imfs + 1, 1, i + 2)\n ax.plot(time_samples, imfs[i, :])\n # ax.axis([time_samples[0], time_samples[-1], -axis_extent, axis_extent])\n ax.tick_params(which='both', left=False, bottom=False, labelleft=False,\n labelbottom=False)\n ax.yaxis.tick_right()\n # ax.yaxis.set_ticks(np.logspace(1, 5, 5))\n plt.tick_params(axis='right', which='minor', labelsize=6)\n ax.grid(False)\n ax.set_ylim((0, np.max(imfs[i, :])))\n ax.set_ylabel('imf' 
+ str(i + 1))\n\n # Plot the residue\n ax = plt.subplot(n_imfs + 1, 1, n_imfs + 1)\n ax.plot(time_samples, imfs[-1, :], 'r')\n ax.axis('tight')\n ax.tick_params(which='both', left=False, bottom=False, labelleft=False,\n labelbottom=False)\n ax.grid(False)\n ax.set_ylabel('res.')\n return ax\n\n\ndef hilb(s, unwrap=False):\n \"\"\"\n Performs Hilbert transformation on signal s.\n Returns amplitude and phase of signal.\n Depending on unwrap value phase can be either\n in range [-pi, pi) (unwrap=False) or\n continuous (unwrap=True).\n \"\"\"\n from scipy.signal import hilbert\n H = hilbert(s)\n amp = np.abs(H)\n phase = np.arctan2(H.imag, H.real)\n if unwrap: phase = np.unwrap(phase)\n\n return amp, phase\n\n\ndef FAhilbert(imfs, dt):\n \"\"\"\n Performs Hilbert transformation on imfs.\n Returns frequency and amplitude of signal.\n \"\"\"\n n_imfs = imfs.shape[0]\n f = []\n a = []\n for i in range(n_imfs - 1):\n # upper, lower = pyhht.utils.get_envelops(imfs[i, :])\n inst_imf = imfs[i, :] # /upper\n inst_amp, phase = hilb(inst_imf, unwrap=True)\n inst_freq = (2 * math.pi) / np.diff(phase) #\n\n inst_freq = np.insert(inst_freq, len(inst_freq), inst_freq[-1])\n inst_amp = np.insert(inst_amp, len(inst_amp), inst_amp[-1])\n\n f.append(inst_freq)\n a.append(inst_amp)\n return np.asarray(f).T, np.asarray(a).T\n\n\ndef hht(data, time, freqsol=33, timesol=50):\n \"\"\"\n hht function for the Hilbert Huang Transform spectrum\n\n Parameters\n ----------\n data : array-like, shape (n_samples,)\n The input signal.\n time : array-like, shape (n_samples), optional\n Time instants of the signal samples.\n (defaults to `np.arange(1, len(signal))`)\n -------\n `matplotlib.figure.Figure`\n The figure (new or existing) in which the hht spectrum is plotted.\n\n example:\n --------------------\n\n .. 
sourcecode:: ipython\n f = Dataset('./source/obs.nc')\n # read one example data\n fsh = f.variables['FSH']\n time = f.variables['time']\n one_site = np.ma.masked_invalid(fsh[0,:])\n time = time[~one_site.mask]\n data = one_site.compressed()\n hht(data, time)\n\n\n ----------------\n \"\"\"\n # freqsol give frequency - axis resolution for hilbert - spectrum\n # timesol give time - axis resolution for hilbert - spectrum\n t0 = time[0]\n t1 = time[-1]\n dt = (t1 - t0) / (len(time) - 1)\n\n eemd = EEMD()\n imfs = eemd.eemd(data)\n freq, amp = FAhilbert(imfs, dt)\n\n # fw0 = np.min(np.min(freq)) # maximum frequency\n # fw1 = np.max(np.max(freq)) # maximum frequency\n\n # if fw0 <= 0:\n # fw0 = np.min(np.min(freq[freq > 0])) # only consider positive frequency\n\n # fw = fw1-fw0\n tw = t1 - t0\n\n bins = np.linspace(0, 12, freqsol) # np.logspace(0, 10, freqsol, base=2.0)\n p = np.digitize(freq, 2 ** bins)\n t = np.ceil((timesol - 1) * (time - t0) / tw)\n t = t.astype(int)\n\n hilbert_spectrum = np.zeros([timesol, freqsol])\n for i in range(len(time)):\n for j in range(imfs.shape[0] - 1):\n if p[i, j] >= 0 and p[i, j] < freqsol:\n hilbert_spectrum[t[i], p[i, j]] += amp[i, j]\n\n hilbert_spectrum = abs(hilbert_spectrum)\n fig1 = plt.figure(figsize=(5, 5))\n plot_imfs(data, imfs, time_samples=time, fig=fig1)\n\n fig2 = plt.figure(figsize=(5, 5))\n plot_frequency(data, freq.T, time_samples=time, fig=fig2)\n\n fig0 = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n c = ax.contourf(np.linspace(t0, t1, timesol), bins,\n hilbert_spectrum.T) # , colors=('whites','lategray','navy','darkgreen','gold','red')\n ax.invert_yaxis()\n ax.set_yticks(np.linspace(1, 11, 11))\n Yticks = [float(math.pow(2, p)) for p in np.linspace(1, 11, 11)] # make 2^periods\n ax.set_yticklabels(Yticks)\n ax.set_xlabel('Time', fontsize=8)\n ax.set_ylabel('Period', fontsize=8)\n position = fig0.add_axes([0.2, -0., 0.6, 0.01])\n cbar = plt.colorbar(c, cax=position, orientation='horizontal')\n cbar.set_label('Power')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.tick_params", "numpy.abs", "numpy.linspace", "numpy.asarray", "matplotlib.pyplot.figure", "numpy.arctan2", "numpy.ceil", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "numpy.unwrap", "numpy.diff", "numpy.max", "numpy.digitize", "matplotlib.pyplot.show", "numpy.zeros", "scipy.signal.hilbert" ] ]
everymind/SurprisingMinds-Analysis
[ "eeb308043f471de3cdb505f82461cf8d6cf40e16" ]
[ "PythonAnalysisScripts/preprocessing/Average_Clip_Per_Day_PupilDetection_MakingFigs.py" ]
[ "import os\nimport glob\nimport cv2\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport zipfile\nimport shutil\nimport fnmatch\nimport sys\nimport math\nimport csv\n\n### FUNCTIONS ###\ndef unpack_to_temp(path_to_zipped, path_to_temp):\n try:\n # copy zip file to current working directory\n #print(\"Copying {folder} to current working directory...\".format(folder=path_to_zipped))\n current_working_directory = os.getcwd()\n copied_zipped = shutil.copy2(path_to_zipped, current_working_directory)\n path_to_copied_zipped = os.path.join(current_working_directory, copied_zipped.split(sep=os.sep)[-1])\n # unzip the folder\n #print(\"Unzipping files in {folder}...\".format(folder=path_to_copied_zipped))\n day_unzipped = zipfile.ZipFile(path_to_copied_zipped, mode=\"r\")\n # extract files into temp folder\n day_unzipped.extractall(path_to_temp)\n # close the unzipped file\n day_unzipped.close()\n #print(\"Finished unzipping {folder}!\".format(folder=path_to_copied_zipped))\n # destroy copied zipped file\n #print(\"Deleting {file}...\".format(file=path_to_copied_zipped))\n os.remove(path_to_copied_zipped)\n #print(\"Deleted {file}!\".format(file=path_to_copied_zipped))\n return True\n except Exception: \n print(\"Could not unzip {folder}\".format(folder=path_to_zipped)) \n return False\n\ndef list_sub_folders(path_to_root_folder):\n # List all sub folders\n sub_folders = []\n for folder in os.listdir(path_to_root_folder):\n if(os.path.isdir(os.path.join(path_to_root_folder, folder))):\n sub_folders.append(os.path.join(path_to_root_folder, folder))\n return sub_folders\n\ndef find_target_frame(ref_timestamps_csv, target_timestamps_csv, ref_frame):\n # Find the frame in one video that best matches the timestamp of ref frame from another video\n # Get ref frame time\n ref_timestamp = ref_timestamps_csv[ref_frame]\n ref_timestamp = ref_timestamp.split('+')[0][:-1]\n ref_time = datetime.datetime.strptime(ref_timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n # Generate delta times (w.r.t. 
start_frame) for every frame timestamp\n frame_counter = 0\n for timestamp in target_timestamps_csv:\n timestamp = timestamp.split('+')[0][:-1]\n time = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n timedelta = ref_time - time\n seconds_until_alignment = timedelta.total_seconds()\n if(seconds_until_alignment < 0):\n break\n frame_counter = frame_counter + 1\n return frame_counter\n\n# making figures\n#list_of_circles = circles[0]\n#source_image = blurred\ndef find_darkest_circle(list_of_circles, source_image):\n #print(\"Finding darkest circle in {list}...\".format(list=list_of_circles))\n # starting parameters\n darkest_intensity = 255\n darkest_index = 0\n # check that source_image is a grayscaled image\n if len(source_image.shape) > 2: \n print(\"{Image} is not grayscale!\".format(Image=source_image))\n exit()\n for i in range(len(list_of_circles)):\n # make a copy of the source image\n copied_image = source_image.copy()\n # create a mask image that is the same size as source_image\n mask = np.zeros(copied_image.shape, copied_image.dtype)\n # get center coordinates and radius of circle from list_of_circles\n center = (list_of_circles[i][0], list_of_circles[i][1])\n radius = list_of_circles[i][2]\n #print(\"Center: {x},{y}\".format(x=center[0], y=center[1]))\n # draw mask circle at coordinates and w/radius of circle from list_of_circles\n mask_circle = cv2.circle(mask, center, radius, 255, -1)\n # get coordinates of mask circle pixels\n where = np.where(mask==255)\n # find those same coordinates in source_image\n intensity_inside_circle_on_source_image = source_image[where[0], where[1]]\n ## for debugging and making images for figures\n #this_circle = cv2.circle(copied_image, center, radius, (0, 0, 0), 5)\n #plt.imshow(this_circle)\n #plt.show()\n # take average of those pixels in source_image\n average_intensity = np.average(intensity_inside_circle_on_source_image)\n #print(\"Average intensity of circle {number}: {intensity}\".format(number=i, intensity=average_intensity))\n # check this circle's intensity against darkest circle found so far\n if (average_intensity < darkest_intensity):\n darkest_intensity = average_intensity\n darkest_index = i\n #print(\"Darkest circle: {number}, intensity {intensity}\".format(number=darkest_index, intensity=darkest_intensity))\n return list_of_circles[darkest_index]\n\ndef make_time_buckets(start_timestamp, bucket_size_ms, end_timestamp, fill_pattern): \n start_timestamp = start_timestamp.split('+')[0][:-3]\n end_timestamp = end_timestamp.split('+')[0][:-3]\n buckets_start_time = datetime.datetime.strptime(start_timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n buckets_end_time = datetime.datetime.strptime(end_timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n\n current_bucket = buckets_start_time\n time_buckets = []\n window = datetime.timedelta(milliseconds=bucket_size_ms)\n while current_bucket <= buckets_end_time:\n time_buckets.append(current_bucket)\n current_bucket = current_bucket + window\n\n bucket_list = dict.fromkeys(time_buckets)\n\n for key in time_buckets: \n bucket_list[key] = fill_pattern\n # -5 remains in a time bucket, this means no 'near-enough timestamp' frame was found in video\n\n return bucket_list\n\ndef find_nearest_timestamp_key(timestamp_to_check, dict_of_timestamps, time_window):\n for key in dict_of_timestamps.keys():\n if key <= timestamp_to_check <= (key + time_window):\n return key\n\n# making figs (debug defaults, kept commented out so the script can run: stimuli_name,\n# current_trial, right_video_path, right_eye_timestamps, csv_folder and bucket_size\n# are only defined inside the per-trial loop below)\n#which_eye = \"right\"\n#which_stimuli = stimuli_name\n#trial_number = current_trial\n#video_path = right_video_path\n#video_timestamps = right_eye_timestamps\n#align_frame = 0\n#csv_path = csv_folder\n#bucket_size_ms = bucket_size\ndef find_pupil(which_eye, which_stimuli, trial_number, video_path, video_timestamps, align_frame, csv_path, bucket_size_ms):\n ### row = timestamp, not frame #\n # Open eye video and world video\n video = cv2.VideoCapture(video_path)\n # Jump to specific frame (position) for alignment purposes \n ret = video.set(cv2.CAP_PROP_POS_FRAMES, align_frame)\n # Open display window for debugging\n video_name = video_path.split(os.sep)[-1]\n debug_name = \"Eye\"+\"_\"+video_name\n cv2.namedWindow(debug_name)\n # each time bucket = 4ms (eye cameras ran at 60fps, aka 16.6666 ms per frame)\n # octopus clip to thank you screen is 16.2 seconds\n first_timestamp = video_timestamps[align_frame]\n last_timestamp = video_timestamps[-1]\n initialize_pattern = [-5,-5,-5,-5,-5,-5]\n pupil_buckets = make_time_buckets(first_timestamp, bucket_size_ms, last_timestamp, initialize_pattern)\n # Loop through 4ms time buckets of eye video to find nearest frame and save pupil xy position and area\n timestamps_to_check = video_timestamps[align_frame:]\n for timestamp in timestamps_to_check:\n # find the time bucket into which this frame falls\n timestamp = timestamp.split('+')[0][:-3]\n timestamp_dt = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n bucket_window = datetime.timedelta(milliseconds=bucket_size_ms)\n current_key = find_nearest_timestamp_key(timestamp_dt, pupil_buckets, bucket_window)\n # Read frame at current position\n ret, frame = video.read()\n mask = np.copy(frame)\n # Make sure the frame exists!\n if frame is not None:\n # Magically find pupil...\n # Convert to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n # Median blur\n blurred = cv2.medianBlur(gray, 25)\n # Hough circle detection\n rows = blurred.shape[0]\n ## WTF DOES HOUGHCIRCLES DO??\n ## sometimes the image seems really clean and easy to find the pupil and yet it still fails\n circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 1.0, rows / 9,\n param1=55, param2=20,\n minRadius=10, maxRadius=150)\n # If there are no circles, then what??\n if circles is not None:\n #print(\"Circles found: {circles}\".format(circles=circles))\n # check that we are taking the darkest circle\n darkest_circle = find_darkest_circle(circles[0], blurred)\n #print(\"Darkest circle: {circle}\".format(circle=darkest_circle))\n # Using the best circle...crop around center\n # Threshold\n # Fit an ellipse\n # Crop\n eye_circle = np.uint16(np.around(darkest_circle))\n left = eye_circle[0] - 64\n top = eye_circle[1] - 64\n crop_size = 128\n # Check boundaries of image\n if( (left >= 0) and (top >= 0) and ((left + crop_size) < 800) and ((top + crop_size) < 600) ):\n cropped = blurred[top:(top + crop_size), left:(left+crop_size)]\n # Compute average and stdev of all pixel luminances along border\n ## this currently averages the rightmost and leftmost edges of the cropped window, because we assume that these pixels are not the pupil\n avg = (np.mean(cropped[:, 0]) + np.mean(cropped[:, -1])) / 2\n std = (np.std(cropped[:, 0]) + np.std(cropped[:, -1])) / 2\n ## Find shape of pupil\n # Threshold\n ## try removing otsu\n ## try using 2 standard devs away from average instead of 3\n thresholded = np.uint8(cv2.threshold(cropped, avg-(std*4.5), 255, cv2.THRESH_BINARY_INV)[1])\n # Find contours\n contours, heirarchy = cv2.findContours(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n # if at least one contour\n if 
len(contours) > 0:\n # Get largest contour\n largest_contour = max(contours, key=cv2.contourArea)\n # sanity check size of largest contour\n ## SHOULD MAKE SURE THAT LARGEST CONTOUR ISN'T BIGGER THAN CROPPED\n #####\n # make sure contour is large enough to fit an ellipse to it\n if(len(largest_contour) > 5):\n # Fit ellipse to largest contour\n ellipse = cv2.fitEllipse(largest_contour)\n # Shift ellipse back to full frame coordinates\n shifted_center = (np.int(ellipse[0][0]) + left, np.int(ellipse[0][1]) + top)\n # Draw circles\n frame_copy = frame.copy()\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n center = (i[0], i[1])\n # circle center\n cv2.circle(frame_copy, center, 5, (0, 100, 100), 1)\n # circle outline\n radius = i[2]\n cv2.circle(frame_copy, center, radius, (255, 0, 255), 1)\n # Draw ellipse around largest contour\n axes = (np.int(ellipse[1][0]/2),np.int(ellipse[1][1]/2)) \n angle = np.int(ellipse[2])\n frame_copy = cv2.ellipse(frame_copy, shifted_center, axes, angle, 0, 360, (0, 255, 0), 3, cv2.LINE_AA, 0)\n # Draw debugging circle around darkest circle\n axes = (darkest_circle[2], darkest_circle[2]) \n angle = 0\n frame_copy = cv2.ellipse(frame_copy, (darkest_circle[0], darkest_circle[1]), axes, angle, 0, 360, (255, 0, 0), 2, cv2.LINE_AA, 0)\n # Save Data\n darkest_circle_area = np.pi*(darkest_circle[2])**2\n #print(\"Pupil Size predicted by ellipses: {area}\".format(area=cv2.contourArea(largest_contour)))\n #print(\"Pupil size predicted by circles: {area1}\".format(area1=darkest_circle_area))\n # save data from both findContours and find_darkest_circle\n pupil_buckets[current_key][0] = shifted_center[0]\n pupil_buckets[current_key][1] = shifted_center[1]\n pupil_buckets[current_key][2] = cv2.contourArea(largest_contour)\n pupil_buckets[current_key][3] = darkest_circle[0]\n pupil_buckets[current_key][4] = darkest_circle[1]\n pupil_buckets[current_key][5] = (darkest_circle[2]**2) * math.pi\n # Fill debug displays and show\n cv2.imshow(debug_name, frame_copy)\n ret = cv2.waitKey(1)\n else:\n #print(\"Pupil Size: n/a (too small)\")\n pupil_buckets[current_key][2] = -1\n pupil_buckets[current_key][5] = -1\n else:\n #print(\"Pupil Size: n/a (pupil off screen)\")\n pupil_buckets[current_key][2] = -2\n pupil_buckets[current_key][5] = -2\n else:\n #print(\"Pupil Size: n/a (no contour)\")\n pupil_buckets[current_key][2] = -3\n pupil_buckets[current_key][5] = -3\n else:\n #print(\"Pupil Size: n/a (no circles)\")\n pupil_buckets[current_key][2] = -4\n pupil_buckets[current_key][5] = -4\n ## STILL DOING THIS?????\n # Add current frame to average clip at correct slot\n #day_avg_clip[:,:,f] = day_avg_clip[:,:,f] + gray\n # Save pupil size data\n # HOW TO SAVE A DICTIONARY AS A CSV????\n time_chunks = []\n for key in pupil_buckets.keys():\n time_chunks.append(key)\n time_chunks = sorted(time_chunks)\n pupils = []\n for time in time_chunks:\n pupil = pupil_buckets[time]\n pupils.append(pupil)\n #print(\"Saving csv of positions and areas for {eye} eye...\".format(eye=which_eye))\n padded_filename = which_eye + \"_\" + which_stimuli + \"_\" + str(trial_number).zfill(4) + \".csv\"\n csv_file = os.path.join(csv_path, padded_filename)\n np.savetxt(csv_file, pupils, fmt='%.2f', delimiter=',')\n # release video capture\n video.release()\n cv2.destroyAllWindows()\n\ndef save_average_clip_images(which_eye, no_of_seconds, save_folder_path, images):\n # Save images from trial clip to folder\n #print(\"Saving averaged frames from {eye}...\".format(eye=which_eye))\n for f in 
range(no_of_seconds):\n\n # Create file name with padded zeros\n padded_filename = which_eye + str(f).zfill(4) + \".png\"\n\n # Create image file path from save folder\n image_file_path = os.path.join(save_folder_path, padded_filename)\n\n # Extract gray frame from clip\n gray = np.uint8(images[:,:,f] * 255)\n\n # Write to image file\n ret = cv2.imwrite(image_file_path, gray)\n\n### -------------------------------------------- ###\n### LET THE ANALYSIS BEGIN!! ###\n### log everything in a text file\ncurrent_working_directory = os.getcwd()\nclass Logger(object):\n def __init__(self):\n # grab today's date\n now = datetime.datetime.now()\n todays_datetime = datetime.datetime.today().strftime('%Y%m%d-%H%M%S')\n log_filename = \"PupilDetection_figures_log_\" + now.strftime(\"%Y-%m-%d_%H-%M-%S\") + \".txt\"\n log_file = os.path.join(current_working_directory, log_filename)\n self.terminal = sys.stdout\n self.log = open(log_file, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message) \n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n pass \nsys.stdout = Logger()\n### ------------------------------------------- ###\n# list all folders in Synology drive\n# on lab computer\ndata_drive = r\"\\\\Diskstation\\SurprisingMinds\"\n### FOR DEBUGGING ON LAPTOP ###\n#data_drive = r'C:\\Users\\taunsquared\\Desktop\\SM_temp'\n# get the subfolders, sort their names\ndata_folders = sorted(os.listdir(data_drive))\nzipped_data = fnmatch.filter(data_folders, '*.zip')\nzipped_names = [item[:-4] for item in zipped_data]\n# skip first day because it was an exhibit debugging day\nzipped_data = zipped_data[1:]\n# figure out which days have already been analysed\n# when working from local drive, lab computer\nanalysed_drive = r\"C:\\Users\\KAMPFF-LAB-VIDEO\\Dropbox\\SurprisingMinds\\analysis\\dataPythonWorkflows\"\n# when working from laptop\n#analysed_drive = r\"C:\\Users\\taunsquared\\Dropbox\\SurprisingMinds\\analysis\\dataPythonWorkflows\"\nanalysed_folders = sorted(os.listdir(analysed_drive))\nalready_analysed = [item for item in zipped_names if item in analysed_folders]\n# unzip each folder, do the analysis\nfor item in zipped_data:\n \n # check to see if this folder has already been analyzed\n if item[:-4] in already_analysed:\n print(\"Folder {name} has already been analysed\".format(name=item))\n continue\n \n # if this folder hasn't already been analysed, full speed ahead!\n print(\"Working on folder {name}\".format(name=item))\n this_day_date = item[:-4].split('_')[1]\n # grab a folder \n day_zipped = os.path.join(data_drive, item)\n\n # Build relative analysis paths in a folder with same name as zip folder\n analysis_folder = os.path.join(analysed_drive, item[:-4], \"Analysis\")\n\n # Analysis subfolders\n csv_folder = os.path.join(analysis_folder, \"csv\")\n alignment_folder = os.path.join(analysis_folder, \"alignment\")\n\n # Create analysis folder (and sub-folders) if it (they) does (do) not exist\n if not os.path.exists(analysis_folder):\n #print(\"Creating analysis folder.\")\n os.makedirs(analysis_folder)\n if not os.path.exists(csv_folder):\n #print(\"Creating csv folder.\")\n os.makedirs(csv_folder)\n if not os.path.exists(alignment_folder):\n #print(\"Creating alignment folder.\")\n os.makedirs(alignment_folder)\n\n # create a temp folder in current working directory to store data (contents of unzipped folder)\n day_folder = 
os.path.join(current_working_directory, \"figures_temp\")\n\n # unzip current zipped folder into temp folder, this function checks whether the folder is unzippable\n # if it unzips, the function returns True; if it doesn't unzip, the function returns False\n if unpack_to_temp(day_zipped, day_folder):\n\n # List all trial folders\n trial_folders = list_sub_folders(day_folder)\n num_trials = len(trial_folders)\n current_trial = 0\n stim_vids = [24.0, 25.0, 26.0, 27.0, 28.0, 29.0]\n stim_name_to_float = {\"stimuli024\": 24.0, \"stimuli025\": 25.0, \"stimuli026\": 26.0, \"stimuli027\": 27.0, \"stimuli028\": 28.0, \"stimuli029\": 29.0}\n stim_float_to_name = {24.0: \"stimuli024\", 25.0: \"stimuli025\", 26.0: \"stimuli026\", 27.0: \"stimuli027\", 28.0: \"stimuli028\", 29.0: \"stimuli029\"}\n for trial_folder in trial_folders:\n # add exception handling so that a weird day doesn't totally break everything \n try:\n trial_name = trial_folder.split(os.sep)[-1]\n # Load CSVs and create timestamps\n # ------------------------------\n # Get world movie timestamp csv path\n world_csv_path = glob.glob(trial_folder + '/*world.csv')[0]\n stimuli_name = world_csv_path.split(\"_\")[-2]\n stimuli_number = stim_name_to_float[stimuli_name]\n # at what time resolution to build eye and world camera data?\n bucket_size = 4 #milliseconds\n\n # Load world CSV\n world_timestamps = np.genfromtxt(world_csv_path, dtype=np.str, delimiter=' ')\n\n # Get eye timestamp csv paths\n right_eye_csv_path = glob.glob(trial_folder + '/*righteye.csv')[0]\n left_eye_csv_path = glob.glob(trial_folder + '/*lefteye.csv')[0]\n\n # Load eye CSVs\n right_eye_timestamps = np.genfromtxt(right_eye_csv_path, dtype=np.str, delimiter=' ')\n left_eye_timestamps = np.genfromtxt(left_eye_csv_path, dtype=np.str, delimiter=' ')\n # Get world video filepath\n world_video_path = glob.glob(trial_folder + '/*world.avi')[0]\n # ------------------------------\n # ------------------------------\n # Now start pupil detection \n # ------------------------------\n # Get right eye video filepath\n right_video_path = glob.glob(trial_folder + '/*righteye.avi')[0]\n # Get left eye video filepath\n left_video_path = glob.glob(trial_folder + '/*lefteye.avi')[0]\n \n # Find right eye pupils and save pupil data\n print(\"Finding right eye pupils...\")\n find_pupil(\"right\", stimuli_name, current_trial, right_video_path, right_eye_timestamps, 0, csv_folder, bucket_size)\n # Find left eye pupils and save pupil data\n print(\"Finding left eye pupils...\")\n find_pupil(\"left\", stimuli_name, current_trial, left_video_path, left_eye_timestamps, 0, csv_folder, bucket_size)\n \n # Report progress\n cv2.destroyAllWindows()\n print(\"Finished Trial: {trial}\".format(trial=current_trial))\n current_trial = current_trial + 1\n except Exception: \n cv2.destroyAllWindows()\n print(\"Trial {trial} failed!\".format(trial=current_trial))\n current_trial = current_trial + 1\n\n # report progress (video captures are opened and released inside find_pupil; no world video capture exists at this scope)\n cv2.destroyAllWindows()\n print(\"Finished {day}\".format(day=day_zipped[:-4]))\n\n # delete temporary folder with unzipped data contents\n print(\"Deleting temp folder of unzipped data...\")\n shutil.rmtree(day_folder)\n print(\"Delete successful!\")\n\n#FIN\nprint(\"Completed analysis on all data folders in this drive!\")\n# close logfile\nsys.stdout.close()" ]
[ [ "numpy.uint8", "numpy.around", "numpy.genfromtxt", "numpy.int", "numpy.copy", "numpy.std", "numpy.mean", "numpy.savetxt", "numpy.average", "numpy.zeros", "numpy.where" ] ]
mli0603/PSMNet
[ "52e36b09529225ffd38a1ceef86976087350b987" ]
[ "dataloader/KITTILoader.py" ]
[ "import os\nimport torch\nimport torch.utils.data as data\nimport torch\nimport torchvision.transforms as transforms\nimport random\nfrom PIL import Image, ImageOps\nimport numpy as np\nfrom dataloader import preprocess\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\ndef default_loader(path):\n return Image.open(path).convert('RGB')\n\ndef disparity_loader(path):\n return Image.open(path)\n\n\nclass myImageFloder(data.Dataset):\n def __init__(self, left, right, left_disparity, training, loader=default_loader, dploader=disparity_loader):\n \n self.left = left\n self.right = right\n self.disp_L = left_disparity\n self.loader = loader\n self.dploader = dploader\n self.training = training\n\n def __getitem__(self, index):\n left = self.left[index]\n right = self.right[index]\n disp_L= self.disp_L[index]\n\n left_img = self.loader(left)\n right_img = self.loader(right)\n dataL = self.dploader(disp_L)\n\n\n if self.training: \n w, h = left_img.size\n th, tw = 256, 512\n \n x1 = random.randint(0, w - tw)\n y1 = random.randint(0, h - th)\n\n left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))\n right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))\n\n dataL = np.ascontiguousarray(dataL,dtype=np.float32)/256\n dataL = dataL[y1:y1 + th, x1:x1 + tw]\n\n processed = preprocess.get_transform(augment=False) \n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return left_img, right_img, dataL\n else:\n w, h = left_img.size\n\n left_img = left_img.crop((w-1232, h-368, w, h))\n right_img = right_img.crop((w-1232, h-368, w, h))\n w1, h1 = left_img.size\n\n dataL = dataL.crop((w-1232, h-368, w, h))\n dataL = np.ascontiguousarray(dataL,dtype=np.float32)/256\n\n processed = preprocess.get_transform(augment=False) \n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return left_img, right_img, dataL\n\n def __len__(self):\n return len(self.left)\n" ]
[ [ "numpy.ascontiguousarray" ] ]
margaretkennedy/deephaven-core
[ "d1656f0994a42b5fdf51c41c49a63e8d8a2b4088" ]
[ "Integrations/python/test/testPlot.py" ]
[ "#\n# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending\n#\n\n\n##############################################################################\n# NOTE: the jvm should have been initialized, or this test will certainly fail\n##############################################################################\n\nimport sys\nimport tempfile\nimport os\nimport numpy\n\nfrom deephaven import TableTools, ComboAggregateFactory\n\n# NB: these two modules cannot even be imported successfully without the jvm initialized\nimport deephaven.Plot as Plot\n\nif sys.version_info[0] < 3:\n import unittest2 as unittest\nelse:\n import unittest\n\n\nclass TestPlot(unittest.TestCase):\n \"\"\"\n Test cases for the illumon.iris.Plot module\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Inherited method allowing initialization of test environment\n \"\"\"\n cls.table = TableTools.emptyTable(200).update(\"timestamp=new DBDateTime((long)(i/2)*1000000000)\",\n \"Sym=((i%2 == 0) ? `MSFT` : `AAPL`)\",\n \"price=(double)((i%2 == 0) ? 100.0 + (i/2) + 5*Math.random() : 250.0 + (i/2) + 10*Math.random())\")\n\n longs = numpy.arange(0, 86401, 60, dtype=numpy.int64)\n cls.arrays = {\n 'DBDateTime[]': longs.astype('datetime64[s]'),\n 'long[]': longs,\n 'int[]': longs.astype(numpy.int32),\n 'float[]': longs.astype(numpy.float32),\n 'double[]': longs.astype(numpy.float64),\n }\n\n def testColor(self):\n \"\"\"\n Test suite for color methods\n \"\"\"\n\n with self.subTest(msg=\"color(string)\"):\n color = Plot.color(\"#000000\")\n\n with self.subTest(msg=\"colorHSL(float, float, float)\"):\n color = Plot.colorHSL(0.0, 0.0, 0.0)\n with self.subTest(msg=\"colorHSL(float, float, float, float)\"):\n color = Plot.colorHSL(0.0, 0.0, 0.0, 0.0)\n\n with self.subTest(msg=\"colorRGB(int)\"):\n color = Plot.colorRGB(0)\n with self.subTest(msg=\"colorRGB(int, boolean)\"):\n color = Plot.colorRGB(0, True)\n with self.subTest(msg=\"colorRGB(int, int, int)\"):\n color = Plot.colorRGB(0, 0, 0)\n with self.subTest(msg=\"colorRGB(int, int, int)\"):\n color = Plot.colorRGB(0, 0, 0, 255)\n with self.subTest(msg=\"colorRGB(float, float, float)\"):\n color = Plot.colorRGB(0.0, 0.0, 0.0)\n with self.subTest(msg=\"colorRGB(float, float, float, float)\"):\n color = Plot.colorRGB(0.0, 0.0, 0.0, 1.0)\n del color\n\n with self.subTest(msg=\"colorNames()\"):\n names = Plot.colorNames()\n self.assertTrue(isinstance(names, list) and len(names) > 0)\n print(\"colorNames() values {}\".format(names))\n\n def testFont(self):\n \"\"\"\n Test suite for font methods\n \"\"\"\n\n fstyle = None\n with self.subTest(msg=\"fontStyle(string)\"):\n fstyle = Plot.fontStyle(\"bold_italic\") # not case sensitive\n\n with self.subTest(msg=\"font(string, string, int)\"):\n font = Plot.font(\"Arial\", \"p\", 10) # (family, style, size)\n with self.subTest(msg=\"font(string, fontstyle, int)\"):\n font = Plot.font(\"Arial\", fstyle, 10)\n\n with self.subTest(msg=\"fontFamilyNames()\"):\n names = Plot.fontFamilyNames()\n self.assertTrue(isinstance(names, list) and len(names) > 0)\n print(\"fontFamilyNames() values {}\".format(names))\n with self.subTest(msg=\"fontStyleNames()\"):\n names = Plot.fontStyleNames()\n self.assertTrue(isinstance(names, list) and len(names) > 0)\n print(\"fontStyleNames() values {}\".format(names))\n\n def testLineStyle(self):\n \"\"\"\n Test suite for line style construction\n \"\"\"\n\n with self.subTest(msg=\"lineEndStyleNames()\"):\n names = Plot.lineEndStyleNames()\n self.assertTrue(isinstance(names, list) and len(names) > 
0)\n print(\"lineEndStyleNames() values {}\".format(names))\n\n with self.subTest(msg=\"lineJoinStyleNames()\"):\n names = Plot.lineJoinStyleNames()\n self.assertTrue(isinstance(names, list) and len(names) > 0)\n print(\"lineJoinStyleNames() values {}\".format(names))\n\n endStyle, joinStyle = None, None\n with self.subTest(msg=\"lineEndStyle(string)\"): # should be [BUTT, ROUND, SQUARE], not case sensitive\n endStyle = Plot.lineEndStyle(\"Butt\")\n with self.subTest(msg=\"lineJoinStyle(string)\"): # should be [BEVEL, MITER, ROUND]\n joinStyle = Plot.lineJoinStyle(\"Bevel\")\n\n with self.subTest(msg=\"lineStyle(double, lineEndStyle, lineJoinStyle, double...)\"):\n ls = Plot.lineStyle(4.0, endStyle, joinStyle, 3.0, 3.0)\n with self.subTest(msg=\"lineStyle(double, string, string, double...)\"):\n ls = Plot.lineStyle(4.0, \"butt\", \"bevel\", 3.0, 3.0)\n with self.subTest(msg=\"lineStyle(double)\"):\n ls = Plot.lineStyle(4.0)\n with self.subTest(msg=\"lineStyle(double, int[])\"):\n ls = Plot.lineStyle(4.0, numpy.array([3, 3], dtype=numpy.int32))\n with self.subTest(msg=\"lineStyle(double, long[])\"):\n ls = Plot.lineStyle(4.0, numpy.array([3, 3], dtype=numpy.int64))\n with self.subTest(msg=\"lineStyle(double, float[])\"):\n ls = Plot.lineStyle(4.0, numpy.array([3, 3], dtype=numpy.float32))\n with self.subTest(msg=\"lineStyle(double, double[])\"):\n ls = Plot.lineStyle(4.0, [3.0, 3.0])\n with self.subTest(msg=\"lineStyle(double...)\"):\n ls = Plot.lineStyle(3.0, 3.0)\n\n with self.subTest(msg=\"lineStyle(string, string)\"):\n ls = Plot.lineStyle(\"butt\", \"bevel\")\n with self.subTest(msg=\"lineStyle()\"):\n ls = Plot.lineStyle()\n with self.subTest(msg=\"lineStyle()\"):\n ls = Plot.lineStyle()\n\n # NOTE: These patterns will fail.. [3, 3] -> int[], which doesn't match any function signature\n # note that [3.0, 3.0] -> double[] would match a (different) function signature\n # with self.subTest(msg=\"lineStyle(double, lineEndStyle, lineJoinStyle, List<T>)\"):\n # ls = Plot.lineStyle(4.0, endStyle, joinStyle, [3, 3])\n # with self.subTest(msg=\"lineStyle(double, string, string, List<T>)\"):\n # ls = Plot.lineStyle(4.0, \"butt\", \"bevel\", [3, 3])\n # with self.subTest(msg=\"lineStyle(List<T>)\"):\n # ls = Plot.lineStyle([3, 3])\n\n def testTheme(self):\n \"\"\"\n Theme test suite\n \"\"\"\n\n with self.subTest(msg=\"themeNames()\"):\n names = Plot.themeNames()\n self.assertTrue(isinstance(names, list) and len(names) > 0)\n print(\"themeNames() values {}\".format(names))\n\n def testPlot(self):\n \"\"\"\n plot method calls - possibly expand over time\n \"\"\"\n\n # perform basic Plot.plot(name, x, y), where x, y span every pairwise choice from the self.arrays dictionary\n figure = None\n typs = sorted(self.arrays.keys())\n for i, xtyp in enumerate(typs):\n xarray = self.arrays[xtyp]\n for j, ytyp in enumerate(typs):\n yarray = self.arrays[ytyp]\n with self.subTest(msg=\"plot({}, {})\".format(xtyp, ytyp)):\n series_name = '{}_{}'.format(xtyp, ytyp)\n figure = Plot.plot(series_name, xarray, yarray).show()\n del figure\n\n def testCatPiePlotTroubles(self):\n\n val = [20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 40, 50, 60]\n cat = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\"]\n fig = Plot.piePlot(\"pp2\", cat, val)\n del fig\n\n" ]
[ [ "numpy.arange", "numpy.array" ] ]
andrewdoss/algorithms_practice
[ "671ae4a4ec05b6cf87ee44faf092456444ed3cf0" ]
[ "algorithms_illuminated/part_4/chapter_20/hueristic_tsp.py" ]
[ "\"\"\"Two-Opt local search for Traveling Salesman Problem.\n\nThis module introduces efficient and approximately correct alternatives to\nexhaustive search.\n\"\"\"\n\n\nimport numpy as np\nimport itertools\n\n\ndef read_edge_file(filename):\n \"\"\"Constructs an undirected adjacency matrix from an edge list.\n \n This could be more space-efficient if needed, as half the matrix is\n redundant. \n \n Args:\n filename (str): The file containing the edge list.\n \n Returns:\n 2D-array: An adjacency matrix for the input graph.\n \"\"\"\n edges = []\n vertices = set()\n with open(filename, 'r') as f:\n next(f) # Skip header metadata\n for line in f:\n v1, v2, weight = (float(e) for e in line.strip().split())\n v1, v2 = int(v1), int(v2)\n vertices.add(v1)\n vertices.add(v2)\n edges.append((v1, v2, weight))\n array = np.ones((len(vertices), len(vertices))) * float('inf')\n for i in range(len(vertices)):\n array[i,i] = 0\n for v1, v2, weight in edges:\n array[v1-1,v2-1] = weight\n array[v2-1,v1-1] = weight\n return array\n\n\ndef generate_graph(n, cost_range=(1,100), seed=None):\n \"\"\"Constructs an undirected adjacency matrix from an edge list.\n\n This could be more space-efficient if needed, as half the matrix is\n redundant. \n \n Args:\n n (int): The number of stops/vertices in the problem.\n cost_range (tuple(int, int)): The minimum and maximum edge costs.\n seed (int): optional, the seed for generating random costs, default 1\n \n Returns:\n 2D-array: An adjacency matrix for the input graph.\n \"\"\"\n rs = np.random.RandomState(seed)\n array = rs.randint(*cost_range, (n,n))\n for i in range(n):\n array[i,i] = 0\n return (array + array.T) / 2 # Make the array symmetric\n\n\ndef exhaustive_tsp(graph):\n \"\"\"Performs an exhuastive search over all possible tours.\n \n This implementation includes minimal optimizations.\n \n The first stop is fixed, because otherwise, the same tour\n can be shifted into n equivalent representations, n is the\n number of stops. \n \n Second, for each pair of vertices, of which there are \n n*(n-1)/2 (O(n^2)), when they make up the second and last\n stops, they can only be in one ordering. 
I will arbitrarily\n require that the second stop be smaller than the last stop\n index.\n \n Args:\n graph (array): The input graph as an adjacency matrix.\n \n Returns:\n float: The cost of the minimum-cost tour.\n list: The sequence of vertices for the minimum-cost tour,\n converted back to 1-based indexing.\n int: The number of unique cycles that were evaluated.\n \"\"\"\n # Fix first stop as 0 to avoid redundant shifts in the tour\n n = graph.shape[0]\n min_cost = float('inf')\n min_tour = None\n for tour in itertools.permutations(range(1,n)):\n if tour[0] < tour[-1]: # Only compute one direction per tour\n tour_cost = graph[0,tour[0]] + graph[tour[-1],0]\n for i in range(1,len(tour)):\n tour_cost += graph[tour[i-1],tour[i]]\n if tour_cost < min_cost:\n min_cost = tour_cost\n min_tour = (1, *(s+1 for s in tour))\n return min_cost, min_tour\n\n\ndef nearest_neighbor_tsp(graph):\n \"\"\"Selects a tour using a greedy nearest-neighbor hueristic.\n \n This approach is simple and very fast, but not correct.\n \n I'm not optimizing with vectorized numpy functions to stick to the\n basic looping implementation.\n \n Args:\n graph (array): The input graph as an adjacency matrix.\n \n Returns:\n float: The cost of the minimum-cost tour.\n list: The sequence of vertices for the minimum-cost tour.\n \"\"\"\n # Track remaining stops\n remaining = set(range(1, graph.shape[0]+1))\n # Select an arbitrary first vertex\n tour = [1]\n remaining.remove(1)\n # Compute tour and cost\n cost = 0\n while len(remaining) > 0:\n min_distance = float('inf')\n nearest_neighbor = None\n for v in remaining:\n distance = graph[tour[-1]-1, v-1]\n if distance < min_distance:\n min_distance = distance\n nearest_neighbor = v\n remaining.remove(nearest_neighbor)\n cost += min_distance\n tour.append(nearest_neighbor)\n # Close the tour\n cost += graph[tour[-1]-1,0]\n return cost, tour\n\n\ndef two_opt_first_tsp(graph, tour, cost):\n \"\"\"Attempts to improve a tour using local search.\n \n This variation stops when any improving swap is found. 
I can\n also implement a variation where each iteration picks the \n most improving of all improving swaps.\n \n Args:\n graph (array): The input graph as an adjacency matrix.\n tour (list): The initial tour.\n cost (int): The cost of the initial tour.\n \n Returns:\n float: The cost of the final minimum-cost tour.\n list: The sequence of vertices for the minimum-cost tour.\n \"\"\"\n # Enumerate all candidate pairs of edges\n n = len(tour)\n while True:\n result = False\n for i in range(n-2):\n for j in range(i+2, n-1):\n result = two_change(graph, tour, i, i+1, j, j+1)\n if result:\n tour = result[0]\n cost += result[1]\n break\n if result:\n break\n # Handle edge case\n if i > 0:\n result = two_change(graph, tour, i, i+1, n-1, 0)\n if result:\n tour = result[0]\n cost += result[1]\n break\n # Terminate the algorithm if no further improvement possible\n if not result:\n return cost, tour\n \n \ndef two_change(graph, tour, x1, x2, y1, y2):\n \"\"\"Updates the tour, if an improvement, else makes no change.\n \n Args:\n tour (list): The initial tour.\n edge_1 (list): The first edge.\n edge_2 (list): The second edge.\n \n Returns:\n bool: A flag indicating whether the change improved the tour.\n \"\"\"\n # Only first/first and last/last vertices are valid for connecting\n #breakpoint()\n change = (graph[tour[x1]-1, tour[y1]-1] + graph[tour[x2]-1, tour[y2]-1] -\n graph[tour[x1]-1, tour[x2]-1] - graph[tour[y1]-1, tour[y2]-1]) \n if change < 0:\n temp_tour = tour[0:x1+1] + tour[y1:x1:-1] \n if y2 > 0:\n temp_tour += tour[y2:]\n return temp_tour, change\n else:\n return False\n \n\ndef two_opt_min_tsp(graph, tour, cost):\n \"\"\"Attempts to improve a tour using local search.\n \n This variation searches for the most improving swap at\n each iteration.\n \n Args:\n graph (array): The input graph as an adjacency matrix.\n tour (list): The initial tour.\n cost (int): The cost of the initial tour.\n \n Returns:\n float: The cost of the final minimum-cost tour.\n list: The sequence of vertices for the minimum-cost tour.\n \"\"\"\n # Enumerate all candidate pairs of edges\n n = len(tour)\n while True:\n result = False\n best_change = float('inf')\n best_tour = None\n for i in range(n-2):\n for j in range(i+2, n-1):\n result = two_change(graph, tour, i, i+1, j, j+1)\n if result:\n if result[1] < best_change:\n best_change = result[1]\n best_tour = result[0]\n # Handle edge case\n if i > 0:\n result = two_change(graph, tour, i, i+1, n-1, 0)\n if result:\n if result[1] < best_change:\n best_change = result[1]\n best_tour = result[0]\n # Terminate the algorithm if no further improvement possible\n if best_tour is None:\n return cost, tour\n else:\n tour = best_tour\n cost += best_change\n \n \ndef get_tsp_tour(graph, func, preproc=False, seed=None, trials=1):\n \"\"\"Wrapper for testing various TSP approaches.\"\"\"\n # Get a tour for return\n if func.__name__ in ('nearest_neighbor_tsp', 'exhaustive_tsp'):\n cost, tour = func(graph)\n elif func.__name__ in ('two_opt_first_tsp', 'two_opt_min_tsp'):\n cost = float('inf')\n tour = None\n for _ in range(trials):\n temp_cost, temp_tour = get_input_tour(graph, preproc, seed)\n temp_cost, temp_tour = func(graph, temp_tour, temp_cost)\n if temp_cost < cost:\n cost = temp_cost\n tour = temp_tour\n else:\n raise ValueError(f'Requested function {func.__name__} not recognized.')\n return cost, tour\n \n \ndef get_input_tour(graph, preproc, seed):\n \"\"\"Helper for getting input tour for improving algorithms.\"\"\"\n # Get nearest neighbor or random input tour\n if 
preproc:\n cost, tour = nearest_neighbor_tsp(graph)\n else:\n tour = list(range(1, graph.shape[0]+1))\n np.random.shuffle(tour)\n cost = 0\n for i in range(len(tour)-1):\n cost += graph[tour[i]-1, tour[i+1]-1]\n cost += graph[tour[-1]-1, tour[0]-1]\n return cost, tour\n\n\nif __name__ == '__main__':\n import time\n import pandas as pd\n\n # Define number of repeats per configuration per input size\n NUM_REPEATS = 25\n\n # Define various configurations to test\n # The configuration tuples are defined as follows:\n # (hueristic, whether to preprocess with nearest-neighbors, number of repeats)\n configs = [(exhaustive_tsp, False, 1),\n (nearest_neighbor_tsp, False, 1),\n (two_opt_first_tsp, False, 1),\n (two_opt_min_tsp, False, 1),\n (two_opt_first_tsp, True, 1),\n (two_opt_min_tsp, True, 1),\n (two_opt_first_tsp, False, 50),\n (two_opt_min_tsp, False, 50)]\n\n # First, run correctness tests\n test_cases = [('tsptest1.txt', 13),\n ('tsptest2.txt', 23)]\n \n print('Starting correctness tests...')\n for test_case in test_cases:\n graph = read_edge_file(test_case[0])\n for config in configs:\n cost, tour = get_tsp_tour(graph, config[0], config[1], seed=1)\n print(f'{config[0].__name__} with preproc={config[1]} on {test_case[0]}, {100*cost / test_case[1]:.1f}% of optimal cost.')\n print('Passed all correctness tests.')\n\n # Second, run performance tests\n print('Starting performance tests...')\n results = []\n for i in range(9, 13):\n result_row = {'size':i}\n df_repeats = []\n for r in range(NUM_REPEATS):\n result_repeat = {}\n graph = generate_graph(i, seed=r)\n for j, config in enumerate(configs):\n start = time.time()\n cost, _ = get_tsp_tour(graph, config[0], config[1], seed=r, trials=config[2])\n result_repeat[f'{config[0].__name__}_{config[1]}_{config[2]}_time'] = time.time() - start\n result_repeat[f'{config[0].__name__}_{config[1]}_{config[2]}_cost'] = cost\n df_repeats.append(result_repeat)\n df_repeats = pd.DataFrame(df_repeats)\n for col in df_repeats.columns:\n result_row[f'{col}_mean'] = df_repeats[col].mean().round(4)\n #result_row[f'{col}_std'] = df_repeats[col].std().round(4)\n results.append(result_row)\n print('Completed all performance tests.')\n\n # Formatting and display, could improve this later with plotting or \n # % of exhaustive time/cost representation\n df_results = pd.DataFrame(results).set_index('size')\n df_time = df_results[[col for col in df_results.columns if 'time' in col]]\n df_cost = df_results[[col for col in df_results.columns if 'cost' in col]]\n print('Timing Results:')\n print(df_time.T)\n print('Tour Cost Results:')\n print(df_cost.T) " ]
[ [ "numpy.random.RandomState", "numpy.random.shuffle", "pandas.DataFrame" ] ]
nashory/rtic-gcn-pytorch
[ "75c3a6d6c05dd69f1260db7b0b4bfaca20c56d5d" ]
[ "model/compose_ae.py" ]
[ "import math\nimport random\nimport string\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.models as M\nfrom einops import rearrange\nfrom torch.autograd import Variable\n\nimport model.resnet as resnet\nfrom model.base import ImageEncoderTextEncoderBase\n\n\nclass ComposeAECompositionModule(nn.Module):\n def __init__(self, in_c_text, in_c_img):\n super(ComposeAECompositionModule, self).__init__()\n self.CONJUGATE = Variable(\n torch.cuda.FloatTensor(10000, 1).fill_(-1.0), requires_grad=False\n ) # large enough values.\n self.a = torch.nn.Parameter(torch.tensor([1.0, 10.0, 1.0, 1.0]))\n\n self.encoderLinear = torch.nn.Sequential(\n ComplexProjectionModule(in_c_img, in_c_text),\n LinearMapping(in_c_img),\n )\n\n self.encoderWithConv = torch.nn.Sequential(\n ComplexProjectionModule(in_c_img, in_c_text),\n ConvMapping(in_c_img),\n )\n\n self.decoder = torch.nn.Sequential(\n torch.nn.BatchNorm1d(in_c_img),\n torch.nn.ReLU(),\n torch.nn.Linear(in_c_img, in_c_img),\n torch.nn.ReLU(),\n torch.nn.Linear(in_c_img, in_c_img),\n )\n\n self.txtdecoder = torch.nn.Sequential(\n torch.nn.BatchNorm1d(in_c_img),\n torch.nn.ReLU(),\n torch.nn.Linear(in_c_img, in_c_text),\n torch.nn.ReLU(),\n torch.nn.Linear(in_c_text, in_c_text),\n )\n\n def forward(self, x):\n f_img, f_text = x\n theta_linear = self.encoderLinear((f_img, f_text, self.CONJUGATE))\n theta_conv = self.encoderWithConv((f_img, f_text, self.CONJUGATE))\n theta = theta_linear * self.a[1] + theta_conv * self.a[0]\n\n dct_with_representations = {\n \"repres\": theta,\n \"repr_to_compare_with_source\": self.decoder(theta),\n \"repr_to_compare_with_mods\": self.txtdecoder(theta),\n }\n\n return dct_with_representations\n\n\nclass ComposeAE(ImageEncoderTextEncoderBase):\n \"\"\"The ComposeAE model.\n\n The method is described in\n Muhammad Umer Anwaar, Egor Labintcev and Martin Kleinsteuber.\n ``Compositional Learning of Image-Text Query for Image Retrieval\"\n arXiv:2006.11149\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ComposeAE, self).__init__(**kwargs)\n\n self.l2_loss = nn.MSELoss().to(\"cuda\")\n\n self.model[\"composeae_compose_it\"] = ComposeAECompositionModule(\n in_c_text=self.out_feature_text,\n in_c_img=self.out_feature_image,\n )\n\n # define model\n self.model = nn.ModuleDict(self.model)\n\n def update(self, output, opt):\n \"\"\"\n output = {\n \"f_img_c\",\n \"f_img_t\",\n \"f_text\",\n \"f_cit_t\": {\n \"repres\",\n \"repr_to_compare_with_source\",\n \"repr_to_compare_with_mods\"\n }\n }\n \"\"\"\n\n # assign input\n f_img_t_without_norm = output[\"f_img_t\"]\n f_img_t = self.model[\"norm\"](output[\"f_img_t\"]) # target\n f_cit_t = self.model[\"norm\"](output[\"f_cit_t\"][\"repres\"]) # manipulated\n\n # loss\n loss = self.model[\"criterion\"](f_img_t, f_cit_t)\n\n # rotational symmetry Loss\n conjugate_f_cit_t = self.compose_img_text(\n f_img_t_without_norm, output[\"f_text\"]\n )\n composed_target_image_features = self.model[\"norm\"](conjugate_f_cit_t[\"repres\"])\n source_image_features = self.model[\"norm\"](output[\"f_img_c\"])\n\n loss += self.model[\"criterion\"](\n composed_target_image_features, source_image_features\n )\n loss += self.l2_loss(\n output[\"f_cit_t\"][\"repr_to_compare_with_source\"], output[\"f_img_c\"]\n )\n loss += self.l2_loss(\n output[\"f_cit_t\"][\"repr_to_compare_with_mods\"], output[\"f_text\"]\n )\n\n # backward\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n # return log\n log_data = dict()\n 
log_data[\"loss\"] = float(loss.data)\n return log_data\n\n def get_manipulated_image_feature(self, x):\n \"\"\"\n x = {\n 'c_img': c_img,\n 'c_iid': data['c_iid'],\n 't_iid': data['t_iid'],\n 'mod_key': data['mod_key'],\n 'mod_str': mod_str,\n }\n \"\"\"\n\n f_img_c = self.extract_image_feature(x[\"c_img\"])\n f_text = self.extract_text_feature(x[\"mod_str\"])\n f_cit_t = self.compose_img_text(f_img_c, f_text)\n return self.model[\"norm\"](f_cit_t[\"repres\"])\n\n def compose_img_text(self, f_img, f_text):\n return self.model[\"composeae_compose_it\"]((f_img, f_text))\n\n def forward(self, x):\n \"\"\"\n data = {\n 'c_img': c_img,\n 'c_cap': c_cap,\n 't_img': t_img,\n 't_cap': t_cap,\n 'mod_key': mod_key,\n 'mod_str': mod_str,\n }\n \"\"\"\n f_img_c = self.extract_image_feature(x[\"c_img\"])\n f_img_t = self.extract_image_feature(x[\"t_img\"])\n f_text = self.extract_text_feature(x[\"mod_str\"])\n f_cit_t = self.compose_img_text(f_img_c, f_text)\n\n return dict(f_img_c=f_img_c, f_img_t=f_img_t, f_text=f_text, f_cit_t=f_cit_t)\n\n\nclass ComplexProjectionModule(torch.nn.Module):\n def __init__(self, image_embed_dim=1024, text_embed_dim=1024):\n super().__init__()\n self.text_features = torch.nn.Sequential(\n torch.nn.BatchNorm1d(text_embed_dim),\n torch.nn.Linear(text_embed_dim, image_embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(image_embed_dim, image_embed_dim),\n )\n self.image_features = torch.nn.Sequential(\n torch.nn.BatchNorm1d(image_embed_dim),\n torch.nn.Linear(image_embed_dim, image_embed_dim),\n torch.nn.Dropout(p=0.5),\n torch.nn.ReLU(),\n torch.nn.Linear(image_embed_dim, image_embed_dim),\n )\n\n def forward(self, x):\n x1 = self.image_features(x[0])\n x2 = self.text_features(x[1])\n # default value of CONJUGATE is 1. Only for rotationally symmetric loss value is -1.\n # which results in the CONJUGATE of text features in the complex space\n CONJUGATE = x[2]\n num_samples = x[0].shape[0]\n CONJUGATE = CONJUGATE[:num_samples]\n delta = x2 # text as rotation\n re_delta = torch.cos(delta)\n im_delta = CONJUGATE * torch.sin(delta)\n\n re_score = x1 * re_delta\n im_score = x1 * im_delta\n\n concat_x = torch.cat([re_score, im_score], 1)\n x0copy = x[0].unsqueeze(1)\n x1 = x1.unsqueeze(1)\n x2 = x2.unsqueeze(1)\n re_score = re_score.unsqueeze(1)\n im_score = im_score.unsqueeze(1)\n\n return concat_x, x1, x2, x0copy, re_score, im_score\n\n\nclass LinearMapping(torch.nn.Module):\n \"\"\"\n This is linear mapping to image space. rho(.)\n \"\"\"\n\n def __init__(self, image_embed_dim=1024):\n super().__init__()\n self.mapping = torch.nn.Sequential(\n torch.nn.BatchNorm1d(2 * image_embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * image_embed_dim, image_embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(image_embed_dim, image_embed_dim),\n )\n\n def forward(self, x):\n theta_linear = self.mapping(x[0])\n return theta_linear\n\n\nclass ConvMapping(torch.nn.Module):\n \"\"\"\n This is convoultional mapping to image space. 
rho_conv(.)\n \"\"\"\n\n def __init__(self, image_embed_dim=1024):\n super().__init__()\n self.image_embed_dim = image_embed_dim\n self.mapping = torch.nn.Sequential(\n torch.nn.BatchNorm1d(2 * image_embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(2 * image_embed_dim, image_embed_dim),\n torch.nn.ReLU(),\n torch.nn.Linear(image_embed_dim, image_embed_dim),\n )\n # in_channels, output channels\n self.conv = torch.nn.Conv1d(5, 64, kernel_size=3, padding=1)\n self.adaptivepooling = torch.nn.AdaptiveMaxPool1d(2 * image_embed_dim // 64)\n\n def forward(self, x):\n concat_features = torch.cat(x[1:], 1)\n concat_x = self.conv(concat_features)\n concat_x = self.adaptivepooling(concat_x)\n final_vec = rearrange(concat_x, \"b ... -> b (...)\")\n theta_conv = self.mapping(final_vec)\n return theta_conv\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.AdaptiveMaxPool1d", "torch.nn.Dropout", "torch.cat", "torch.sin", "torch.nn.ModuleDict", "torch.tensor", "torch.cuda.FloatTensor", "torch.nn.Linear", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.cos" ] ]
tbarua1/MachneLearning
[ "b810a64f4af23174f273d5afb8d0ab4dad7796e7" ]
[ "Chapter06/ch06.py" ]
[ "# coding: utf-8\n\n\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\nimport numpy as np\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import validation_curve\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import roc_curve, auc\nfrom scipy import interp\nfrom sklearn.utils import resample\n\n# *Python Machine Learning 2nd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2017\n# \n# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition\n# \n# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)\n\n# # Python Machine Learning - Code Examples\n\n# # Chapter 6 - Learning Best Practices for Model Evaluation and Hyperparameter Tuning\n\n# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).\n\n\n\n\n\n# *The use of `watermark` is optional. You can install this IPython extension via \"`pip install watermark`\". For more information, please see: https://github.com/rasbt/watermark.*\n\n\n# ### Overview\n\n# - [Streamlining workflows with pipelines](#Streamlining-workflows-with-pipelines)\n# - [Loading the Breast Cancer Wisconsin dataset](#Loading-the-Breast-Cancer-Wisconsin-dataset)\n# - [Combining transformers and estimators in a pipeline](#Combining-transformers-and-estimators-in-a-pipeline)\n# - [Using k-fold cross-validation to assess model performance](#Using-k-fold-cross-validation-to-assess-model-performance)\n# - [The holdout method](#The-holdout-method)\n# - [K-fold cross-validation](#K-fold-cross-validation)\n# - [Debugging algorithms with learning and validation curves](#Debugging-algorithms-with-learning-and-validation-curves)\n# - [Diagnosing bias and variance problems with learning curves](#Diagnosing-bias-and-variance-problems-with-learning-curves)\n# - [Addressing overfitting and underfitting with validation curves](#Addressing-overfitting-and-underfitting-with-validation-curves)\n# - [Fine-tuning machine learning models via grid search](#Fine-tuning-machine-learning-models-via-grid-search)\n# - [Tuning hyperparameters via grid search](#Tuning-hyperparameters-via-grid-search)\n# - [Algorithm selection with nested cross-validation](#Algorithm-selection-with-nested-cross-validation)\n# - [Looking at different performance evaluation metrics](#Looking-at-different-performance-evaluation-metrics)\n# - [Reading a confusion matrix](#Reading-a-confusion-matrix)\n# - [Optimizing the precision and recall of a classification model](#Optimizing-the-precision-and-recall-of-a-classification-model)\n# - [Plotting a receiver operating characteristic](#Plotting-a-receiver-operating-characteristic)\n# - [The scoring metrics for multiclass 
classification](#The-scoring-metrics-for-multiclass-classification)\n# - [Dealing with class imbalance](#Dealing-with-class-imbalance)\n# - [Summary](#Summary)\n\n\n\n\n\n\n# # Streamlining workflows with pipelines\n\n# ...\n\n# ## Loading the Breast Cancer Wisconsin dataset\n\n\n\n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/'\n 'machine-learning-databases'\n '/breast-cancer-wisconsin/wdbc.data', header=None)\n\n# if the Breast Cancer dataset is temporarily unavailable from the\n# UCI machine learning repository, un-comment the following line\n# of code to load the dataset from a local path:\n\n# df_wine = pd.read_csv('wdbc.data', header=None)\n\ndf.head()\n\n\n\n\ndf.shape\n\n\n\n\n\n\nX = df.loc[:, 2:].values\ny = df.loc[:, 1].values\nle = LabelEncoder()\ny = le.fit_transform(y)\nle.classes_\n\n\n\n\nle.transform(['M', 'B'])\n\n\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size=0.20,\n stratify=y,\n random_state=1)\n\n\n\n# ## Combining transformers and estimators in a pipeline\n\n\n\n\npipe_lr = make_pipeline(StandardScaler(),\n PCA(n_components=2),\n LogisticRegression(random_state=1))\n\npipe_lr.fit(X_train, y_train)\ny_pred = pipe_lr.predict(X_test)\nprint('Test Accuracy: %.3f' % pipe_lr.score(X_test, y_test))\n\n\n\n\n\n\n\n# # Using k-fold cross validation to assess model performance\n\n# ...\n\n# ## The holdout method\n\n\n\n\n\n\n# ## K-fold cross-validation\n\n\n\n\n\n\n\n \n\nkfold = StratifiedKFold(n_splits=10,\n random_state=1).split(X_train, y_train)\n\nscores = []\nfor k, (train, test) in enumerate(kfold):\n pipe_lr.fit(X_train[train], y_train[train])\n score = pipe_lr.score(X_train[test], y_train[test])\n scores.append(score)\n print('Fold: %2d, Class dist.: %s, Acc: %.3f' % (k+1,\n np.bincount(y_train[train]), score))\n \nprint('\\nCV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))\n\n\n\n\n\nscores = cross_val_score(estimator=pipe_lr,\n X=X_train,\n y=y_train,\n cv=10,\n n_jobs=1)\nprint('CV accuracy scores: %s' % scores)\nprint('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))\n\n\n\n# # Debugging algorithms with learning curves\n\n\n# ## Diagnosing bias and variance problems with learning curves\n\n\n\n\n\n\n\n\n\npipe_lr = make_pipeline(StandardScaler(),\n LogisticRegression(penalty='l2', random_state=1))\n\ntrain_sizes, train_scores, test_scores = learning_curve(estimator=pipe_lr,\n X=X_train,\n y=y_train,\n train_sizes=np.linspace(0.1, 1.0, 10),\n cv=10,\n n_jobs=1)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nplt.plot(train_sizes, train_mean,\n color='blue', marker='o',\n markersize=5, label='training accuracy')\n\nplt.fill_between(train_sizes,\n train_mean + train_std,\n train_mean - train_std,\n alpha=0.15, color='blue')\n\nplt.plot(train_sizes, test_mean,\n color='green', linestyle='--',\n marker='s', markersize=5,\n label='validation accuracy')\n\nplt.fill_between(train_sizes,\n test_mean + test_std,\n test_mean - test_std,\n alpha=0.15, color='green')\n\nplt.grid()\nplt.xlabel('Number of training samples')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.ylim([0.8, 1.03])\nplt.tight_layout()\n#plt.savefig('images/06_05.png', dpi=300)\nplt.show()\n\n\n\n# ## Addressing over- and underfitting with validation curves\n\n\n\n\n\nparam_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\ntrain_scores, test_scores = validation_curve(\n estimator=pipe_lr, \n X=X_train, \n 
y=y_train, \n param_name='logisticregression__C', \n param_range=param_range,\n cv=10)\n\ntrain_mean = np.mean(train_scores, axis=1)\ntrain_std = np.std(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\ntest_std = np.std(test_scores, axis=1)\n\nplt.plot(param_range, train_mean, \n color='blue', marker='o', \n markersize=5, label='training accuracy')\n\nplt.fill_between(param_range, train_mean + train_std,\n train_mean - train_std, alpha=0.15,\n color='blue')\n\nplt.plot(param_range, test_mean, \n color='green', linestyle='--', \n marker='s', markersize=5, \n label='validation accuracy')\n\nplt.fill_between(param_range, \n test_mean + test_std,\n test_mean - test_std, \n alpha=0.15, color='green')\n\nplt.grid()\nplt.xscale('log')\nplt.legend(loc='lower right')\nplt.xlabel('Parameter C')\nplt.ylabel('Accuracy')\nplt.ylim([0.8, 1.0])\nplt.tight_layout()\n# plt.savefig('images/06_06.png', dpi=300)\nplt.show()\n\n\n\n# # Fine-tuning machine learning models via grid search\n\n\n# ## Tuning hyperparameters via grid search \n\n\n\n\npipe_svc = make_pipeline(StandardScaler(),\n SVC(random_state=1))\n\nparam_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]\n\nparam_grid = [{'svc__C': param_range, \n 'svc__kernel': ['linear']},\n {'svc__C': param_range, \n 'svc__gamma': param_range, \n 'svc__kernel': ['rbf']}]\n\ngs = GridSearchCV(estimator=pipe_svc, \n param_grid=param_grid, \n scoring='accuracy', \n cv=10,\n n_jobs=-1)\ngs = gs.fit(X_train, y_train)\nprint(gs.best_score_)\nprint(gs.best_params_)\n\n\n\n\nclf = gs.best_estimator_\nclf.fit(X_train, y_train)\nprint('Test accuracy: %.3f' % clf.score(X_test, y_test))\n\n\n\n# ## Algorithm selection with nested cross-validation\n\n\n\n\n\n\n\ngs = GridSearchCV(estimator=pipe_svc,\n param_grid=param_grid,\n scoring='accuracy',\n cv=2)\n\nscores = cross_val_score(gs, X_train, y_train, \n scoring='accuracy', cv=5)\nprint('CV accuracy: %.3f +/- %.3f' % (np.mean(scores),\n np.std(scores)))\n\n\n\n\n\ngs = GridSearchCV(estimator=DecisionTreeClassifier(random_state=0),\n param_grid=[{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]}],\n scoring='accuracy',\n cv=2)\n\nscores = cross_val_score(gs, X_train, y_train, \n scoring='accuracy', cv=5)\nprint('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), \n np.std(scores)))\n\n\n\n# # Looking at different performance evaluation metrics\n\n# ...\n\n# ## Reading a confusion matrix\n\n\n\n\n\n\n\n\npipe_svc.fit(X_train, y_train)\ny_pred = pipe_svc.predict(X_test)\nconfmat = confusion_matrix(y_true=y_test, y_pred=y_pred)\nprint(confmat)\n\n\n\n\nfig, ax = plt.subplots(figsize=(2.5, 2.5))\nax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)\nfor i in range(confmat.shape[0]):\n for j in range(confmat.shape[1]):\n ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')\n\nplt.xlabel('Predicted label')\nplt.ylabel('True label')\n\nplt.tight_layout()\n#plt.savefig('images/06_09.png', dpi=300)\nplt.show()\n\n\n# ### Additional Note\n\n# Remember that we previously encoded the class labels so that *malignant* samples are the \"postive\" class (1), and *benign* samples are the \"negative\" class (0):\n\n\n\nle.transform(['M', 'B'])\n\n\n\n\nconfmat = confusion_matrix(y_true=y_test, y_pred=y_pred)\nprint(confmat)\n\n\n# Next, we printed the confusion matrix like so:\n\n\n\nconfmat = confusion_matrix(y_true=y_test, y_pred=y_pred)\nprint(confmat)\n\n\n# Note that the (true) class 0 samples that are correctly predicted as class 0 (true negatives) are now in the upper left corner of the matrix (index 0, 0). 
In order to change the ordering so that the true negatives are in the lower right corner (index 1,1) and the true positves are in the upper left, we can use the `labels` argument like shown below:\n\n\n\nconfmat = confusion_matrix(y_true=y_test, y_pred=y_pred, labels=[1, 0])\nprint(confmat)\n\n\n# We conclude:\n# \n# Assuming that class 1 (malignant) is the positive class in this example, our model correctly classified 71 of the samples that belong to class 0 (true negatives) and 40 samples that belong to class 1 (true positives), respectively. However, our model also incorrectly misclassified 1 sample from class 0 as class 1 (false positive), and it predicted that 2 samples are benign although it is a malignant tumor (false negatives).\n\n\n# ## Optimizing the precision and recall of a classification model\n\n\n\n\nprint('Precision: %.3f' % precision_score(y_true=y_test, y_pred=y_pred))\nprint('Recall: %.3f' % recall_score(y_true=y_test, y_pred=y_pred))\nprint('F1: %.3f' % f1_score(y_true=y_test, y_pred=y_pred))\n\n\n\n\n\nscorer = make_scorer(f1_score, pos_label=0)\n\nc_gamma_range = [0.01, 0.1, 1.0, 10.0]\n\nparam_grid = [{'svc__C': c_gamma_range,\n 'svc__kernel': ['linear']},\n {'svc__C': c_gamma_range,\n 'svc__gamma': c_gamma_range,\n 'svc__kernel': ['rbf']}]\n\ngs = GridSearchCV(estimator=pipe_svc,\n param_grid=param_grid,\n scoring=scorer,\n cv=10,\n n_jobs=-1)\ngs = gs.fit(X_train, y_train)\nprint(gs.best_score_)\nprint(gs.best_params_)\n\n\n\n# ## Plotting a receiver operating characteristic\n\n\n\n\npipe_lr = make_pipeline(StandardScaler(),\n PCA(n_components=2),\n LogisticRegression(penalty='l2', \n random_state=1, \n C=100.0))\n\nX_train2 = X_train[:, [4, 14]]\n \n\ncv = list(StratifiedKFold(n_splits=3, \n random_state=1).split(X_train, y_train))\n\nfig = plt.figure(figsize=(7, 5))\n\nmean_tpr = 0.0\nmean_fpr = np.linspace(0, 1, 100)\nall_tpr = []\n\nfor i, (train, test) in enumerate(cv):\n probas = pipe_lr.fit(X_train2[train],\n y_train[train]).predict_proba(X_train2[test])\n\n fpr, tpr, thresholds = roc_curve(y_train[test],\n probas[:, 1],\n pos_label=1)\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr,\n tpr,\n label='ROC fold %d (area = %0.2f)'\n % (i+1, roc_auc))\n\nplt.plot([0, 1],\n [0, 1],\n linestyle='--',\n color=(0.6, 0.6, 0.6),\n label='random guessing')\n\nmean_tpr /= len(cv)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nplt.plot(mean_fpr, mean_tpr, 'k--',\n label='mean ROC (area = %0.2f)' % mean_auc, lw=2)\nplt.plot([0, 0, 1],\n [0, 1, 1],\n linestyle=':',\n color='black',\n label='perfect performance')\n\nplt.xlim([-0.05, 1.05])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('false positive rate')\nplt.ylabel('true positive rate')\nplt.legend(loc=\"lower right\")\n\nplt.tight_layout()\n# plt.savefig('images/06_10.png', dpi=300)\nplt.show()\n\n\n\n# ## The scoring metrics for multiclass classification\n\n\n\npre_scorer = make_scorer(score_func=precision_score, \n pos_label=1, \n greater_is_better=True, \n average='micro')\n\n\n# ## Dealing with class imbalance\n\n\n\nX_imb = np.vstack((X[y == 0], X[y == 1][:40]))\ny_imb = np.hstack((y[y == 0], y[y == 1][:40]))\n\n\n\n\ny_pred = np.zeros(y_imb.shape[0])\nnp.mean(y_pred == y_imb) * 100\n\n\n\n\n\nprint('Number of class 1 samples before:', X_imb[y_imb == 1].shape[0])\n\nX_upsampled, y_upsampled = resample(X_imb[y_imb == 1],\n y_imb[y_imb == 1],\n replace=True,\n n_samples=X_imb[y_imb == 0].shape[0],\n random_state=123)\n\nprint('Number of class 1 samples after:', 
X_upsampled.shape[0])\n\n\n\n\nX_bal = np.vstack((X[y == 0], X_upsampled))\ny_bal = np.hstack((y[y == 0], y_upsampled))\n\n\n\n\ny_pred = np.zeros(y_bal.shape[0])\nnp.mean(y_pred == y_bal) * 100\n\n\n\n# # Summary\n\n# ...\n\n# ---\n# \n# Readers may ignore the next cell.\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "sklearn.model_selection.validation_curve", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.plot", "sklearn.tree.DecisionTreeClassifier", "numpy.mean", "sklearn.metrics.f1_score", "sklearn.preprocessing.LabelEncoder", "numpy.hstack", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "sklearn.model_selection.StratifiedKFold", "numpy.std", "numpy.zeros", "matplotlib.pyplot.figure", "scipy.interp", "matplotlib.pyplot.ylim", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "sklearn.metrics.roc_curve", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.xlabel", "sklearn.metrics.make_scorer", "sklearn.svm.SVC", "sklearn.metrics.auc", "matplotlib.pyplot.show", "sklearn.metrics.recall_score", "sklearn.decomposition.PCA", "matplotlib.pyplot.ylabel", "sklearn.model_selection.GridSearchCV", "sklearn.model_selection.cross_val_score", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "numpy.bincount", "matplotlib.pyplot.grid", "sklearn.utils.resample", "matplotlib.pyplot.xscale", "sklearn.preprocessing.StandardScaler", "numpy.vstack" ] ]
JohnZ03/Open-L2O
[ "06d3860aa9446be2b61368c6ef357a462982db91" ]
[ "Model_Free_L2O/L2O-Scale/L2O-Scale-Training/optimizer/learning_rate_schedule.py" ]
[ "# Copyright 2017 Google, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A trainable optimizer that learns a learning rate schedule.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom optimizer import trainable_optimizer\n\n\nclass LearningRateSchedule(trainable_optimizer.TrainableOptimizer):\n \"\"\"Learns a learning rate schedule over a fixed number of iterations.\"\"\"\n\n def __init__(self, initial_rate=0.0, n_steps=1000, **kwargs):\n \"\"\"Initializes the learning rates.\"\"\"\n self.max_index = tf.constant(n_steps-1, dtype=tf.int32)\n\n with tf.variable_scope(trainable_optimizer.OPTIMIZER_SCOPE):\n initializer = tf.constant_initializer(initial_rate)\n self.learning_rates = tf.get_variable(\"learning_rates\",\n shape=([n_steps,]),\n initializer=initializer)\n\n super(LearningRateSchedule, self).__init__(\"LRS\", [\"itr\"], **kwargs)\n\n def _initialize_state(self, var):\n \"\"\"Return a dictionary mapping names of state variables to their values.\"\"\"\n return {\n \"itr\": tf.constant(0, dtype=tf.int32),\n }\n\n def _compute_update(self, param, grad, state):\n \"\"\"Compute updates of parameters.\"\"\"\n\n # get the learning rate at the current index, if the index\n # is greater than the number of available learning rates,\n # use the last one\n index = tf.minimum(state[\"itr\"], self.max_index)\n learning_rate = tf.gather(self.learning_rates, index)\n\n # update the parameters: parameter - learning_rate * gradient\n updated_param = param - tf.scalar_mul(learning_rate, grad)\n\n return updated_param, {\"itr\": state[\"itr\"] + 1}\n" ]
[ [ "tensorflow.get_variable", "tensorflow.constant", "tensorflow.scalar_mul", "tensorflow.minimum", "tensorflow.constant_initializer", "tensorflow.gather", "tensorflow.variable_scope" ] ]
ngaro/TB2J
[ "2a8dbec0788abe4201fd7001b29b8dfb2aeead64" ]
[ "TB2J/greentest.py" ]
[ "import numpy as np\nfrom scipy.linalg import eigh, inv\n\n\ndef eigen_to_G(evals, evecs, efermi, energy):\n \"\"\" calculate green's function from eigenvalue/eigenvector for energy(e-ef): G(e-ef).\n :param evals: eigen values\n :param evecs: eigen vectors\n :param efermi: fermi energy\n :param energy: energy\n :returns: Green's function G,\n :rtype: Matrix with same shape of the Hamiltonian (and eigenvector)\n \"\"\"\n G=evecs.dot(np.diag(1.0 / (-evals + (energy + efermi)))).dot(\n evecs.conj().T)\n return G\n\ndef green_H(H, energy, S=np.eye(3)):\n return inv(S*energy - H)\n\ndef green_H_eig(H,energy):\n evals, evecs = eigh(H)\n return eigen_to_G(evals, evecs, 0.0, energy)\n\n\ndef test_eigh():\n H=np.random.random((3,3))\n H=H+H.T.conj()\n evals ,evecs=eigh(H)\n #print(f\"VT@V: {evecs.T.conj()@evecs}\")\n green_H(H, 1)\n green_H_eig(H, 1)\n print(f\"H: {H}\")\n\n S=np.random.random((3,3))+np.random.random((3,3))*1j\n S=S+S.T.conj()\n S=np.eye(3)*0.4+S\n evals, evecs=eigh(H, S)\n\n print(f\"VT@V: {evecs.T.conj()@evecs}\")\n print(f\"VT@S@V: {evecs.T.conj()@S@evecs}\") # I\n print(f\"V@S@VT: {evecs@[email protected]()}\") # Not I\n print(f\"S@VT@evals@V: {[email protected]()@np.diag(evals)@evecs}\")\n print(f\"V@evals@VT: {[email protected](evals)@evecs.T.conj()}\")\n print(f\"VT@evals@V: {evecs.T.conj()@np.diag(evals)@evecs}\")\n\n G1=green_H(H, 0.3, S=S)\n #print(\"G1=\", G1)\n\n evals, evecs= eigh(H, S)\n G2= eigen_to_G(evals, evecs, 0.3, 0)\n G2=green_H(H, 0.3, S=S)\n print(f\"G1-G2={G1-G2}\")\n\n\n\n\ntest_eigh()\n" ]
[ [ "numpy.diag", "numpy.random.random", "numpy.eye", "scipy.linalg.eigh", "scipy.linalg.inv" ] ]
bagnalla/tf_mnist
[ "8410b78568235c13696eb9dfa1133eaba1d68b74" ]
[ "dataset.py" ]
[ "########################################################################\n#\n# Class for creating a data-set consisting of all files in a directory.\n#\n# Example usage is shown in the file knifey.py and Tutorial #09.\n#\n# Implemented in Python 3.5\n#\n########################################################################\n#\n# This file is part of the TensorFlow Tutorials available at:\n#\n# https://github.com/Hvass-Labs/TensorFlow-Tutorials\n#\n# Published under the MIT License. See the file LICENSE for details.\n#\n# Copyright 2016 by Magnus Erik Hvass Pedersen\n#\n########################################################################\n\nimport numpy as np\nimport os\nfrom cache import cache\n\n########################################################################\n\n\ndef one_hot_encoded(class_numbers, num_classes=None):\n \"\"\"\n Generate the One-Hot encoded class-labels from an array of integers.\n For example, if class_number=2 and num_classes=4 then\n the one-hot encoded label is the float array: [0. 0. 1. 0.]\n :param class_numbers:\n Array of integers with class-numbers.\n Assume the integers are from zero to num_classes-1 inclusive.\n :param num_classes:\n Number of classes. If None then use max(cls)-1.\n :return:\n 2-dim array of shape: [len(cls), num_classes]\n \"\"\"\n\n # Find the number of classes if None is provided.\n if num_classes is None:\n num_classes = np.max(class_numbers) - 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]\n\n\n########################################################################\n\n\nclass DataSet:\n def __init__(self, in_dir, exts='.jpg'):\n \"\"\"\n Create a data-set consisting of the filenames in the given directory\n and sub-dirs that match the given filename-extensions.\n For example, the knifey-spoony data-set (see knifey.py) has the\n following dir-structure:\n knifey-spoony/forky/\n knifey-spoony/knifey/\n knifey-spoony/spoony/\n knifey-spoony/forky/test/\n knifey-spoony/knifey/test/\n knifey-spoony/spoony/test/\n This means there are 3 classes called: forky, knifey, and spoony.\n If we set in_dir = \"knifey-spoony/\" and create a new DataSet-object\n then it will scan through these directories and create a training-set\n and test-set for each of these classes.\n The training-set will contain a list of all the *.jpg filenames\n in the following directories:\n knifey-spoony/forky/\n knifey-spoony/knifey/\n knifey-spoony/spoony/\n The test-set will contain a list of all the *.jpg filenames\n in the following directories:\n knifey-spoony/forky/test/\n knifey-spoony/knifey/test/\n knifey-spoony/spoony/test/\n See the TensorFlow Tutorial #09 for a usage example.\n :param in_dir:\n Root-dir for the files in the data-set.\n This would be 'knifey-spoony/' in the example above.\n :param exts:\n String or tuple of strings with valid filename-extensions.\n Not case-sensitive.\n :return:\n Object instance.\n \"\"\"\n\n # Extend the input directory to the full path.\n in_dir = os.path.abspath(in_dir)\n\n # Input directory.\n self.in_dir = in_dir\n\n # Convert all file-extensions to lower-case.\n self.exts = tuple(ext.lower() for ext in exts)\n\n # Names for the classes.\n self.class_names = []\n\n # Filenames for all the files in the training-set.\n self.filenames = []\n\n # Filenames for all the files in the test-set.\n self.filenames_test = []\n\n # Class-number for each file in the training-set.\n self.class_numbers = []\n\n # Class-number for each file in the test-set.\n self.class_numbers_test = []\n\n # Total number of 
classes in the data-set.\n self.num_classes = 0\n\n # For all files/dirs in the input directory.\n for name in os.listdir(in_dir):\n # Full path for the file / dir.\n current_dir = os.path.join(in_dir, name)\n\n # If it is a directory.\n if os.path.isdir(current_dir):\n # Add the dir-name to the list of class-names.\n self.class_names.append(name)\n\n # Training-set.\n\n # Get all the valid filenames in the dir (not sub-dirs).\n filenames = self._get_filenames(current_dir)\n\n # Append them to the list of all filenames for the training-set.\n self.filenames.extend(filenames)\n\n # The class-number for this class.\n class_number = self.num_classes\n\n # Create an array of class-numbers.\n class_numbers = [class_number] * len(filenames)\n\n # Append them to the list of all class-numbers for the training-set.\n self.class_numbers.extend(class_numbers)\n\n # Test-set.\n\n # Get all the valid filenames in the sub-dir named 'test'.\n filenames_test = self._get_filenames(os.path.join(current_dir, 'test'))\n\n # Append them to the list of all filenames for the test-set.\n self.filenames_test.extend(filenames_test)\n\n # Create an array of class-numbers.\n class_numbers = [class_number] * len(filenames_test)\n\n # Append them to the list of all class-numbers for the test-set.\n self.class_numbers_test.extend(class_numbers)\n\n # Increase the total number of classes in the data-set.\n self.num_classes += 1\n\n def _get_filenames(self, dir):\n \"\"\"\n Create and return a list of filenames with matching extensions in the given directory.\n :param dir:\n Directory to scan for files. Sub-dirs are not scanned.\n :return:\n List of filenames. Only filenames. Does not include the directory.\n \"\"\"\n\n # Initialize empty list.\n filenames = []\n\n # If the directory exists.\n if os.path.exists(dir):\n # Get all the filenames with matching extensions.\n for filename in os.listdir(dir):\n if filename.lower().endswith(self.exts):\n filenames.append(filename)\n\n return filenames\n\n def get_paths(self, test=False):\n \"\"\"\n Get the full paths for the files in the data-set.\n :param test:\n Boolean. 
Return the paths for the test-set (True) or training-set (False).\n :return:\n Iterator with strings for the path-names.\n \"\"\"\n\n if test:\n # Use the filenames and class-numbers for the test-set.\n filenames = self.filenames_test\n class_numbers = self.class_numbers_test\n\n # Sub-dir for test-set.\n test_dir = \"test/\"\n else:\n # Use the filenames and class-numbers for the training-set.\n filenames = self.filenames\n class_numbers = self.class_numbers\n\n # Don't use a sub-dir for test-set.\n test_dir = \"\"\n\n for filename, cls in zip(filenames, class_numbers):\n # Full path-name for the file.\n path = os.path.join(self.in_dir, self.class_names[cls], test_dir, filename)\n\n yield path\n\n def get_training_set(self):\n \"\"\"\n Return the list of paths for the files in the training-set,\n and the list of class-numbers as integers,\n and the class-numbers as one-hot encoded arrays.\n \"\"\"\n\n return list(self.get_paths()), \\\n np.asarray(self.class_numbers), \\\n one_hot_encoded(class_numbers=self.class_numbers,\n num_classes=self.num_classes)\n\n def get_test_set(self):\n \"\"\"\n Return the list of paths for the files in the test-set,\n and the list of class-numbers as integers,\n and the class-numbers as one-hot encoded arrays.\n \"\"\"\n\n return list(self.get_paths(test=True)), \\\n np.asarray(self.class_numbers_test), \\\n one_hot_encoded(class_numbers=self.class_numbers_test,\n num_classes=self.num_classes)\n\n\n########################################################################\n\n\ndef load_cached(cache_path, in_dir):\n \"\"\"\n Wrapper-function for creating a DataSet-object, which will be\n loaded from a cache-file if it already exists, otherwise a new\n object will be created and saved to the cache-file.\n This is useful if you need to ensure the ordering of the\n filenames is consistent every time you load the data-set,\n for example if you use the DataSet-object in combination\n with Transfer Values saved to another cache-file, see e.g.\n Tutorial #09 for an example of this.\n :param cache_path:\n File-path for the cache-file.\n :param in_dir:\n Root-dir for the files in the data-set.\n This is an argument for the DataSet-init function.\n :return:\n The DataSet-object.\n \"\"\"\n\n print(\"Creating dataset from the files in: \" + in_dir)\n\n # If the object-instance for DataSet(in_dir=data_dir) already\n # exists in the cache-file then reload it, otherwise create\n # an object instance and save it to the cache-file for next time.\n dataset = cache(cache_path=cache_path,\n fn=DataSet, in_dir=in_dir)\n\n return dataset\n\n\n########################################################################\n" ]
[ [ "numpy.asarray", "numpy.max", "numpy.eye" ] ]
mckev/ml
[ "148d573a7070ed8bb240729431c3462e76e275e8" ]
[ "classes/ml/genetic.py" ]
[ "from typing import Optional\n\nimport numpy\n\n\nclass Genetic:\n\n @staticmethod\n def crossover_uniform(parent1: numpy.ndarray, parent2: numpy.ndarray, prob_crossover: float = 0.5):\n # prob_crossover of 0.0 will not have any crossover (i.e. offspring1 = parent1 and offspring2 = parent2)\n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n\n mask = numpy.random.random(size=offspring1.shape) < prob_crossover\n offspring1[mask] = parent2[mask]\n offspring2[mask] = parent1[mask]\n\n return offspring1, offspring2\n\n @staticmethod\n def crossover_binary(parent1: numpy.ndarray, parent2: numpy.ndarray, eta: float):\n # Ref: https://github.com/Chrispresso/SnakeAI/blob/master/genetic_algorithm/crossover.py\n \"\"\"\n This crossover is specific to floating-point representation.\n Simulate behavior of one-point crossover for binary representations.\n\n For large values of eta there is a higher probability that offspring will be created near the parents.\n For small values of eta, offspring will be more distant from parents\n\n Equation 9.9, 9.10, 9.11\n \"\"\"\n rand = numpy.random.random(size=parent1.shape)\n gamma = numpy.empty(shape=parent1.shape)\n gamma[rand <= 0.5] = (2 * rand[rand <= 0.5]) ** (1.0 / (eta + 1)) # First case of equation 9.11\n gamma[rand > 0.5] = (1.0 / (2.0 * (1.0 - rand[rand > 0.5]))) ** (1.0 / (eta + 1)) # Second case\n\n # Calculate Child 1 chromosome (Eq. 9.9)\n chromosome1 = 0.5 * ((1 + gamma) * parent1 + (1 - gamma) * parent2)\n # Calculate Child 2 chromosome (Eq. 9.10)\n chromosome2 = 0.5 * ((1 - gamma) * parent1 + (1 + gamma) * parent2)\n\n return chromosome1, chromosome2\n\n @staticmethod\n def crossover_single_point(parent1: numpy.ndarray, parent2: numpy.ndarray, major='r'):\n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n\n if len(parent2.shape) == 1:\n # 1 dimension shape\n (rows,) = parent2.shape\n row = numpy.random.randint(0, rows)\n if major == 'r':\n offspring1[:row] = parent2[:row]\n offspring2[:row] = parent1[:row]\n elif major == 'c':\n pass\n return offspring1, offspring2\n\n elif len(parent2.shape) == 2:\n # 2 dimensions shape\n rows, cols = parent2.shape\n row = numpy.random.randint(0, rows)\n col = numpy.random.randint(0, cols)\n if major == 'r':\n offspring1[:row, :] = parent2[:row, :]\n offspring2[:row, :] = parent1[:row, :]\n elif major == 'c':\n offspring1[:, :col] = parent2[:, :col]\n offspring2[:, :col] = parent1[:, :col]\n return offspring1, offspring2\n\n else:\n raise NotImplementedError\n\n @staticmethod\n def crossover_multi_points(parent1: numpy.ndarray, parent2: numpy.ndarray, prob_crossover: float, major='r'):\n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n\n if len(parent2.shape) == 1:\n # 1 dimension shape\n (rows,) = parent2.shape\n if major == 'r':\n mask = numpy.random.random(size=(rows,)) < prob_crossover\n offspring1[mask] = parent2[mask]\n offspring2[mask] = parent1[mask]\n elif major == 'c':\n raise NotImplementedError\n return offspring1, offspring2\n\n elif len(parent2.shape) == 2:\n # 2 dimensions shape\n rows, cols = parent2.shape\n if major == 'r':\n mask = numpy.random.random(size=(rows,)) < prob_crossover\n offspring1[mask, :] = parent2[mask, :]\n offspring2[mask, :] = parent1[mask, :]\n elif major == 'c':\n raise NotImplementedError\n return offspring1, offspring2\n\n else:\n raise NotImplementedError\n\n @staticmethod\n def mutate(chromosome: numpy.ndarray, prob_mutation: float,\n mu: Optional[float] = None, sigma: Optional[float] = None,\n scale: Optional[float] = None) -> None:\n # Ref: https://github.com/Chrispresso/SnakeAI/blob/master/genetic_algorithm/mutation.py\n \"\"\"\n Perform a gaussian mutation for each gene in an individual with probability, prob_mutation.\n\n If mu and sigma are defined then the gaussian distribution will be drawn from that,\n otherwise it will be drawn from N(0, 1) for the shape of the individual.\n \"\"\"\n # Determine which genes will be mutated\n mask = numpy.random.random(size=chromosome.shape) < prob_mutation\n # If mu and sigma are defined, create gaussian distribution around each one\n if mu is not None and sigma is not None:\n gaussian_mutation = numpy.random.normal(mu, sigma, size=chromosome.shape)\n else:\n # Otherwise center around N(0,1)\n gaussian_mutation = numpy.random.normal(size=chromosome.shape)\n\n if scale is not None:\n gaussian_mutation *= scale\n\n # Update\n chromosome[mask] += gaussian_mutation[mask]\n" ]
[ [ "numpy.random.normal", "numpy.random.random", "numpy.empty", "numpy.random.randint" ] ]
qiufengdiewu/LPInsider
[ "92fcc2ad9e05cb634c4e3f1accd1220b984a027d" ]
[ "035textCNN_all.py" ]
[ "# coding=utf-8\nfrom sklearn import metrics\nfrom keras.layers import Reshape\nfrom keras.callbacks import EarlyStopping\nfrom keras import Input, Model\nfrom keras.layers import Dense, Conv1D, GlobalMaxPooling1D, Concatenate\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import StratifiedKFold\nfrom keras import backend as K\nfrom sklearn.model_selection import KFold\n\ndef precision(y_true, y_pred):\n # Calculates the precision\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\ndef recall(y_true, y_pred):\n # Calculates the recall\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\ndef fbeta_score(y_true, y_pred, beta=1):\n # Calculates the F score, the weighted harmonic mean of precision and recall.\n if beta < 0:\n raise ValueError('The lowest choosable beta is zero (only precision).')\n\n # If there are no true positives, fix the F score at 0 like sklearn.\n if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:\n return 0\n\n p = precision(y_true, y_pred)\n r = recall(y_true, y_pred)\n bb = beta ** 2\n fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())\n return fbeta_score\n\ndef fmeasure(y_true, y_pred):\n # Calculates the f-measure, the harmonic mean of precision and recall.\n return fbeta_score(y_true, y_pred, beta=1)\n\nclass TextCNN(object):\n def get_model(self):\n input = Input(shape=[19200, ])\n input_reshape = Reshape((96, 200))(input)\n convs = []\n for kernel_size in [3, 5, 7, 9, 11, 13]:\n c = Conv1D(128, kernel_size, activation='relu')(input_reshape)\n c = GlobalMaxPooling1D()(c)\n convs.append(c)\n x = Concatenate()(convs)\n class_num = 1\n output = Dense(units=class_num, activation=\"sigmoid\")(x)\n model = Model(inputs=input, outputs=output)\n model.summary()\n return model\n\n\ndef nomalization(X):\n return preprocessing.scale(X, axis=0)\n\n# 计算词向量\ndef get_sent_vec(size, npLength, sent, model, model_train):\n\n sent = str(sent).replace(',', ' ')\n sent = sent.replace('(', ' ')\n sent = sent.replace(')', ' ')\n sent = sent.replace(\"'\", ' ')\n sent = sent.replace('.', ' ')\n sent = sent.replace(':', ' ')\n sent = sent.replace(']', ' ')\n sent = sent.replace('[', ' ')\n sent = sent.replace('/', ' ')\n words = sent.split(\" \")\n\n vec_zero = np.zeros(size).reshape(1, size)\n count = 0\n vec = []\n for word in words:\n try:\n vec.append(model[word].reshape(1,size))\n count += 1\n except:\n try:\n vec.append(model_train[word].reshape(1,size))\n count += 1\n except:\n continue\n\n for i in range(count,npLength):\n vec.append(vec_zero)\n vec = np.concatenate(vec)\n vec = np.concatenate(vec)\n return nomalization(vec)\n\ndef load_file():\n # 训练模型\n X = pd.read_csv(\"./out/035_36_sample.txt\", sep='\\t', header=None,encoding='ISO-8859-1') ############################\n # 导入模型\n word2vec_path = \"E:/Word2vecModel/wikipedia-pubmed-and-PMC-w2v.bin\"\n word2vec_path_train = './out/03721Word2vec_word2vec_format_model'\n model_train = gensim.models.word2vec.Word2Vec.load(word2vec_path_train)\n model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)\n #model = gensim.models.word2vec.Word2Vec.load(word2vec_path_train) ######################################################\n\n max_words_set=set()\n\n sentX = []\n length = 0\n for i in range(0, len(X), 1):\n sentX.append(X[2][i])\n for sent in sentX:\n sent = str(sent).replace(',', ' ')\n sent = sent.replace('(', ' ')\n sent = sent.replace(')', ' ')\n sent = sent.replace(\"'\", ' ')\n sent = sent.replace('.', ' ')\n sent = sent.replace(':', ' ')\n sent = sent.replace(']', ' ')\n sent = sent.replace('[', ' ')\n sent = sent.replace('/', ' ')\n words = sent.split(\" \")\n for word in words:\n max_words_set.add(word)\n if len(words) > length:\n length = len(words)\n print(\"length\" + str(length))\n XX = []\n ###############\n for i in range(len(sentX)):\n sent = sentX[i]\n sent_vec=get_sent_vec(200, length, sent, model, model_train)\n XX.append([sent_vec])\n i += 1\n\n XX = np.concatenate(XX)\n y = np.load('./out/035_36_sample.npy')\n return XX, y\nif __name__ == '__main__':\n X, y = load_file()\n #早停法 early_stopping\n early_stopping = EarlyStopping(monitor='val_acc', patience=3, mode='max')\n for num in range(3):\n cv = StratifiedKFold(n_splits=10)\n #floder = KFold(n_splits=10, random_state=5 * num, shuffle=True)\n #for train_loc, test_loc in floder.split(X, y):\n for _, (train_loc, test_loc) in enumerate(cv.split(X, y)):\n scaler = StandardScaler()\n x_train = scaler.fit_transform(X[train_loc])#X[train_loc] # scaler.fit_transform(X[train])\n x_test = scaler.transform(X[test_loc])#X[test_loc] # scaler.transform(X[test])\n y_train = y[train_loc]\n y_test = y[test_loc]\n\n batch_size = 10\n epochs = 100\n\n model = TextCNN().get_model()\n model.compile('adam', 'binary_crossentropy',metrics=['accuracy'])\n model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), verbose=0)\n #model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[early_stopping],validation_data=(X_test, y_test))\n result = model.evaluate(x_test,y_test,verbose=0)\n\n model_predict = np.concatenate(np.array(model.predict(x_test)))\n temp=[]\n for pred in model_predict:\n if pred >= 0.5:\n temp.append(1.0)\n else:\n temp.append(0.0)\n model_predict = np.array(temp)\n\n precision = metrics.precision_score(y_true=y_test, y_pred=model_predict)\n recall = metrics.recall_score(y_true=y_test, y_pred=model_predict)\n f1_score = metrics.f1_score(y_true=y_test, y_pred=model_predict)\n f = open(\"./out/035textCNN.txt\", \"a+\")\n f.write(str(result[1]) + \"\\t\" + str(precision) + \"\\t\" + str(recall) + \"\\t\" + str(f1_score) + \"\\t\\n\")\n f.close()" ]
[ [ "numpy.array", "pandas.read_csv", "sklearn.metrics.recall_score", "sklearn.metrics.precision_score", "sklearn.model_selection.StratifiedKFold", "numpy.concatenate", "sklearn.metrics.f1_score", "numpy.load", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.scale", "numpy.zeros" ] ]
AlessandroVol23/kdd-cup-2019
[ "7f140d1d6213dc0d05d07a2c8bff9fe949b72ed8" ]
[ "src/data/raw_features.py" ]
[ "import os\nimport sys\nimport json\nimport math\nimport click\nimport logging\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom pandas.io.json import json_normalize\n\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef read_in_train_data(absolute_raw_data_path):\n \"\"\"\n This function reads in all the train data sets.\n returns: df_train_queries, df_train_plans, df_train_clicks, id_profiles\n \"\"\"\n\n data_set_path = os.path.join(absolute_raw_data_path, 'raw')\n\n df_profiles = pd.read_csv(os.path.join(data_set_path, \"profiles.csv\"))\n df_train_clicks = pd.read_csv(os.path.join(data_set_path, \"train_clicks.csv\"))\n df_train_plans = pd.read_csv(os.path.join(data_set_path, \"train_plans.csv\"))\n df_train_queries = pd.read_csv(os.path.join(data_set_path, \"train_queries.csv\"))\n\n return (df_profiles, df_train_queries, df_train_plans, df_train_clicks)\n\ndef read_in_test_data(absolute_raw_data_path):\n \"\"\"\n This function reads in all the test data sets.\n returns: df_test_queries, df_test_plans\n \"\"\"\n data_set_path = os.path.join(absolute_raw_data_path, 'raw/data_set_phase1')\n df_test_queries = pd.read_csv(os.path.join(data_set_path, \"test_queries.csv\"))\n df_test_plans = pd.read_csv(os.path.join(data_set_path, \"test_plans.csv\"))\n return (df_test_queries, df_test_plans)\n\n\ndef write_data(absolute_raw_data_path, df, train_mode, df_mode, plan_mode='col'):\n if df_mode == 'row':\n filename = 'processed/ranking/' + train_mode + '_raw_' + df_mode + '.pickle'\n else:\n filename = 'processed/multiclass/' + train_mode + '_raw_' + plan_mode + '.pickle'\n print(\"Writing df to pickle in ../data/processed_raw/\")\n df.to_pickle(os.path.join(absolute_raw_data_path, filename))\n return\n\n\ndef raw_preprocessing(df, plandf, profiledf, clickdf=None, df_mode='col', plan_mode='first'):\n\n \"\"\" \n Function to construct dataset with raw features:\n * sid, pid, req_time, o_lat, o_long, d_lat, d_long\n * 3 columns per plan mode, dist_X, price_X, eta_x\n Function looks in absolut_path_raw_folder for the data folder in the project.\n absolute_path_data_folder: Path to raw data folder. Typically /home/xxx/repo/data/raw\n plan_mode: 'first' or 'last'. Sometimes there are several suggestions for the same transport mode. 'first' mode takes the first suggestion, 'last' takes only the last suggestion.\n \"\"\"\n\n def preprocess_coordinates(df):\n '''\n Generates following 5 columns\n * o_long\n * o_lat\n * d_long\n * d_lat\n\n And deletes these:\n * o\n * d\n\n +2 columns\n '''\n df[[\"o_long\", \"o_lat\"]] = df.o.str.split(\",\", 1, expand=True).astype(float)\n df.drop(\"o\", axis=1, inplace=True)\n\n df[[\"d_long\", \"d_lat\"]] = df.d.str.split(\",\", 1, expand=True).astype(float)\n df.drop(\"d\", axis=1, inplace=True)\n\n df['distance_query'] = df.apply(lambda x: (math.sqrt((x.o_long - x.d_long)**2 + (x.o_lat - x.d_lat)**2)), axis=1)\n\n return df\n\n def preprocess_datatypes(df_plans, df_clicks, df_queries):\n df_plans.sid = df_plans.sid.astype(int)\n\n # Check if clicks is empty because clicks just for train\n if df_clicks is not None:\n df_clicks.sid = df_clicks.sid.astype(int)\n\n df_queries.sid = df_queries.sid.astype(int)\n df_queries.req_time = pd.to_datetime(df_queries.req_time)\n\n return df_plans, df_clicks, df_queries\n\n def join_data_sets(df_plans, df_clicks, df_queries, df_profiles, df_mode):\n \"\"\"\n This function joins all datasets together.\n \"\"\"\n\n if df_mode == 'col':\n # adds 2 columns\n if df_clicks is not None:\n df = pd.merge(df_clicks, df_queries, on=\"sid\", how='outer')\n else:\n df = df_queries.copy()\n \n # adds 66 columns\n df = pd.merge(df, df_profiles, how='outer')\n df = df[pd.notnull(df['o_long'])]\n \n # adds 2 columns\n df = pd.merge(df, df_plans, how='outer')\n\n elif df_mode == 'row':\n df = pd.merge(df_plans, df_queries, on = \"sid\", how = \"left\")\n df = pd.merge(df, df_clicks, on = \"sid\", how = \"outer\")\n df['click_mode'] = df['click_mode'].fillna(value=0)\n df = pd.merge(df, df_profiles, on = \"pid\")\n # df = df[pd.notnull(df['o_long'])]\n\n else:\n print(\"Wrong df_mode, try with 'col' or 'row'\")\n sys.exit(-1)\n \n # df.pid = df.pid.apply(lambda x: 0 if np.isnan(x) else x)\n # if df_mode == 'col':\n # for i in range(66):\n # df['p'+str(i)] = df['p'+str(i)].apply(lambda x: -1 if np.isnan(x) else x)\n\n return df\n\n '''\n for 'row' mode, neede for lambdarank\n '''\n def unstack_plans(df_plans):\n df = df_plans.copy()\n\n df.plans = df.apply(\n lambda x: json.loads(\n '{\"plans\":'\n + x.plans\n + ',\"sid\":\"'\n + str(x.sid)\n + '\"'\n + ',\"plan_time\":'\n + '\"'\n + str(x.plan_time)\n + '\"}'\n ), axis=1)\n\n df_unstacked = json_normalize(df.plans.values, \"plans\", [\"sid\", \"plan_time\"])\n df_unstacked.rename({\"distance\": \"distance_plan\"}, axis=1, inplace=True)\n return df_unstacked\n\n def fill_missing_price(df, median=True, mean=False):\n \"\"\"\n This function fills all missing values in price with the median value. \n \"\"\"\n df.price = pd.to_numeric(df.price)\n df.loc[df.price.isna(), \"price\"] = df.price.median()\n return df\n\n # DEPRECATED\n '''\n for col mode, unstack plans in columns, necessary for random forest classifier\n '''\n def initialize_plan_cols(df, modes):\n for mode in modes:\n df[mode] = 0\n return df\n\n # 'first' mode: only the first proposed plan per transport mode is considered\n def preprocess_plans_first(df):\n '''\n Creates 33 new colums, 3 per transport mode\n * dist_0\n * price_0\n * eta_0\n\n +33 columns\n '''\n for i, r in df.iterrows():\n if (i+1) % 5000 == 0:\n print(\"Processing row {}\".format(str(i + 1)), end=\"\\r\")\n if isinstance(r.plans, float):\n # nan, no plan suggestions\n continue\n for pl in json.loads(r.plans):\n df.at[i,'dist_' + str(pl['transport_mode'])] = pl['distance']\n if pl['price']:\n df.at[i,'price_' + str(pl['transport_mode'])] = pl['price']\n else:\n df.at[i,'price_' + str(pl['transport_mode'])] = 700\n df.at[i,'eta_' + str(pl['transport_mode'])] = pl['eta']\n print(\"\\n\")\n return df\n\n\n # 'last' mode: only the last proposed plan per transport mode is considered\n def preprocess_plans_last(df):\n for i, r in df.iterrows():\n if (i+1) % 5000 == 0:\n print(\"Processing row {}\".format(str(i + 1)), end=\"\\r\")\n if isinstance(r.plans, float):\n # nan\n continue\n visited = []\n for pl in json.loads(r.plans):\n if pl['transport_mode'] in visited:\n continue\n visited.append(pl['transport_mode'])\n df.at[i,'dist_' + str(pl['transport_mode'])] = pl['distance']\n if pl['price']:\n df.at[i,'price_' + str(pl['transport_mode'])] = pl['price']\n else:\n df.at[i,'price_' + str(pl['transport_mode'])] = 99999\n df.at[i,'eta_' + str(pl['transport_mode'])] = pl['eta']\n return df\n\n def gen_plan_feas(data, col_name='plans'):\n n = data.shape[0]\n mode_list_feas = np.zeros((n, 12))\n max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n\n max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n\n max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n\n min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(\n (n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n mode_texts = []\n \n for i, plan in tqdm(enumerate(data[col_name].values)):\n try:\n cur_plan_list = json.loads(plan)\n except:\n cur_plan_list = []\n if len(cur_plan_list) == 0:\n mode_list_feas[i, 0] = 1\n first_mode[i] = 0\n\n max_dist[i] = -1\n min_dist[i] = -1\n mean_dist[i] = -1\n std_dist[i] = -1\n\n max_price[i] = -1\n min_price[i] = -1\n mean_price[i] = -1\n std_price[i] = -1\n\n max_eta[i] = -1\n min_eta[i] = -1\n mean_eta[i] = -1\n std_eta[i] = -1\n\n min_dist_mode[i] = -1\n max_dist_mode[i] = -1\n min_price_mode[i] = -1\n max_price_mode[i] = -1\n min_eta_mode[i] = -1\n max_eta_mode[i] = -1\n\n mode_texts.append('word_null')\n else:\n distance_list = []\n price_list = []\n eta_list = []\n mode_list = []\n for tmp_dit in cur_plan_list:\n distance_list.append(int(tmp_dit['distance']))\n if tmp_dit['price'] == '':\n price_list.append(0)\n else:\n price_list.append(int(tmp_dit['price']))\n eta_list.append(int(tmp_dit['eta']))\n mode_list.append(int(tmp_dit['transport_mode']))\n mode_texts.append(' '.join(['word_{}'.format(mode) for mode in mode_list]))\n distance_list = np.array(distance_list)\n price_list = np.array(price_list)\n eta_list = np.array(eta_list)\n mode_list = np.array(mode_list, dtype='int')\n mode_list_feas[i, mode_list] = 1\n distance_sort_idx = np.argsort(distance_list)\n price_sort_idx = np.argsort(price_list)\n eta_sort_idx = np.argsort(eta_list)\n\n max_dist[i] = distance_list[distance_sort_idx[-1]]\n min_dist[i] = distance_list[distance_sort_idx[0]]\n mean_dist[i] = np.mean(distance_list)\n std_dist[i] = np.std(distance_list)\n\n max_price[i] = price_list[price_sort_idx[-1]]\n min_price[i] = price_list[price_sort_idx[0]]\n mean_price[i] = np.mean(price_list)\n std_price[i] = np.std(price_list)\n\n max_eta[i] = eta_list[eta_sort_idx[-1]]\n min_eta[i] = eta_list[eta_sort_idx[0]]\n mean_eta[i] = np.mean(eta_list)\n std_eta[i] = np.std(eta_list)\n\n first_mode[i] = mode_list[0]\n max_dist_mode[i] = mode_list[distance_sort_idx[-1]]\n min_dist_mode[i] = mode_list[distance_sort_idx[0]]\n\n max_price_mode[i] = mode_list[price_sort_idx[-1]]\n min_price_mode[i] = mode_list[price_sort_idx[0]]\n\n max_eta_mode[i] = mode_list[eta_sort_idx[-1]]\n min_eta_mode[i] = mode_list[eta_sort_idx[0]]\n\n feature_data = pd.DataFrame(mode_list_feas)\n feature_data.columns = ['mode_{}_available'.format(i) for i in range(12)]\n feature_data['max_dist'] = max_dist\n feature_data['min_dist'] = min_dist\n feature_data['mean_dist'] = mean_dist\n feature_data['std_dist'] = std_dist\n\n feature_data['max_price'] = max_price\n feature_data['min_price'] = min_price\n feature_data['mean_price'] = mean_price\n feature_data['std_price'] = std_price\n\n feature_data['max_eta'] = max_eta\n feature_data['min_eta'] = min_eta\n feature_data['mean_eta'] = mean_eta\n feature_data['std_eta'] = std_eta\n\n feature_data['max_dist_mode'] = max_dist_mode\n feature_data['min_dist_mode'] = min_dist_mode\n feature_data['max_price_mode'] = max_price_mode\n feature_data['min_price_mode'] = min_price_mode\n feature_data['max_eta_mode'] = max_eta_mode\n feature_data['min_eta_mode'] = min_eta_mode\n feature_data['first_mode'] = first_mode\n \n print('mode tfidf...')\n tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))\n tfidf_vec = tfidf_enc.fit_transform(mode_texts)\n svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)\n mode_svd = svd_enc.fit_transform(tfidf_vec)\n mode_svd = pd.DataFrame(mode_svd)\n mode_svd.columns = ['TFIDF_clustered_{}'.format(i) for i in range(10)]\n data = pd.concat([data, feature_data, mode_svd], axis=1)\n \n #data = data.drop([col_name], axis=1)\n return data\n\n def gen_profile_feas(data, profile_data):\n \n # add \"-1\" as new PID, that has \"-1\" on all p0,...p65\n profile_data.loc[len(profile_data)] = list(np.repeat(-1, 67))\n x = profile_data.drop(['pid'], axis=1).values\n svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)\n svd_x = svd.fit_transform(x)\n svd_feas = pd.DataFrame(svd_x)\n newcols = ['PID_MainComp_{}'.format(i) for i in range(20)]\n svd_feas.columns = newcols\n svd_feas['pid'] = profile_data['pid'].values\n data['pid'] = data['pid'].fillna(-1)\n data = data.merge(svd_feas, on='pid', how='left')\n return data\n\n def gen_time_feas(data):\n data['req_time'] = pd.to_datetime(data['req_time'])\n data['weekday'] = data['req_time'].dt.dayofweek\n data['hour'] = data['req_time'].dt.hour\n return data\n\n def split_train_test_by_col(df):\n mask = df['req_time'] >= '2018-11-14'\n train = df[~mask]\n test = df[mask]\n train = train.drop(['req_time'], axis=1)\n test = test.drop(['req_time'], axis=1)\n return (train, test)\n\n def split_train_test_by_row(df):\n df.sort_values(by=['req_time'], inplace=True)\n limit = int(0.8*df.shape[0])\n traindf = df.head(limit)\n testdf = df.tail(len(df) - limit)\n return traindf, testdf\n\t\n def split_train_test_by_row2(df):\n\t \"\"\"\n\t Function to split an df into a train and testset.\n\t 20% for the testset and 80% for the trainset.\n\t Splitting based on \"SIDs\"\n\t \"\"\"\n\t\t# sort by request date\n\t df.sort_values(by=['req_time'], inplace = True)\n\t\t\n\t\t# get the SIDs we want in the TestSet and the TrainSet\n\t uniqueSIDs = df.sid.unique()\n\t limit = int(0.8*len(uniqueSIDs))\n\t\t\n\t train_SIDs = uniqueSIDs[:limit]\n\t test_SIDs = uniqueSIDs[limit:]\n\t\t\n\t traindf = df.loc[df[\"sid\"].isin(train_SIDs), :]\n\t testdf = df.loc[df[\"sid\"].isin(test_SIDs), :]\n\t return traindf, testdf\t\n\n '''\n **********************\n Preprocessing pipeline\n **********************\n '''\n\n print(\"1. Preprocessing coordinates and time\")\n df = preprocess_coordinates(df)\n df = gen_time_feas(df)\n\n if df_mode == 'col':\n print(\"2. Preprocessing df in 'col' mode\")\n plandf, clickdf, df = preprocess_datatypes(plandf, clickdf, df)\n print(\"3. Generating profile features\")\n df = gen_profile_feas(df, profiledf)\n df = join_data_sets(plandf, clickdf, df, profiledf, df_mode)\n print(\"4. Generating plan features\")\n df = gen_plan_feas(df, col_name='plans')\n \n num_modes = 12\n modes = []\n for i in range(num_modes):\n modes.append('dist_' + str(i))\n modes.append('price_' + str(i))\n modes.append('eta_' + str(i))\n\n df = initialize_plan_cols(df, modes)\n if plan_mode == 'first':\n print(\"5. Preprocessing plans in 'first' mode\")\n df = preprocess_plans_first(df)\n elif plan_mode == 'last':\n print(\"5. Preprocessing plans in 'last' mode\")\n df = preprocess_plans_last(df)\n else:\n print(\"ERROR: wrong plan mode. Try with 'first' or 'last'.\")\n sys.exit(1)\n\n elif df_mode == 'row':\n print(\"2. Preprocessing df in 'row' mode\")\n df0 = df.merge(plandf, on = 'sid', how = 'left') # creates empty 'plan_time' & empty 'plans', as not all SIDs are in plandf... \n print(\"3. Generating profile features\")\n df0 = gen_profile_feas(df0, profiledf) # fine\n print(\"4. Generating plan features\")\n df_with_plans = gen_plan_feas(df0) # fine --> SIDs without plan, also get the transmode specific columns, but all with 0/ -1 if there is nothing avaible\n print(\"5. Unstacking plans into rows\")\n df_plans_pp = unstack_plans(plandf) # unstack plans from json to DF\n df_plans_pp, clickdf, df_with_plans = preprocess_datatypes(df_plans_pp, clickdf, df_with_plans)\n df = join_data_sets(df_plans_pp, clickdf, df_with_plans, profiledf, df_mode)\n df = fill_missing_price(df)\n else:\n print(\"Wrong df mode, try with 'row' or 'col'\")\n sys.exit(-1)\n\n if 'click_mode' in df:\n print(\"6. Preprocessing click_mode\")\n df.click_mode = df.click_mode.apply(lambda x: 0 if np.isnan(x) else x)\n df['Response'] = df.click_mode\n\n if 'plans' in df:\n df = df.drop('plans', axis=1)\n \n print(\"7. Split train and test\")\n print(str(df.shape[0]), str(df.shape[1]))\n #traindf, testdf = split_train_test_by_col(df)\n if df_mode == 'col':\n traindf, testdf = split_train_test_by_row(df)\n\t\t\n if df_mode == 'row':\n traindf, testdf = split_train_test_by_row2(df)\n\n return (traindf, testdf)\n\n@click.command()\n@click.argument(\"absolute_path_data_folder\")\n@click.argument(\"df_mode\")\n@click.argument(\"plan_mode\")\ndef main(absolute_path_data_folder, df_mode, plan_mode):\n\n df_profiles, df_train_queries, df_train_plans, df_train_clicks = read_in_train_data(absolute_path_data_folder)\n # df_test_queries, df_test_plans = read_in_test_data(absolute_raw_data_path)\n \n print(\"traindf: creating raw features for df_train\")\n df_tr_train, df_tr_test = raw_preprocessing(df_train_queries, df_train_plans, df_profiles, clickdf=df_train_clicks, df_mode=df_mode, plan_mode=plan_mode)\n write_data(absolute_path_data_folder, df_tr_train, 'train', df_mode, plan_mode)\n write_data(absolute_path_data_folder, df_tr_test, 'test', df_mode, plan_mode)\n\n return\n '''DEPRECATED\n print(\"\\n\")\n print(\"testdf_ creating raw features for df_test\")\n df_test = raw_preprocessing(df_test_queries, df_test_plans, df_profiles, df_mode=df_mode, plan_mode=plan_mode)\n write_data(absolute_path_data_folder, df_test, 'test', df_mode, plan_mode)\n '''\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "sklearn.decomposition.TruncatedSVD", "pandas.concat", "pandas.to_datetime", "pandas.merge", "pandas.notnull", "numpy.isnan", "numpy.repeat", "pandas.DataFrame", "numpy.std", "numpy.mean", "numpy.argsort", "pandas.io.json.json_normalize", "numpy.array", "pandas.to_numeric", "sklearn.feature_extraction.text.TfidfVectorizer", "numpy.zeros" ] ]
rahuja123/reid-strong-baseline
[ "dbc8da7badc616e8ba78471c6e77cc3b21b83759" ]
[ "engine/trainer.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: [email protected]\n\"\"\"\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import ModelCheckpoint, Timer\nfrom ignite.metrics import RunningAverage\n\nfrom utils.reid_metric import R1_mAP\n\nglobal ITER\nITER = 0\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (`torch.nn.Module`): the model to train\n optimizer (`torch.optim.Optimizer`): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n\n Returns:\n Engine: a trainer engine with supervised update function\n \"\"\"\n if device:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n img, target = batch\n img = img.to(device) if torch.cuda.device_count() >= 1 else img\n target = target.to(device) if torch.cuda.device_count() >= 1 else target\n score, feat = model(img)\n loss = loss_fn(score, feat, target)\n loss.backward()\n optimizer.step()\n # compute acc\n acc = (score.max(1)[1] == target).float().mean()\n return loss.item(), acc.item()\n\n return Engine(_update)\n\n\ndef create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, cetner_loss_weight,\n device=None):\n \"\"\"\n Factory function for creating a trainer for supervised models\n\n Args:\n model (`torch.nn.Module`): the model to train\n optimizer (`torch.optim.Optimizer`): the optimizer to use\n loss_fn (torch.nn loss function): the loss function to use\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n\n Returns:\n Engine: a trainer engine with supervised update function\n \"\"\"\n if device:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n optimizer_center.zero_grad()\n img, target = batch\n img = img.to(device) if torch.cuda.device_count() >= 1 else img\n target = target.to(device) if torch.cuda.device_count() >= 1 else target\n score, feat = model(img)\n loss = loss_fn(score, feat, target)\n # print(\"Total loss is {}, center loss is {}\".format(loss, center_criterion(feat, target)))\n loss.backward()\n optimizer.step()\n for param in center_criterion.parameters():\n param.grad.data *= (1. / cetner_loss_weight)\n optimizer_center.step()\n\n # compute acc\n acc = (score.max(1)[1] == target).float().mean()\n return loss.item(), acc.item()\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics,\n device=None):\n \"\"\"\n Factory function for creating an evaluator for supervised models\n\n Args:\n model (`torch.nn.Module`): the model to train\n metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n Returns:\n Engine: an evaluator engine with supervised inference function\n \"\"\"\n if device:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n data, pids, camids = batch\n data = data.to(device) if torch.cuda.device_count() >= 1 else data\n feat = model(data)\n return feat, pids, camids\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n\n\ndef do_train(\n cfg,\n model,\n train_loader,\n val_loader,\n optimizer,\n scheduler,\n loss_fn,\n num_query,\n start_epoch\n):\n log_period = cfg.SOLVER.LOG_PERIOD\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n eval_period = cfg.SOLVER.EVAL_PERIOD\n output_dir = cfg.OUTPUT_DIR\n device = cfg.MODEL.DEVICE\n epochs = cfg.SOLVER.MAX_EPOCHS\n\n logger = logging.getLogger(\"reid_baseline.train\")\n logger.info(\"Start training\")\n trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)\n evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)\n # checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False)\n checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, n_saved=10, require_empty=False)\n timer = Timer(average=True)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,\n 'optimizer': optimizer})\n timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n\n # average metric to attach on trainer\n RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')\n RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')\n\n @trainer.on(Events.STARTED)\n def start_training(engine):\n engine.state.epoch = start_epoch\n\n @trainer.on(Events.EPOCH_STARTED)\n def adjust_learning_rate(engine):\n scheduler.step()\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training_loss(engine):\n global ITER\n ITER += 1\n\n if ITER % log_period == 0:\n logger.info(\"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}\"\n .format(engine.state.epoch, ITER, len(train_loader),\n engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],\n scheduler.get_lr()[0]))\n if len(train_loader) == ITER:\n ITER = 0\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def print_times(engine):\n logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'\n .format(engine.state.epoch, timer.value() * timer.step_count,\n train_loader.batch_size / timer.value()))\n logger.info('-' * 10)\n timer.reset()\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n if engine.state.epoch % eval_period == 0:\n evaluator.run(val_loader)\n cmc, mAP = evaluator.state.metrics['r1_mAP']\n logger.info(\"Validation Results - Epoch: {}\".format(engine.state.epoch))\n logger.info(\"mAP: {:.1%}\".format(mAP))\n for r in [1, 5, 10]:\n logger.info(\"CMC curve, Rank-{:<3}:{:.1%}\".format(r, cmc[r - 1]))\n\n trainer.run(train_loader, max_epochs=epochs)\n\n\ndef do_train_with_center(\n cfg,\n model,\n center_criterion,\n train_loader,\n val_loader,\n optimizer,\n optimizer_center,\n scheduler,\n loss_fn,\n num_query,\n start_epoch\n):\n log_period = cfg.SOLVER.LOG_PERIOD\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n eval_period = cfg.SOLVER.EVAL_PERIOD\n output_dir = cfg.OUTPUT_DIR\n device = cfg.MODEL.DEVICE\n epochs = cfg.SOLVER.MAX_EPOCHS\n\n logger = logging.getLogger(\"reid_baseline.train\")\n logger.info(\"Start training\")\n trainer = create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, cfg.SOLVER.CENTER_LOSS_WEIGHT, device=device)\n evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)\n # checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False)\n checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, n_saved=10, require_empty=False)\n timer = Timer(average=True)\n\n trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,\n 'optimizer': optimizer,\n 'center_param': center_criterion,\n 'optimizer_center': optimizer_center})\n\n timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n\n # average metric to attach on trainer\n RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')\n RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')\n\n @trainer.on(Events.STARTED)\n def start_training(engine):\n engine.state.epoch = start_epoch\n\n @trainer.on(Events.EPOCH_STARTED)\n def adjust_learning_rate(engine):\n scheduler.step()\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training_loss(engine):\n global ITER\n ITER += 1\n\n if ITER % log_period == 0:\n logger.info(\"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}\"\n .format(engine.state.epoch, ITER, len(train_loader),\n engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],\n scheduler.get_lr()[0]))\n if len(train_loader) == ITER:\n ITER = 0\n\n # adding handlers using `trainer.on` decorator API\n @trainer.on(Events.EPOCH_COMPLETED)\n def print_times(engine):\n logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'\n .format(engine.state.epoch, timer.value() * timer.step_count,\n train_loader.batch_size / timer.value()))\n logger.info('-' * 10)\n timer.reset()\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n if engine.state.epoch % eval_period == 0:\n evaluator.run(val_loader)\n cmc, mAP = evaluator.state.metrics['r1_mAP']\n logger.info(\"Validation Results - Epoch: {}\".format(engine.state.epoch))\n logger.info(\"mAP: {:.1%}\".format(mAP))\n for r in [1, 5, 10]:\n logger.info(\"CMC curve, Rank-{:<3}:{:.1%}\".format(r, cmc[r - 1]))\n\n trainer.run(train_loader, max_epochs=epochs)\n" ]
[ [ "torch.cuda.device_count", "torch.no_grad", "torch.nn.DataParallel" ] ]
ForrestHurley/REngVelocityProfiling
[ "e32ad024248966cfa4e4282dfb0e9b972ce916b6" ]
[ "generate_regions.py" ]
[ "import numpy as np\nimport random\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches\n\nclass obstacle(object):\n def __init__(self,x=0,y=0,size=(1,1)):\n self.pos = np.array((x,y))\n if (len(size) > 2):\n pass\n #insert exception here\n if (len(size) == 1):\n self.size = np.array((size,size))\n else:\n self.size = np.array(size)\n\n @classmethod\n def RandomObstacle(cls,bottom_left=(0,0),top_right=(10,10),area=None):\n if (area == None):\n max_area = (top_right[0]-bottom_left[0])*(top_right[1]-bottom_left[1])\n area = random.uniform(max_area*0.05,max_area*0.4)\n\n x_size = random.uniform(0.33333*(area**0.5),3*(area**0.5))\n y_size = area / x_size\n\n position = (random.uniform(bottom_left[0],top_right[0]-x_size),\n random.uniform(bottom_left[1],top_right[1]-y_size))\n rand_obs = cls(x=position[0],y=position[1],size=(x_size,y_size))\n return rand_obs\n\n def get_points(self,resolution):\n return (np.array([self.pos]))\n\n def contains_point(self,point):\n pass\n\n def draw(self,color=\"b\"):\n pass\n\nclass rectangle(obstacle):\n def get_points(self): \n x = np.linspace(self.pos[0],self.pos[0]+self.size[0],dtype=\"int\")\n y = np.linspace(self.pos[1],self.pos[1]+self.size[1],dtype=\"int\")\n \n xy = np.concatenate(([x,np.full(x.shape,int(self.pos[1]))], #Don't double count the corners\n [x,np.full(x.shape,int(self.pos[1]+self.size[1]))],\n [np.full(y.shape,int(self.pos[0])),y],\n [np.full(y.shape,int(self.pos[0]+self.size[0])),y]),axis=-1)\n return xy\n\n def draw(self,color=\"b\",fill = True):\n #points = self.get_points()\n #plt.scatter(points[0],points[1])\n rect = patches.Rectangle(self.pos,\n self.size[0],\n self.size[1],\n color = color,\n fill = fill)\n return rect\n\n def contains_points(self,points,epsilon = 1):\n return np.logical_and(points > (self.pos - epsilon), points < (np.add(self.pos, self.size) + epsilon))\n\nclass ellipse(obstacle):\n pass\n\nclass list_obstacle(obstacle):\n pass\n\nclass cubic_spline(list_obstacle):\n pass\n\nclass linear_interpolator(list_obstacle):\n pass\n\nclass region(object):\n def __init__(self,size=(10,10)):\n self.size = size\n self.obstacles = [ rectangle(size=size) ] #The region bounding box\n\n @classmethod\n def RandomBlocks(cls,count=10,total_area=0.3,size=(100,100),safe_points = np.array([[10,90],[10,90]])):\n random_region = cls(size)\n\n uniform_area = size[0]*size[1]*total_area/count\n obstacle_areas = [uniform_area for i in range(count)] #TODO: make code to make sum of areas equal total area\n\n for area in obstacle_areas:\n while True:\n new_obstacle = rectangle.RandomObstacle(bottom_left=(0,0),\n top_right=random_region.size,area=area)\n if not np.any(new_obstacle.contains_points(safe_points)):\n random_region.obstacles.append(new_obstacle)\n break\n return (random_region)\n\n def get_points(self):\n point_sets = []\n for obs in self.obstacles:\n point_sets.append(obs.get_points())\n points = np.concatenate(point_sets,axis=1)\n return (points)\n \n def draw(self,show=True):\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlim(-1,101)\n ax.set_ylim(-1,101)\n ax.add_patch(self.obstacles[0].draw(fill=False))\n for obs in self.obstacles[1:]:\n ax.add_patch(obs.draw())\n if (show):\n plt.show()\n\nif __name__ == \"__main__\":\n\n from plan_path import path_planner\n\n planner = path_planner.create_planner()\n planner.plot = True\n\n area = region.RandomBlocks(20)\n planned_path = planner.generate_path(area)\n\n" ]
[ [ "numpy.linspace", "matplotlib.patches.Rectangle", "numpy.concatenate", "numpy.add", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
usnistgov/gpsdata
[ "23b2b9ecb13e64da24d6fb15bb2dd3b44c932308" ]
[ "labbench/_serialize.py" ]
[ "\"\"\"functions and CLI tools for mapping labbench objects onto config directories\"\"\"\n\nimport importlib\nimport inspect\nimport os\nfrom pathlib import Path\nimport pandas as pd\nfrom numbers import Number\n\nfrom ._rack import (\n Rack,\n RackMethod,\n Sequence,\n BoundSequence,\n import_as_rack,\n update_parameter_dict,\n)\nfrom . import util\n\n# some packages install ruamel_yaml, others ruamel.yaml. fall back to ruamel_yaml in case ruamel.yaml fails\n# using ruamel yaml instead of pyyaml because it allows us to place comments for human readability\ntry:\n import ruamel.yaml as ruamel_yaml\n from ruamel.yaml.comments import CommentedMap\nexcept ModuleNotFoundError:\n import ruamel_yaml\n from ruamel_yaml.comments import CommentedMap\n\n_yaml = ruamel_yaml.YAML()\n_yaml.indent(mapping=4, sequence=4)\n\nRACK_CONFIG_FILENAME = \"config.yaml\"\nEMPTY = inspect.Parameter.empty\n\n# csv files that define sequences for function execution\nINDEX_COLUMN_NAME = \"step_name\"\n\n_FIELD_SOURCE = \"source\"\n_FIELD_DEVICES = \"devices\"\n_FIELD_KEYWORD_DEFAULTS = \"default_arguments\"\n\n\ndef _yaml_comment_out(cm, key):\n \"\"\"comment out the line containing the item with the specified key\"\"\"\n\n from ruamel_yaml.error import CommentMark\n from ruamel_yaml.tokens import CommentToken\n\n cm.ca.items.setdefault(key, [None, [], None, None])\n cm.ca.items[key][1] = [CommentToken(\"# \", CommentMark(0), None)]\n\n\ndef _quote_strings_recursive(cm):\n \"\"\"apply quotes to dict values that have str type\"\"\"\n\n from ruamel_yaml.scalarstring import DoubleQuotedScalarString as quote\n\n ret = dict()\n\n for k, v in cm.items():\n if isinstance(v, str):\n ret[k] = quote(v)\n elif isinstance(v, dict):\n ret[k] = _quote_strings_recursive(v)\n\n return ret\n\n\ndef _search_method_parameters(rack_cls_or_obj):\n \"\"\"finds parameters of methods in rack_cls and its children recursively.\n\n Arguments:\n A subclass of Rack\n\n Returns:\n {<name (str)>: <(inspect.Parameter)>}, {<name (str)>: {<short_name (str)>: <callable method>>}}\n \"\"\"\n\n parameters = {}\n methods = {}\n\n for rack in rack_cls_or_obj._owners.values():\n # recurse into child racks\n if not isinstance(rack, Rack):\n continue\n\n p, m = _search_method_parameters(rack)\n\n update_parameter_dict(parameters, p)\n\n for name, callables in m.items():\n methods.setdefault(name, {}).update(callables)\n\n if inspect.isclass(rack_cls_or_obj):\n rack = rack_cls_or_obj()\n else:\n rack = rack_cls_or_obj\n\n for method in rack._methods.values():\n p = dict(list(method.extended_signature().parameters.items())) # skip 'self'\n\n update_parameter_dict(parameters, p)\n\n short_names = list(method.__call__.__signature__.parameters.keys())[\n 1:\n ] # skip 'self'\n long_names = list(p.keys()) # extended_signature() does not include 'self'\n for short_name, long_name in zip(short_names, long_names):\n methods.setdefault(long_name, {})[short_name] = method\n\n return parameters, methods\n\n\ndef _adjust_sequence_defaults(rack_cls: type, defaults_in: dict, **override_defaults):\n \"\"\"adjusts the method argument parameters in the Rack subclass `cls` according to config file\"\"\"\n params, methods = _search_method_parameters(rack_cls)\n\n defaults_in = dict(defaults_in, **override_defaults)\n\n sequences = [\n obj for obj in rack_cls._ownables.values() if isinstance(obj, Sequence)\n ]\n\n for name, default in dict(defaults_in).items():\n if default == params[name].default:\n del defaults_in[name]\n continue\n\n annot = params[name].annotation\n if name not in methods:\n clsname = rack_cls.__qualname__\n raise KeyError(\n f\"'{name}' is not a keyword argument of any method of '{clsname}'\"\n )\n\n elif annot is not EMPTY and not isinstance(default, annot):\n if isinstance(default, Number) and issubclass(annot, Number):\n # allow casting for numbers\n default = annot(default)\n\n else:\n raise TypeError(\n f\"the keyword default configuration at key '{name}' with value \"\n f\"'{default}' conflicts with annotated type '{annot.__qualname__}'\"\n )\n\n # update the call signature\n for short_name, method in methods[name].items():\n method.set_kwdefault(short_name, default)\n\n if len(defaults_in) > 0:\n util.logger.debug(f\"applied defaults {defaults_in}\")\n\n\ndef write_table_stub(rack: Rack, name: str, path: Path, with_defaults: bool = False):\n\n \"\"\"forms an empty DataFrame containing the headers needed for Sequence\n csv files.\n\n Arguments:\n rack: the Rack instance containing the sequence\n path: base directory where the csv should be saved\n with_defaults: whether to include columns when method parameters have defaults\n\n \"\"\"\n\n func = getattr(rack, name)\n if not callable(func):\n raise TypeError(f\"{func} is not callable\")\n try:\n sig = inspect.signature(func)\n except ValueError:\n sig = inspect.signature(func.__call__)\n\n # pick out the desired column names based on with_defaults\n params = sig.parameters\n columns = [\n name\n for name, param in list(params.items())[1:]\n if with_defaults or param.default is EMPTY\n ]\n\n if with_defaults:\n defaults = [\n [\n None if params[name].default is EMPTY else params[name].default\n for name in columns\n ]\n ]\n else:\n defaults = []\n\n df = pd.DataFrame(defaults, columns=columns)\n df.index.name = INDEX_COLUMN_NAME\n df.to_csv(path)\n util.logger.debug(f\"writing csv template to {repr(path)}\")\n\n\ndef _map_method_defaults(rack_cls):\n params, _ = _search_method_parameters(rack_cls)\n cm = CommentedMap(\n {\n k: (None if param.default is EMPTY else param.default)\n for k, param in params.items()\n }\n )\n\n for i, k in enumerate(list(cm.keys())[::-1]):\n if params[k].default is EMPTY:\n # comment out lines with no default to distinguish\n # from None value (which is an empty line in yaml)\n _yaml_comment_out(cm, k)\n\n if params[k].annotation is not EMPTY:\n # comment the type\n cm.yaml_add_eol_comment(key=k, comment=str(params[k].annotation.__name__))\n\n return cm\n\n\ndef _map_devices(cls):\n cm = CommentedMap()\n\n for dev_name, dev in cls._devices.items():\n cm[dev_name] = CommentedMap()\n cm.yaml_set_comment_before_after_key(\n dev_name, before=\"\\n\",\n )\n\n for value_name in dev._value_attrs:\n if not dev._traits[value_name].sets:\n # only show settable traits\n continue\n\n cm[dev_name][value_name] = getattr(dev, value_name)\n trait = getattr(type(dev), value_name)\n\n if trait.help:\n comment = \"\\n\" + trait.help\n else:\n comment = \"\\n(define this value with help to autogenerate this comment)\"\n\n cm[dev_name].yaml_set_comment_before_after_key(\n value_name, before=comment, indent=8\n )\n\n if trait.type is not None:\n comment = trait.type.__name__\n if trait.label:\n comment = f\"{comment} ({trait.label})\"\n\n cm[dev_name].yaml_add_eol_comment(key=value_name, comment=comment)\n\n return cm\n\n\ndef dump_rack(\n rack: Rack,\n output_path: Path,\n sourcepath: Path,\n pythonpath: Path = None,\n exist_ok: bool = False,\n with_defaults: bool = False,\n skip_tables: bool = False,\n):\n if not isinstance(rack, Rack):\n raise TypeError(f\"'rack' argument must be an instance of labbench.Rack\")\n\n cls = type(rack)\n\n output_path = Path(output_path)\n output_path.mkdir(exist_ok=exist_ok, parents=True)\n\n with open(output_path / RACK_CONFIG_FILENAME, \"w\") as stream:\n cm = CommentedMap(\n {\n _FIELD_SOURCE: dict(\n import_string=str(sourcepath),\n class_name=None if cls.__name__ == \"_as_rack\" else cls.__name__,\n python_path=str(pythonpath),\n ),\n _FIELD_KEYWORD_DEFAULTS: _map_method_defaults(rack),\n _FIELD_DEVICES: _map_devices(cls),\n }\n )\n\n cm.yaml_set_comment_before_after_key(\n _FIELD_SOURCE, before=\"orient the python interpreter to the source\",\n )\n\n cm.yaml_set_comment_before_after_key(\n _FIELD_KEYWORD_DEFAULTS,\n before=\"\\nparameter defaults for sequences in rack:\"\n \"\\nthese parameters can be omitted from sequence table columns\",\n )\n\n cm.yaml_set_comment_before_after_key(\n _FIELD_DEVICES, before=\"\\ndevice settings: initial values for value traits\"\n )\n\n # cm = _quote_strings_recursive(cm)\n\n _yaml.dump(cm, stream)\n\n if not skip_tables:\n for name, obj in rack.__dict__.items():\n if not callable(obj):\n continue\n\n table_path = getattr(obj, \"_tags\", {}).get(\"table_path\", None)\n\n if table_path is None and not hasattr(Rack, name):\n table_path = name + \".csv\"\n\n if table_path is not None:\n # write_csv_template(obj, output_path/table_path)\n # obj.to_template(output_path / f\"{obj.__name__}.csv\")\n write_table_stub(\n rack, name, output_path / table_path, with_defaults=with_defaults\n )\n\n\ndef read_yaml_config(config_path: str):\n with open(config_path, \"r\") as f:\n config = _yaml.load(f)\n util.logger.debug(f'loaded configuration from \"{str(config_path)}\"')\n return config\n\n\ndef load_rack(output_path: str, defaults: dict = {}, apply: bool = True) -> Rack:\n \"\"\"instantiates a Rack object from a config directory created by dump_rack.\n \n After instantiation, the current working directory is changed to output_path.\n \"\"\"\n\n config_path = Path(output_path) / RACK_CONFIG_FILENAME\n config = read_yaml_config(config_path)\n\n if \"import_string\" not in config[_FIELD_SOURCE]:\n raise KeyError(f\"import_string missing from '{str(config_path)}'\")\n\n append_path = config[_FIELD_SOURCE][\"python_path\"]\n\n # synthesize a Rack class\n rack_cls = import_as_rack(\n import_string=config[_FIELD_SOURCE][\"import_string\"],\n cls_name=config[_FIELD_SOURCE][\"class_name\"],\n append_path=[] if append_path is None else [append_path],\n # TODO: support extensions to python path?\n )\n\n if apply:\n os.chdir(output_path)\n _adjust_sequence_defaults(rack_cls, config[_FIELD_KEYWORD_DEFAULTS], **defaults)\n\n rack_cls._propagate_ownership()\n\n obj = rack_cls()\n\n if apply:\n for name, params in config[_FIELD_DEVICES].items():\n try:\n owned_obj = getattr(obj, name)\n except AttributeError:\n objname = type(obj).__qualname__\n raise IOError(\n f\"{config_path} refers to a device '{name}' that does not exist in {objname}\"\n )\n\n for param_name, param_value in params.items():\n setattr(owned_obj, param_name, param_value)\n\n return obj\n" ]
[ [ "pandas.DataFrame" ] ]
eduartorres/Project-Disaster-Response-Pipeline-
[ "3b0a29f4ebbfe6eb3fc204028d5e4fc5e9f2c956" ]
[ "app/run.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 17 19:16:27 2021\n\n@author: FELIPE\n\"\"\"\n# Note: Read the header before running\n# =============================================================================\n# >>> Project: Disaster Response Pipeline (Udacity - Data Science Nanodegree) <<<\n\n# Sample script execution:\n# > python run.py\n\n# =============================================================================\n\n# FLASK WEB APP\n\n# Loading libraries\n\nimport json\nimport plotly\nimport pandas as pd\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nimport joblib\nfrom sqlalchemy import create_engine\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk import pos_tag, word_tokenize\nimport nltk\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\napp = Flask(__name__)\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# ============================================================================\n\n# loading data\nengine = create_engine('sqlite:///../data/disaster_response_db.db')\ndf = pd.read_sql_table('disaster_response_db_table', engine)\n\n# ============================================================================\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\n\n# ============================================================================\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n \n # extract data needed for visuals\n # visuals for the genre \n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n \n category_names = df.iloc[:,4:].columns\n category_boolean = (df.iloc[:,4:] != 0).sum().values\n \n \n # create visuals\n # genre graph and category graph \n graphs = [\n # Graph - genre graph\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts,\n marker = dict(\n color = 'rgb(60,179,113)')\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message by Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n # Graph - category graph \n {\n 'data': [\n Bar(\n x=category_names,\n y=category_boolean,\n marker = dict(\n color = 'rgb(218,165,32)')\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message by Categories',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Category\",\n 'tickangle': 35\n }\n }\n }\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n# ============================================================================\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html\n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.read_sql_table" ] ]
SatishGitHubs/TensorFlow
[ "422b17b34f4f1380d2e487b3509bb97ff726edca" ]
[ "tensorflow/contrib/layers/python/layers/layers.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=g-short-docstring-punctuation\n\"\"\"Higher level ops for building layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom tensorflow.contrib.framework.python.ops import add_arg_scope\nfrom tensorflow.contrib.framework.python.ops import variables\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers.python.layers import utils\n\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import standard_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import moving_averages\n\n# TODO(b/28426988): Replace legacy_* fns migrated from slim.\n# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.\n__all__ = ['avg_pool2d',\n 'batch_norm',\n 'bias_add',\n 'conv2d',\n 'conv2d_in_plane',\n 'conv2d_transpose',\n 'convolution2d',\n 'convolution2d_in_plane',\n 'convolution2d_transpose',\n 'dropout',\n 'flatten',\n 'fully_connected',\n 'layer_norm',\n 'linear',\n 'max_pool2d',\n 'one_hot_encoding',\n 'relu',\n 'relu6',\n 'repeat',\n 'separable_conv2d',\n 'separable_convolution2d',\n 'softmax',\n 'stack',\n 'unit_norm',\n 'legacy_fully_connected',\n 'legacy_linear',\n 'legacy_relu']\n\n\n@add_arg_scope\ndef avg_pool2d(inputs,\n kernel_size,\n stride=2,\n padding='VALID',\n outputs_collections=None,\n scope=None):\n \"\"\"Adds a 2D average pooling op.\n\n It is assumed that the pooling is done per image but not in batch or channels.\n\n Args:\n inputs: A `Tensor` of size [batch_size, height, width, channels].\n kernel_size: A list of length 2: [kernel_height, kernel_width] of the\n pooling kernel over which the op is computed. Can be an int if both\n values are the same.\n stride: A list of length 2: [stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: The padding method, either 'VALID' or 'SAME'.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n \"\"\"\n with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n outputs = nn.avg_pool(inputs,\n ksize=[1, kernel_h, kernel_w, 1],\n strides=[1, stride_h, stride_w, 1],\n padding=padding)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef batch_norm(inputs,\n decay=0.999,\n center=True,\n scale=False,\n epsilon=0.001,\n activation_fn=None,\n updates_collections=ops.GraphKeys.UPDATE_OPS,\n is_training=True,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.\n\n \"Batch Normalization: Accelerating Deep Network Training by Reducing\n Internal Covariate Shift\"\n\n Sergey Ioffe, Christian Szegedy\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Note: When is_training is True the moving_mean and moving_variance need to be\n updated, by default the update_ops are placed in tf.GraphKeys.UPDATE_OPS so\n they need to be added as a dependency to the train_op, example:\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if update_ops:\n updates = tf.group(*update_ops)\n total_loss = control_flow_ops.with_dependencies([updates], total_loss)\n\n One can set update_collections=None to force the updates in place, but that\n can have speed penalty, specially in distributed settings.\n\n Args:\n inputs: a tensor with 2 or more dimensions, where the first dimension has\n `batch_size`. The normalization is over all but the last dimension.\n decay: decay for the moving average.\n center: If True, subtract `beta`. If False, `beta` is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling can be done by the next layer.\n epsilon: small float added to variance to avoid dividing by zero.\n activation_fn: activation function, default set to None to skip it and\n maintain a linear activation.\n updates_collections: collections to collect the update ops for computation.\n The updates_ops need to be excuted with the train_op.\n If None, a control dependency would be added to make sure the updates are\n computed in place.\n is_training: whether or not the layer is in training mode. In training mode\n it would accumulate the statistics of the moments into `moving_mean` and\n `moving_variance` using an exponential moving average with the given\n `decay`. When it is not in training mode then it would use the values of\n the `moving_mean` and the `moving_variance`.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: optional collections for the variables.\n outputs_collections: collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation.\n\n Raises:\n ValueError: if rank or last dimension of `inputs` is undefined.\n \"\"\"\n with variable_scope.variable_scope(scope, 'BatchNorm', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' % inputs.name)\n dtype = inputs.dtype.base_dtype\n axis = list(range(inputs_rank - 1))\n params_shape = inputs_shape[-1:]\n if not params_shape.is_fully_defined():\n raise ValueError('Inputs %s has undefined last dimension %s.' % (\n inputs.name, params_shape))\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n if center:\n beta_collections = utils.get_variable_collections(variables_collections,\n 'beta')\n beta = variables.model_variable('beta',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.zeros_initializer,\n collections=beta_collections,\n trainable=trainable)\n if scale:\n gamma_collections = utils.get_variable_collections(variables_collections,\n 'gamma')\n gamma = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.ones_initializer,\n collections=gamma_collections,\n trainable=trainable)\n # Create moving_mean and moving_variance variables and add them to the\n # appropiate collections.\n moving_mean_collections = utils.get_variable_collections(\n variables_collections, 'moving_mean')\n moving_mean = variables.model_variable(\n 'moving_mean',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.zeros_initializer,\n trainable=False,\n collections=moving_mean_collections)\n moving_variance_collections = utils.get_variable_collections(\n variables_collections, 'moving_variance')\n moving_variance = variables.model_variable(\n 'moving_variance',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.ones_initializer,\n trainable=False,\n collections=moving_variance_collections)\n\n # If `is_training` doesn't have a constant value, because it is a `Tensor`,\n # a `Variable` or `Placeholder` then is_training_value will be None and\n # `needs_moments` will be true.\n is_training_value = utils.constant_value(is_training)\n need_moments = is_training_value is None or is_training_value\n if need_moments:\n # Calculate the moments based on the individual batch.\n # Use a copy of moving_mean as a shift to compute more reliable moments.\n shift = math_ops.add(moving_mean, 0)\n mean, variance = nn.moments(inputs, axis, shift=shift)\n moving_vars_fn = lambda: (moving_mean, moving_variance)\n if updates_collections is None:\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n update_moving_mean = moving_averages.assign_moving_average(\n moving_mean, mean, decay)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, decay)\n with ops.control_dependencies([update_moving_mean,\n update_moving_variance]):\n return array_ops.identity(mean), array_ops.identity(variance)\n mean, variance = utils.smart_cond(is_training,\n _force_updates,\n moving_vars_fn)\n else:\n def _delay_updates():\n \"\"\"Internal function that delay updates moving_vars if is_training.\"\"\"\n update_moving_mean = moving_averages.assign_moving_average(\n moving_mean, mean, decay)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, decay)\n return update_moving_mean, update_moving_variance\n\n update_mean, update_variance = utils.smart_cond(is_training,\n _delay_updates,\n moving_vars_fn)\n ops.add_to_collections(updates_collections, update_mean)\n ops.add_to_collections(updates_collections, update_variance)\n # Use computed moments during training and moving_vars otherwise.\n vars_fn = lambda: (mean, variance)\n mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)\n else:\n mean, variance = moving_mean, moving_variance\n # Compute batch_normalization.\n outputs = nn.batch_normalization(\n inputs, mean, variance, beta, gamma, epsilon)\n outputs.set_shape(inputs_shape)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef bias_add(inputs,\n activation_fn=None,\n initializer=init_ops.zeros_initializer,\n regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a bias to the inputs.\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Args:\n inputs: a tensor of with at least rank 2 and value for the last dimension,\n e.g. `[batch_size, depth]`, `[None, None, None, depth]`.\n activation_fn: activation function, default set to None to skip it and\n maintain a linear activation.\n initializer: An initializer for the bias, defaults to 0.\n regularizer: A regularizer like the result of\n `l1_regularizer` or `l2_regularizer`.\n reuse: whether or not the layer and its variables should be reused.
To be\n able to reuse the layer scope must be given.\n variables_collections: optional collections for the variables.\n outputs_collections: collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for variable_scope.\n\n Returns:\n a tensor representing the result of adding biases to the inputs.\n \"\"\"\n with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n dtype = inputs.dtype.base_dtype\n num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)\n biases_collections = utils.get_variable_collections(variables_collections,\n 'biases')\n biases = variables.model_variable('biases',\n shape=[num_features,],\n dtype=dtype,\n initializer=initializer,\n regularizer=regularizer,\n collections=biases_collections,\n trainable=trainable)\n outputs = nn.bias_add(inputs, biases)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef convolution2d(inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer,\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a 2D convolution followed by an optional batch_norm layer.\n\n `convolution2d` creates a variable called `weights`, representing the\n convolutional kernel, that is convolved with the `inputs` to produce a\n `Tensor` of activations. If a `normalizer_fn` is provided (such as\n `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is\n None and a `biases_initializer` is provided then a `biases` variable would be\n created and added the activations. Finally, if `activation_fn` is not `None`,\n it is applied to the activations as well.\n\n Performs a'trous convolution with input stride equal to rate if rate is\n greater than one.\n\n Args:\n inputs: a 4-D tensor `[batch_size, height, width, channels]`.\n num_outputs: integer, the number of output filters.\n kernel_size: a list of length 2 `[kernel_height, kernel_width]` of\n of the filters. Can be an int if both values are the same.\n stride: a list of length 2 `[stride_height, stride_width]`.\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: one of `VALID` or `SAME`.\n rate: integer. If less than or equal to 1, a standard convolution is used.\n If greater than 1, than the a'trous convolution is applied and `stride`\n must be set to 1.\n activation_fn: activation function, set to None to skip it and maintain\n a linear activation.\n normalizer_fn: normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. 
If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: optional list of collections for all the variables or\n a dictionay containing a different list of collection per variable.\n outputs_collections: collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n\n Returns:\n a tensor representing the output of the operation.\n\n Raises:\n ValueError: if both 'rate' and `stride` are larger than one.\n \"\"\"\n with variable_scope.variable_scope(scope, 'Conv', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n dtype = inputs.dtype.base_dtype\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n if rate > 1 and (stride_h > 1 or stride_w > 1):\n raise ValueError('Only one of rate or stride can be larger than one')\n num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)\n weights_shape = [kernel_h, kernel_w,\n num_filters_in, num_outputs]\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n weights = variables.model_variable('weights',\n shape=weights_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n collections=weights_collections,\n trainable=trainable)\n if rate > 1:\n outputs = nn.atrous_conv2d(inputs, weights, rate, padding=padding)\n else:\n outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],\n padding=padding)\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n biases_collections = utils.get_variable_collections(\n variables_collections, 'biases')\n biases = variables.model_variable('biases',\n shape=[num_outputs,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n collections=biases_collections,\n trainable=trainable)\n outputs = nn.bias_add(outputs, biases)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef convolution2d_in_plane(\n inputs,\n kernel_size,\n stride=1,\n padding='SAME',\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer,\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Performs the same in-plane convolution to each channel independently.\n\n This is useful for performing various simple channel-independent convolution\n operations such as image gradients:\n\n image = tf.constant(..., shape=(16, 240, 320, 3))\n vert_gradients = layers.conv2d_in_plane(image,\n kernel=[1, -1],\n kernel_size=[2, 1])\n horz_gradients = layers.conv2d_in_plane(image,\n kernel=[1, -1],\n kernel_size=[1, 2])\n\n Args:\n inputs: a 4-D tensor with dimensions [batch_size, height, width, channels].\n kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of\n of the pooling. 
Can be an int if both values are the same.\n stride: a list of length 2 `[stride_height, stride_width]`.\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: the padding type to use, either 'SAME' or 'VALID'.\n activation_fn: activation function, set to None to skip it and maintain\n a linear activation.\n normalizer_fn: normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: optional list of collections for all the variables or\n a dictionay containing a different list of collection per variable.\n outputs_collections: collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation.\n \"\"\"\n with variable_scope.variable_scope(\n scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:\n dtype = inputs.dtype.base_dtype\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)\n weights_shape = [kernel_h, kernel_w, 1, 1]\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n weights = variables.model_variable('weights',\n shape=weights_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n collections=weights_collections,\n trainable=trainable)\n depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])\n outputs = nn.depthwise_conv2d(inputs, depthwise_weights,\n [1, stride_h, stride_w, 1], padding)\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n biases_collections = utils.get_variable_collections(\n variables_collections, 'biases')\n biases = variables.model_variable('biases',\n shape=[num_filters_in,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n collections=biases_collections,\n trainable=trainable)\n outputs = nn.bias_add(outputs, biases)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef convolution2d_transpose(\n inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer,\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a convolution2d_transpose with an optional batch normalization 
layer.\n\n The function creates a variable called `weights`, representing the\n kernel, that is convolved with the input. If `batch_norm_params` is `None`, a\n second variable called 'biases' is added to the result of the operation.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_outputs: integer, the number of output filters.\n kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of\n of the filters. Can be an int if both values are the same.\n stride: a list of length 2: [stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: one of 'VALID' or 'SAME'.\n activation_fn: activation function, set to None to skip it and maintain\n a linear activation.\n normalizer_fn: normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: optional list of collections for all the variables or\n a dictionay containing a different list of collection per variable.\n outputs_collections: collection to add the outputs.\n trainable: whether or not the variables should be trainable or not.\n scope: Optional scope for variable_scope.\n\n Returns:\n a tensor representing the output of the operation.\n\n Raises:\n ValueError: if 'kernel_size' is not a list of length 2.\n \"\"\"\n with variable_scope.variable_scope(\n scope, 'Conv2d_transpose', [inputs], reuse=reuse) as sc:\n dtype = inputs.dtype.base_dtype\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n num_filters_in = utils.last_dimension(\n inputs.get_shape(), min_rank=4)\n weights_shape = [kernel_h, kernel_w, num_outputs, num_filters_in]\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n weights = variables.model_variable(\n 'weights',\n shape=weights_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n trainable=trainable,\n collections=weights_collections)\n\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n height, width = inputs_shape[1], inputs_shape[2]\n\n def get_deconv_dim(dim_size, stride_size, kernel_size, padding):\n if isinstance(dim_size, ops.Tensor):\n dim_size = math_ops.mul(dim_size, stride_size)\n elif dim_size is not None:\n dim_size *= stride_size\n\n if padding == 'VALID' and dim_size is not None:\n dim_size += max(kernel_size - stride_size, 0)\n return dim_size\n\n # Infer the dynamic output shape:\n out_height = get_deconv_dim(height, stride_h, kernel_h, padding)\n out_width = get_deconv_dim(width, stride_w, kernel_w, padding)\n\n output_shape = array_ops.pack(\n [batch_size, out_height, out_width, num_outputs])\n outputs = nn.conv2d_transpose(inputs, weights, output_shape,\n [1, stride_h, stride_w, 1],\n padding=padding)\n\n # Infer the static output shape:\n out_shape = inputs.get_shape().as_list()\n 
out_shape[-1] = num_outputs\n out_shape[1] = get_deconv_dim(out_shape[1], stride_h, kernel_h, padding)\n out_shape[2] = get_deconv_dim(out_shape[2], stride_w, kernel_w, padding)\n outputs.set_shape(out_shape)\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n biases_collections = utils.get_variable_collections(\n variables_collections, 'biases')\n biases = variables.model_variable('biases',\n shape=[num_outputs,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n collections=biases_collections)\n outputs = nn.bias_add(outputs, biases)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef dropout(inputs,\n keep_prob=0.5,\n noise_shape=None,\n is_training=True,\n outputs_collections=None,\n scope=None):\n \"\"\"Returns a dropout op applied to the input.\n\n With probability `keep_prob`, outputs the input element scaled up by\n `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected\n sum is unchanged.\n\n Args:\n inputs: the tensor to pass to the nn.dropout op.\n keep_prob: A scalar `Tensor` with the same type as x. The probability\n that each element is kept.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the\n shape for randomly generated keep/drop flags.\n is_training: A bool `Tensor` indicating whether or not the model\n is in training mode. If so, dropout is applied and values scaled.\n Otherwise, inputs is returned.\n outputs_collections: collection to add the outputs.\n scope: Optional scope for name_scope.\n\n Returns:\n a tensor representing the output of the operation.\n \"\"\"\n with ops.name_scope(scope, 'Dropout', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)\n id_fn = lambda: inputs\n outputs = utils.smart_cond(is_training, dropout_fn, id_fn)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef flatten(inputs,\n outputs_collections=None,\n scope=None):\n \"\"\"Flattens the input while maintaining the batch_size.\n\n Assumes that the first dimension represents the batch.\n\n Args:\n inputs: a tensor of size [batch_size, ...].\n outputs_collections: collection to add the outputs.\n scope: Optional scope for name_scope.\n\n Returns:\n a flattened tensor with shape [batch_size, k].\n Raises:\n ValueError: if inputs.shape is wrong.\n \"\"\"\n with ops.name_scope(scope, 'Flatten', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n if (inputs_rank is None) or (inputs_rank < 2):\n raise ValueError('Inputs must have a least 2 dimensions.')\n dims = inputs_shape[1:]\n if not dims.is_fully_defined():\n raise ValueError('Inputs 2nd dimension must be defined.')\n k = dims.num_elements()\n outputs = array_ops.reshape(inputs, [-1, k])\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef fully_connected(inputs,\n num_outputs,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer,\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n 
trainable=True,\n scope=None):\n \"\"\"Adds a fully connected layer.\n\n `fully_connected` creates a variable called `weights`, representing a fully\n connected weight matrix, which is multiplied by the `inputs` to produce a\n `Tensor` of hidden units. If a `normalizer_fn` is provided (such as\n `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is\n None and a `biases_initializer` is provided then a `biases` variable would be\n created and added the hidden units. Finally, if `activation_fn` is not `None`,\n it is applied to the hidden units as well.\n\n Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened\n prior to the initial matrix multiply by `weights`.\n\n Args:\n inputs: A tensor of with at least rank 2 and value for the last dimension,\n i.e. `[batch_size, depth]`, `[None, None, None, channels]`.\n num_outputs: Integer or long, the number of output units in the layer.\n activation_fn: activation function, set to None to skip it and maintain\n a linear activation.\n normalizer_fn: normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collections per variable.\n outputs_collections: collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for variable_scope.\n\n Returns:\n the tensor variable representing the result of the series of operations.\n\n Raises:\n ValueError: if x has rank less than 2 or if its last dimension is not set.\n \"\"\"\n if not (isinstance(num_outputs, int) or isinstance(num_outputs, long)):\n raise ValueError('num_outputs should be int or long, got %s.', num_outputs)\n with variable_scope.variable_scope(scope, 'fully_connected', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n dtype = inputs.dtype.base_dtype\n inputs_shape = inputs.get_shape()\n num_input_units = utils.last_dimension(inputs_shape, min_rank=2)\n\n static_shape = inputs_shape.as_list()\n static_shape[-1] = num_outputs\n\n out_shape = array_ops.unpack(array_ops.shape(inputs))\n out_shape[-1] = num_outputs\n\n weights_shape = [num_input_units, num_outputs]\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n weights = variables.model_variable('weights',\n shape=weights_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n collections=weights_collections,\n trainable=trainable)\n if len(static_shape) > 2:\n # Reshape inputs\n inputs = array_ops.reshape(inputs, [-1, num_input_units])\n outputs = standard_ops.matmul(inputs, weights)\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n 
biases_collections = utils.get_variable_collections(\n variables_collections, 'biases')\n biases = variables.model_variable('biases',\n shape=[num_outputs,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n collections=biases_collections,\n trainable=trainable)\n outputs = nn.bias_add(outputs, biases)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n if len(static_shape) > 2:\n # Reshape back outputs\n outputs = array_ops.reshape(outputs, array_ops.pack(out_shape))\n outputs.set_shape(static_shape)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef layer_norm(inputs,\n center=True,\n scale=True,\n activation_fn=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a Layer Normalization layer from https://arxiv.org/abs/1607.06450.\n\n \"Layer Normalization\"\n\n Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Args:\n inputs: a tensor with 2 or more dimensions. The normalization\n occurs over all but the first dimension.\n center: If True, subtract `beta`. If False, `beta` is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling can be done by the next layer.\n activation_fn: activation function, default set to None to skip it and\n maintain a linear activation.\n reuse: whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: optional collections for the variables.\n outputs_collections: collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_op_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation.\n\n Raises:\n ValueError: if rank or last dimension of `inputs` is undefined.\n \"\"\"\n with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' % inputs.name)\n dtype = inputs.dtype.base_dtype\n axis = list(range(1, inputs_rank))\n params_shape = inputs_shape[-1:]\n if not params_shape.is_fully_defined():\n raise ValueError('Inputs %s has undefined last dimension %s.' 
% (\n inputs.name, params_shape))\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n if center:\n beta_collections = utils.get_variable_collections(variables_collections,\n 'beta')\n beta = variables.model_variable('beta',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.zeros_initializer,\n collections=beta_collections,\n trainable=trainable)\n if scale:\n gamma_collections = utils.get_variable_collections(variables_collections,\n 'gamma')\n gamma = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.ones_initializer,\n collections=gamma_collections,\n trainable=trainable)\n # Calculate the moments on the last axis (layer activations).\n mean, variance = nn.moments(inputs, axis, keep_dims=True)\n # Compute layer normalization using the batch_normalization function.\n variance_epsilon = 1E-12\n outputs = nn.batch_normalization(\n inputs, mean, variance, beta, gamma, variance_epsilon)\n outputs.set_shape(inputs_shape)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope,\n outputs)\n\n\n@add_arg_scope\ndef max_pool2d(inputs,\n kernel_size,\n stride=2,\n padding='VALID',\n outputs_collections=None,\n scope=None):\n \"\"\"Adds a 2D Max Pooling op.\n\n It is assumed that the pooling is done per image but not in batch or channels.\n\n Args:\n inputs: A `Tensor` of size [batch_size, height, width, channels].\n kernel_size: A list of length 2: [kernel_height, kernel_width] of the\n pooling kernel over which the op is computed. Can be an int if both\n values are the same.\n stride: A list of length 2: [stride_height, stride_width].\n Can be an int if both strides are the same. 
Note that presently\n both strides must have the same value.\n padding: The padding method, either 'VALID' or 'SAME'.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n\n Raises:\n ValueError: If 'kernel_size' is not a 2-D list\n \"\"\"\n with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n outputs = nn.max_pool(inputs,\n ksize=[1, kernel_h, kernel_w, 1],\n strides=[1, stride_h, stride_w, 1],\n padding=padding)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef one_hot_encoding(labels,\n num_classes,\n on_value=1.0,\n off_value=0.0,\n outputs_collections=None,\n scope=None):\n \"\"\"Transform numeric labels into onehot_labels using tf.one_hot.\n\n Args:\n labels: [batch_size] target labels.\n num_classes: total number of classes.\n on_value: A scalar defining the on-value.\n off_value: A scalar defining the off-value.\n outputs_collections: collection to add the outputs.\n scope: Optional scope for name_scope.\n\n Returns:\n one hot encoding of the labels.\n \"\"\"\n with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:\n labels = ops.convert_to_tensor(labels)\n if labels.dtype == dtypes.int32:\n labels = standard_ops.to_int64(labels)\n outputs = standard_ops.one_hot(labels,\n num_classes,\n on_value=on_value,\n off_value=off_value)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\ndef _apply_activation(y, activation_fn, output_collections):\n if activation_fn is not None:\n y = activation_fn(y)\n ops.add_to_collections(list(output_collections or []) +\n [ops.GraphKeys.ACTIVATIONS], y)\n return y\n\n\ndef repeat(inputs, repetitions, layer, *args, **kwargs):\n \"\"\"Applies the same layer with the same arguments repeatedly.\n\n ```python\n y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')\n # It is equivalent to:\n\n x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')\n x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')\n y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')\n ```\n\n If the `scope` argument is not given in `kwargs`, it is set to\n `layer.__name__`, or `layer.func.__name__` (for `functools.partial`\n objects). 
If neither `__name__` nor `func.__name__` is available, the\n layers are called with `scope='stack'`.\n\n Args:\n inputs: A `Tensor` suitable for layer.\n repetitions: Int, number of repetitions.\n layer: A layer with arguments `(inputs, *args, **kwargs)`\n *args: Extra args for the layer.\n **kwargs: Extra kwargs for the layer.\n\n Returns:\n a tensor result of applying the layer, repetitions times.\n Raises:\n ValueError: if the op is unknown or wrong.\n \"\"\"\n scope = kwargs.pop('scope', None)\n with variable_scope.variable_scope(scope, 'Repeat', [inputs]):\n inputs = ops.convert_to_tensor(inputs)\n if scope is None:\n if hasattr(layer, '__name__'):\n scope = layer.__name__\n elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):\n scope = layer.func.__name__ # In case layer is a functools.partial.\n else:\n scope = 'repeat'\n outputs = inputs\n for i in range(repetitions):\n kwargs['scope'] = scope + '_' + str(i+1)\n outputs = layer(outputs, *args, **kwargs)\n return outputs\n\n\n@add_arg_scope\ndef separable_convolution2d(\n inputs,\n num_outputs,\n kernel_size,\n depth_multiplier,\n stride=1,\n padding='SAME',\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer,\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a depth-separable 2D convolution with optional batch_norm layer.\n\n This op first performs a depthwise convolution that acts separately on\n channels, creating a variable called `depthwise_weights`. If `num_outputs`\n is not None, it adds a pointwise convolution that mixes channels, creating a\n variable called `pointwise_weights`. Then, if `batch_norm_params` is None,\n it adds bias to the result, creating a variable called 'biases', otherwise\n it adds a batch normalization layer. It finally applies an activation function\n to produce the end result.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_outputs: the number of pointwise convolution output filters. If is\n None, then we skip the pointwise convolution stage.\n kernel_size: a list of length 2: [kernel_height, kernel_width] of\n of the filters. Can be an int if both values are the same.\n depth_multiplier: the number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n stride: a list of length 2: [stride_height, stride_width], specifying the\n depthwise convolution stride. Can be an int if both strides are the same.\n padding: one of 'VALID' or 'SAME'.\n activation_fn: activation function, set to None to skip it and maintain\n a linear activation.\n normalizer_fn: normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n variables_collections: optional list of collections for all the variables or\n a dictionay containing a different list of collection per variable.\n outputs_collections: collection to add the outputs.\n trainable: whether or not the variables should be trainable or not.\n scope: Optional scope for variable_scope.\n\n Returns:\n A `Tensor` representing the output of the operation.\n \"\"\"\n with variable_scope.variable_scope(\n scope, 'SeparableConv2d', [inputs], reuse=reuse) as sc:\n dtype = inputs.dtype.base_dtype\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n\n depthwise_shape = [kernel_h, kernel_w,\n num_filters_in, depth_multiplier]\n depthwise_weights = variables.model_variable(\n 'depthwise_weights',\n shape=depthwise_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n trainable=trainable,\n collections=weights_collections)\n strides = [1, stride_h, stride_w, 1]\n if num_outputs is not None:\n # Full separable convolution: Depthwise followed by pointwise convolution.\n pointwise_shape = [1, 1, depth_multiplier * num_filters_in,\n num_outputs]\n pointwise_weights = variables.model_variable(\n 'pointwise_weights',\n shape=pointwise_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n trainable=trainable,\n collections=weights_collections)\n outputs = nn.separable_conv2d(inputs,\n depthwise_weights,\n pointwise_weights,\n strides,\n padding)\n else:\n # Depthwise convolution only.\n outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding)\n num_outputs = depth_multiplier * num_filters_in\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n biases_collections = utils.get_variable_collections(\n variables_collections, 'biases')\n biases = variables.model_variable('biases',\n shape=[num_outputs,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n collections=biases_collections)\n outputs = nn.bias_add(outputs, biases)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef softmax(logits, scope=None):\n \"\"\"Performs softmax on Nth dimension of N-dimensional logit tensor.\n\n For two-dimensional logits this reduces to tf.nn.softmax. 
The N-th dimension\n needs to have a specified number of elements (number of classes).\n\n Args:\n logits: N-dimensional `Tensor` with logits, where N > 1.\n scope: Optional scope for variable_scope.\n\n Returns:\n a `Tensor` with same shape and type as logits.\n \"\"\"\n # TODO(jrru): Add axis argument which defaults to last dimension.\n with variable_scope.variable_scope(scope, 'softmax', [logits]):\n num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)\n logits_2d = array_ops.reshape(logits, [-1, num_logits])\n predictions = nn.softmax(logits_2d)\n predictions = array_ops.reshape(predictions, array_ops.shape(logits))\n predictions.set_shape(logits.get_shape())\n return predictions\n\n\ndef stack(inputs, layer, stack_args, **kwargs):\n \"\"\"Builds a stack of layers by applying layer repeatedly using stack_args.\n\n `stack` allows you to repeatedly apply the same operation with different\n arguments `stack_args[i]`. For each application of the layer, `stack` creates\n a new scope appended with an increasing number. For example:\n\n ```python\n y = stack(x, fully_connected, [32, 64, 128], scope='fc')\n # It is equivalent to:\n\n x = fully_connected(x, 32, scope='fc/fc_1')\n x = fully_connected(x, 64, scope='fc/fc_2')\n y = fully_connected(x, 128, scope='fc/fc_3')\n ```\n\n If the `scope` argument is not given in `kwargs`, it is set to\n `layer.__name__`, or `layer.func.__name__` (for `functools.partial`\n objects). If neither `__name__` nor `func.__name__` is available, the\n layers are called with `scope='stack'`.\n\n Args:\n inputs: A `Tensor` suitable for layer.\n layer: A layer with arguments `(inputs, *args, **kwargs)`\n stack_args: A list/tuple of parameters for each call of layer.\n **kwargs: Extra kwargs for the layer.\n\n Returns:\n a `Tensor` result of applying the stacked layers.\n\n Raises:\n ValueError: if the op is unknown or wrong.\n \"\"\"\n scope = kwargs.pop('scope', None)\n if not isinstance(stack_args, (list, tuple)):\n raise ValueError('stack_args need to be a list or tuple')\n with variable_scope.variable_scope(scope, 'Stack', [inputs]):\n inputs = ops.convert_to_tensor(inputs)\n if scope is None:\n if hasattr(layer, '__name__'):\n scope = layer.__name__\n elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):\n scope = layer.func.__name__ # In case layer is a functools.partial.\n else:\n scope = 'stack'\n outputs = inputs\n for i in range(len(stack_args)):\n kwargs['scope'] = scope + '_' + str(i+1)\n layer_args = stack_args[i]\n if not isinstance(layer_args, (list, tuple)):\n layer_args = [layer_args]\n outputs = layer(outputs, *layer_args, **kwargs)\n return outputs\n\n\n@add_arg_scope\ndef unit_norm(inputs, dim, epsilon=1e-7, scope=None):\n \"\"\"Normalizes the given input across the specified dimension to unit length.\n\n Note that the rank of `input` must be known.\n\n Args:\n inputs: A `Tensor` of arbitrary size.\n dim: The dimension along which the input is normalized.\n epsilon: A small value to add to the inputs to avoid dividing by zero.\n scope: Optional scope for variable_scope.\n\n Returns:\n The normalized `Tensor`.\n\n Raises:\n ValueError: If dim is smaller than the number of dimensions in 'inputs'.\n \"\"\"\n with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):\n if not inputs.get_shape():\n raise ValueError('The input rank must be known.')\n input_rank = len(inputs.get_shape().as_list())\n if dim < 0 or dim >= input_rank:\n raise ValueError(\n 'dim must be positive but smaller than the input rank.')\n\n lengths = 
math_ops.sqrt(epsilon + math_ops.reduce_sum(\n math_ops.square(inputs), dim, True))\n multiples = []\n if dim > 0:\n multiples.append(array_ops.ones([dim], dtypes.int32))\n multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))\n if dim < (input_rank - 1):\n multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))\n multiples = array_ops.concat(0, multiples)\n return math_ops.div(inputs, array_ops.tile(lengths, multiples))\n\n\ndef legacy_fully_connected(x,\n num_output_units,\n activation_fn=None,\n weight_init=initializers.xavier_initializer(),\n bias_init=init_ops.zeros_initializer,\n name=None,\n weight_collections=(ops.GraphKeys.WEIGHTS,),\n bias_collections=(ops.GraphKeys.BIASES,),\n output_collections=(ops.GraphKeys.ACTIVATIONS,),\n trainable=True,\n weight_regularizer=None,\n bias_regularizer=None):\n # pylint: disable=anomalous-backslash-in-string\n r\"\"\"Adds the parameters for a fully connected layer and returns the output.\n\n A fully connected layer is generally defined as a matrix multiply:\n `y = f(w * x + b)` where `f` is given by `activation_fn`. If\n `activation_fn` is `None`, the result of `y = w * x + b` is\n returned.\n\n If `x` has shape [\\\\\\(\\\\text{dim}_0, \\\\text{dim}_1, ..., \\\\text{dim}_n\\\\\\)]\n with more than 2 dimensions (\\\\\\(n > 1\\\\\\)), then we repeat the matrix\n multiply along the first dimensions. The result r is a tensor of shape\n [\\\\\\(\\\\text{dim}_0, ..., \\\\text{dim}_{n-1},\\\\\\) `num_output_units`],\n where \\\\\\( r_{i_0, ..., i_{n-1}, k} =\n \\\\sum_{0 \\\\leq j < \\\\text{dim}_n} x_{i_0, ... i_{n-1}, j} \\cdot w_{j, k}\\\\\\).\n This is accomplished by reshaping `x` to 2-D\n [\\\\\\(\\\\text{dim}_0 \\\\cdot ... \\\\cdot \\\\text{dim}_{n-1}, \\\\text{dim}_n\\\\\\)]\n before the matrix multiply and afterwards reshaping it to\n [\\\\\\(\\\\text{dim}_0, ..., \\\\text{dim}_{n-1},\\\\\\) `num_output_units`].\n\n This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting\n `bias_init` to `None`.\n\n The variable creation is compatible with `tf.variable_scope` and so can be\n reused with `tf.variable_scope` or `tf.make_template`.\n\n Most of the details of variable creation can be controlled by specifying the\n initializers (`weight_init` and `bias_init`) and in which collections to place\n the created variables (`weight_collections` and `bias_collections`; note that\n the variables are always added to the `VARIABLES` collection). The output of\n the layer can be placed in custom collections using `output_collections`.\n The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,\n respectively.\n\n A per layer regularization can be specified by setting `weight_regularizer`\n and `bias_regularizer`, which are applied to the weights and biases\n respectively, and whose output is added to the `REGULARIZATION_LOSSES`\n collection.\n\n Args:\n x: The input `Tensor`.\n num_output_units: The size of the output.\n activation_fn: activation function, default set to None to skip it and\n maintain a linear activation.\n weight_init: An optional weight initialization, defaults to\n `xavier_initializer`.\n bias_init: An initializer for the bias, defaults to 0. Set to `None` in\n order to disable bias.\n name: The name for this operation is used to name operations and to find\n variables. If specified it must be unique for this scope, otherwise a\n unique name starting with \"fully_connected\" will be created. 
See\n `tf.variable_scope` for details.\n weight_collections: List of graph collections to which weights are added.\n bias_collections: List of graph collections to which biases are added.\n output_collections: List of graph collections to which outputs are added.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n weight_regularizer: A regularizer like the result of\n `l1_regularizer` or `l2_regularizer`. Used for weights.\n bias_regularizer: A regularizer like the result of\n `l1_regularizer` or `l2_regularizer`. Used for biases.\n\n Returns:\n The output of the fully connected layer.\n\n Raises:\n ValueError: if x has rank less than 2 or if its last dimension is not set.\n \"\"\"\n with variable_scope.variable_scope(name, 'fully_connected', [x]):\n x = ops.convert_to_tensor(x)\n dims = x.get_shape().dims\n if dims is None:\n raise ValueError('dims of x must be known but is None')\n if len(dims) < 2:\n raise ValueError('rank of x must be at least 2 not: %d' % len(dims))\n num_input_units = dims[-1].value\n if num_input_units is None:\n raise ValueError('last dimension of x must be known but is None')\n dtype = x.dtype.base_dtype\n\n weight_collections = set(list(weight_collections or []) +\n [ops.GraphKeys.VARIABLES])\n w = variable_scope.get_variable('weights',\n shape=[num_input_units, num_output_units],\n dtype=dtype,\n initializer=weight_init,\n collections=weight_collections,\n regularizer=weight_regularizer,\n trainable=trainable)\n x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,\n [-1, num_input_units])\n y = standard_ops.matmul(x_2_dim, w)\n\n if bias_init is not None:\n bias_collections = set(list(bias_collections or []) +\n [ops.GraphKeys.VARIABLES])\n b = variable_scope.get_variable('bias',\n shape=[num_output_units],\n dtype=dtype,\n initializer=bias_init,\n collections=bias_collections,\n regularizer=bias_regularizer,\n trainable=trainable)\n\n y = nn.bias_add(y, b)\n\n if len(dims) > 2:\n out_shape = array_ops.unpack(array_ops.shape(x))\n out_shape[-1] = num_output_units\n\n y = array_ops.reshape(y, array_ops.pack(out_shape))\n\n static_shape = x.get_shape().as_list()\n static_shape[-1] = num_output_units\n y.set_shape(static_shape)\n\n return _apply_activation(y, activation_fn, output_collections)\n\n\n# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).\n# Simple aliases which remove the activation_fn parameter.\nlegacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)\nlegacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)\nrelu = functools.partial(fully_connected, activation_fn=nn.relu)\nrelu6 = functools.partial(fully_connected, activation_fn=nn.relu6)\nlinear = functools.partial(fully_connected, activation_fn=None)\n\n# Simple alias.\nconv2d = convolution2d\nconv2d_transpose = convolution2d_transpose\nconv2d_in_plane = convolution2d_in_plane\nseparable_conv2d = separable_convolution2d\n" ]
[ [ "tensorflow.contrib.layers.python.layers.utils.constant_value", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.nn.softmax", "tensorflow.python.ops.nn.separable_conv2d", "tensorflow.python.ops.nn.conv2d", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.training.moving_averages.assign_moving_average", "tensorflow.python.ops.nn.conv2d_transpose", "tensorflow.contrib.framework.python.ops.variables.model_variable", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.pack", "tensorflow.python.ops.standard_ops.matmul", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.nn.dropout", "tensorflow.contrib.layers.python.layers.utils.smart_cond", "tensorflow.python.ops.math_ops.mul", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.nn.bias_add", "tensorflow.python.ops.nn.atrous_conv2d", "tensorflow.python.ops.nn.moments", "tensorflow.python.ops.nn.avg_pool", "tensorflow.python.framework.ops.add_to_collections", "tensorflow.contrib.layers.python.layers.utils.collect_named_outputs", "tensorflow.python.ops.standard_ops.one_hot", "tensorflow.python.ops.array_ops.concat", "tensorflow.contrib.layers.python.layers.initializers.xavier_initializer", "tensorflow.python.ops.nn.max_pool", "tensorflow.contrib.layers.python.layers.utils.get_variable_collections", "tensorflow.python.ops.nn.depthwise_conv2d", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.standard_ops.to_int64", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.reshape", "tensorflow.contrib.layers.python.layers.utils.last_dimension", "tensorflow.contrib.layers.python.layers.utils.two_element_tuple", "tensorflow.python.ops.nn.batch_normalization" ] ]
tonouchi510/kfp-project
[ "67b78ae53cc3de594b8254999a4f553a8d5cec27" ]
[ "pipelines/head-pose-pipeline/training/capsulelayers.py" ]
[ "\"\"\"\nOriginal code taken from Author: Xifeng Guo, E-mail: `[email protected]`, Github: `https://github.com/XifengGuo/CapsNet-Keras`\nand adjusted for the needs of this project.\n\nSome key layers used for constructing a Capsule Network. These layers can used to construct CapsNet on other dataset, \nnot just on MNIST.\n\n*NOTE*: some functions can be implemented in multiple ways, I keep all of them. You can try them for yourself just by\nuncommenting them and commenting their counterparts.\n\nAuthor: Xifeng Guo, E-mail: `[email protected]`, Github: `https://github.com/XifengGuo/CapsNet-Keras`\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\n\ndef batch_dot(x, y, axes=None):\n \"\"\"Batchwise dot product.\n\n `batch_dot` is used to compute dot product of `x` and `y` when\n `x` and `y` are data in batch, i.e. in a shape of\n `(batch_size, :)`.\n `batch_dot` results in a tensor or variable with less dimensions\n than the input. If the number of dimensions is reduced to 1,\n we use `expand_dims` to make sure that ndim is at least 2.\n\n # Arguments\n x: Keras tensor or variable with `ndim >= 2`.\n y: Keras tensor or variable with `ndim >= 2`.\n axes: list of (or single) int with target dimensions.\n The lengths of `axes[0]` and `axes[1]` should be the same.\n\n # Returns\n A tensor with shape equal to the concatenation of `x`'s shape\n (less the dimension that was summed over) and `y`'s shape\n (less the batch dimension and the dimension that was summed over).\n If the final rank is 1, we reshape it to `(batch_size, 1)`.\n\n # Examples\n Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`\n `batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal\n of `x.dot(y.T)`, although we never have to calculate the off-diagonal\n elements.\n\n Shape inference:\n Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.\n If `axes` is (1, 2), to find the output shape of resultant tensor,\n loop through each dimension in `x`'s shape and `y`'s shape:\n\n * `x.shape[0]` : 100 : append to output shape\n * `x.shape[1]` : 20 : do not append to output shape,\n dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)\n * `y.shape[0]` : 100 : do not append to output shape,\n always ignore first dimension of `y`\n * `y.shape[1]` : 30 : append to output shape\n * `y.shape[2]` : 20 : do not append to output shape,\n dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)\n `output_shape` = `(100, 30)`\n\n ```python\n >>> x_batch = K.ones(shape=(32, 20, 1))\n >>> y_batch = K.ones(shape=(32, 30, 20))\n >>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])\n >>> K.int_shape(xy_batch_dot)\n (32, 1, 30)\n ```\n \"\"\"\n if isinstance(axes, int):\n axes = (axes, axes)\n x_ndim = K.ndim(x)\n y_ndim = K.ndim(y)\n if axes is None:\n # behaves like tf.batch_matmul as default\n axes = [x_ndim - 1, y_ndim - 2]\n # if K.any([isinstance(a, (list, tuple)) for a in axes]):\n # raise ValueError('Multiple target dimensions are not supported. 
' +\n # 'Expected: None, int, (int, int), ' +\n # 'Provided: ' + str(axes))\n if x_ndim > y_ndim:\n diff = x_ndim - y_ndim\n y = tf.reshape(y, tf.concat([tf.shape(y), [1] * (diff)], axis=0))\n elif y_ndim > x_ndim:\n diff = y_ndim - x_ndim\n x = tf.reshape(x, tf.concat([tf.shape(x), [1] * (diff)], axis=0))\n else:\n diff = 0\n if K.ndim(x) == 2 and K.ndim(y) == 2:\n if axes[0] == axes[1]:\n out = tf.reduce_sum(tf.multiply(x, y), axes[0])\n else:\n out = tf.reduce_sum(tf.multiply(\n tf.transpose(x, [1, 0]), y), axes[1])\n else:\n if axes is not None:\n adj_x = None if axes[0] == K.ndim(x) - 1 else True\n adj_y = True if axes[1] == K.ndim(y) - 1 else None\n else:\n adj_x = None\n adj_y = None\n out = tf.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)\n if diff:\n if x_ndim > y_ndim:\n idx = x_ndim + y_ndim - 3\n else:\n idx = x_ndim - 1\n out = tf.squeeze(out, list(range(idx, idx + diff)))\n if K.ndim(out) == 1:\n out = K.expand_dims(out, 1)\n return out\n\n\ndef squash(vectors, axis=-1):\n \"\"\"\n The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and small vector to 0\n :param vectors: some vectors to be squashed, N-dim tensor\n :param axis: the axis to squash\n :return: a Tensor with same shape as input vectors\n \"\"\"\n s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)\n scale = s_squared_norm / (1 + s_squared_norm) / \\\n K.sqrt(s_squared_norm + K.epsilon())\n return scale * vectors\n\n\nclass CapsuleLayer(tf.keras.layers.Layer):\n \"\"\"\n The capsule layer. It is similar to Dense layer. Dense layer has `in_num` inputs, each is a scalar, the output of the \n neuron from the former layer, and it has `out_num` output neurons. CapsuleLayer just expand the output of the neuron\n from scalar to vector. So its input shape = [None, input_num_capsule, input_dim_capsule] and output shape = \\\n [None, num_capsule, dim_capsule]. 
For Dense Layer, input_dim_capsule = dim_capsule = 1.\n\n :param num_capsule: number of capsules in this layer\n :param dim_capsule: dimension of the output vectors of the capsules in this layer\n :param routings: number of iterations for the routing algorithm\n \"\"\"\n\n def __init__(\n self,\n num_capsule,\n dim_capsule,\n routings=3,\n kernel_initializer=\"glorot_uniform\",\n **kwargs\n ):\n super(CapsuleLayer, self).__init__(**kwargs)\n self.num_capsule = num_capsule\n self.dim_capsule = dim_capsule\n self.routings = routings\n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n\n def build(self, input_shape):\n assert len(\n input_shape) >= 3, \"The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]\"\n self.input_num_capsule = input_shape[1]\n self.input_dim_capsule = input_shape[2]\n\n # Transform matrix\n self.W = self.add_weight(\n shape=[self.num_capsule,\n self.input_num_capsule,\n self.dim_capsule, self.input_dim_capsule],\n initializer=self.kernel_initializer,\n name=\"W\"\n )\n\n self.built = True\n\n def call(self, inputs, training=None):\n # inputs.shape=[None, input_num_capsule, input_dim_capsule]\n # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]\n inputs_expand = K.expand_dims(inputs, 1)\n\n # Replicate num_capsule dimension to prepare being multiplied by W\n # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]\n inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])\n\n # Compute `inputs * W` by scanning inputs_tiled on dimension 0.\n # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]\n # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]\n # Regard the first two dimensions as `batch` dimension,\n # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].\n # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]\n inputs_hat = K.map_fn(lambda x: batch_dot(\n x, self.W, [2, 3]), elems=inputs_tiled)\n\n # Begin: Routing algorithm ---------------------------------------------------------------------#\n # The prior for coupling coefficient, initialized as zeros.\n # b.shape = [None, self.num_capsule, self.input_num_capsule].\n b = tf.zeros(shape=[K.shape(inputs_hat)[0],\n self.num_capsule, self.input_num_capsule])\n output_list = []\n assert self.routings > 0, 'The routings should be > 0.'\n for i in range(self.routings):\n # c.shape=[batch_size, num_capsule, input_num_capsule]\n c = tf.nn.softmax(b, axis=1)\n\n # c.shape = [batch_size, num_capsule, input_num_capsule]\n # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]\n # The first two dimensions as `batch` dimension,\n # then matmal: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].\n # outputs.shape=[None, num_capsule, dim_capsule]\n # [None, 10, 16]\n outputs = squash(batch_dot(c, inputs_hat, [2, 2]))\n # output_list.append(K.expand_dims(outputs,axis=-1))\n if i < self.routings - 1:\n # outputs.shape = [None, num_capsule, dim_capsule]\n # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]\n # The first two dimensions as `batch` dimension,\n # then matmal: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].\n # b.shape=[batch_size, num_capsule, input_num_capsule]\n b += batch_dot(outputs, inputs_hat, [2, 3])\n # End: Routing algorithm -----------------------------------------------------------------------#\n # return K.concatenate(output_list,-1)\n 
return outputs\n\n def compute_output_shape(self, input_shape):\n return tuple([None, self.num_capsule, self.dim_capsule])\n # return tuple([None, self.num_capsule, self.dim_capsule, self.routings])\n\n def get_config(self):\n config = {\n 'num_capsule': self.num_capsule,\n 'dim_capsule': self.dim_capsule,\n 'routings': self.routings\n }\n base_config = super(CapsuleLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass MatMulLayer(tf.keras.layers.Layer):\n\n def __init__(self, output_dim, type, **kwargs):\n self.output_dim = output_dim\n self.type = type\n super(MatMulLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n\n # Create a trainable weight variable for this layer.\n if self.type == 1:\n self.kernel = self.add_weight(\n name='kernel_type1',\n shape=(\n input_shape[-1], self.output_dim),\n initializer='glorot_uniform',\n trainable=True\n )\n elif self.type == 2:\n self.kernel = self.add_weight(\n name='kernel_type2',\n shape=(\n input_shape[1], self.output_dim),\n initializer='glorot_uniform',\n trainable=True\n )\n\n # Be sure to call this at the end\n super(MatMulLayer, self).build(input_shape)\n\n def call(self, inputs):\n if self.type == 1:\n return K.dot(inputs, self.kernel)\n elif self.type == 2:\n new_inputs = K.permute_dimensions(inputs, (0, 2, 1))\n outputs = K.dot(new_inputs, self.kernel)\n return K.permute_dimensions(outputs, (0, 2, 1))\n\n def compute_output_shape(self, input_shape):\n if self.type == 1:\n return tuple([None, input_shape[1], self.output_dim])\n elif self.type == 2:\n return tuple([None, self.output_dim, input_shape[2]])\n" ]
[ [ "tensorflow.matmul", "tensorflow.keras.backend.tile", "tensorflow.nn.softmax", "tensorflow.multiply", "tensorflow.keras.backend.permute_dimensions", "tensorflow.shape", "tensorflow.keras.backend.dot", "tensorflow.transpose", "tensorflow.keras.backend.ndim", "tensorflow.keras.backend.square", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.backend.epsilon", "tensorflow.keras.initializers.get" ] ]
ekta1224/jellyfish
[ "3271019434448b5916dcc920d640b81375b74c05" ]
[ "tests/read_snapshot_test.py" ]
[ "import numpy as np\nimport jellyfish\n\n\ndef loading_halo():\n path = '../examples/'\n snap_name = 'test_snap'\n nhost = 1000000\n nsat = 450000\n sim = jellyfish.Hello_sim(path, snap_name, nhost, nsat, 'host_dm', 'com_host', 'pos') \n pos = sim.read_MW_snap_com_coordinates()\n assert(len(pos)==nhost), 'Length of particle array does not much with the \\\n expected {}'.format(nhost)\n assert(np.shape(pos)==(nhost, 3)), 'Wrong dimension of the position vector'\n \n\nif __name__ == \"__main__\":\n loading_halo()\n" ]
[ [ "numpy.shape" ] ]
nibraaska/td_wm
[ "543cd91e87ebd478e79d821fa8708885df5899c5" ]
[ "combined_model/all_stats/dynamic_t_non_obs_stats/newStats/reset_model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nnp.set_printoptions(threshold=np.inf)\n\nimport time, sys, random, pylab\nfrom math import fabs\n\nfrom random import randrange\nfrom random import choice\n\nfrom hrr import *\n\nfrom IPython.display import clear_output\nfrom sys import argv\n\nseed_val = int(argv[1])\n\n\n# In[ ]:\n\n\ndef seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n\n\n# In[ ]:\n\n\nseed(seed_val)\n\n\n# In[ ]:\n\n\ndef plot_all_graphs():\n get_ipython().run_line_magic('matplotlib', 'inline')\n fig, axes = plt.subplots(nrows=num_of_atrs, ncols=num_obs_tasks+1)\n fig.set_figwidth(15)\n fig.set_figheight(15)\n plt.rcParams.update({'font.size': 14})\n \n if num_of_atrs > 1:\n for x in range(num_of_atrs):\n x_ind = x\n y_for_rwd = 0\n y_for_no_rwd = 0\n\n for wm in list(dict.fromkeys([signal + \"In\" if signal != \"I\" else signal for signal in signals] + [\"I\"])):\n position = np.arange(size_of_maze)\n value = np.zeros(size_of_maze)\n for signal in signals + [\"I\"]:\n lab = \"WM:\" + wm + \"*Signal:\" + signal + reward_tkn() + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state) + \"*rewardTkn\", x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[x_ind,y_for_rwd].title.set_text(wm + \" with rewardTkn \" + \"Atr: \" + str(x))\n axes[x_ind,y_for_rwd].plot(position, value, label=lab)\n axes[x_ind,y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[x_ind,y_for_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_rwd += 1\n\n y = x + 1\n value = np.zeros(size_of_maze)\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n lab = \"WM:\" + wm + \"*Signal:\" + signal + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state), x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[x_ind,y_for_no_rwd].title.set_text(wm + \" Atr: \" + str(x))\n axes[x_ind,y_for_no_rwd].plot(position, value, label=lab)\n axes[x_ind,y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[x_ind,y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_no_rwd += 1\n \n else:\n for x in range(num_of_atrs):\n x_ind = x\n y_for_rwd = 0\n y_for_no_rwd = 0\n\n for wm in list(dict.fromkeys([signal + \"In\" if signal != \"I\" else signal for signal in signals] + [\"I\"])):\n position = np.arange(size_of_maze)\n value = np.zeros(size_of_maze)\n for signal in signals + [\"I\"]:\n lab = \"WM:\" + wm + \"*Signal:\" + signal + reward_tkn() + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state) + \"*rewardTkn\", x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[y_for_no_rwd].title.set_text(wm + \" with rewardTkn \" + \"Atr: \" + str(x))\n axes[y_for_no_rwd].plot(position, value, label=lab)\n axes[y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_rwd += 1\n\n y = x + 1\n value = np.zeros(size_of_maze)\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n lab 
= \"WM:\" + wm + \"*Signal:\" + signal + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state), x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[y_for_no_rwd].title.set_text(wm + \" Atr: \" + str(x))\n axes[y_for_no_rwd].plot(position, value, label=lab)\n axes[y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_no_rwd += 1\n\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.show()\n \ndef plot_graph(data):\n get_ipython().run_line_magic('matplotlib', 'inline')\n plt.plot(data)\n plt.show()\n \ndef live_graphs():\n get_ipython().run_line_magic('matplotlib', 'qt')\n mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=[\"r\", \"g\", \"b\", \"y\"]) \n fig, axes = plt.subplots(nrows=num_of_atrs, ncols=num_obs_tasks+1)\n \n if num_of_atrs > 1:\n for x in range(num_of_atrs):\n x_ind = x\n y_for_no_rwd = 0\n for wm in list(dict.fromkeys([signal + \"In\" if signal != \"I\" else signal for signal in signals] + [\"I\"])):\n position = np.arange(size_of_maze)\n value = np.zeros(size_of_maze)\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n lab = \"WM:\" + wm + \"*Signal:\" + signal + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state), x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[x_ind,y_for_no_rwd].title.set_text(wm + \" Atr: \" + str(x))\n axes[x_ind,y_for_no_rwd].plot(position, value, label=lab)\n axes[x_ind,y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[x_ind,y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_no_rwd += 1\n else:\n for x in range(num_of_atrs):\n x_ind = x\n y_for_no_rwd = 0\n for wm in list(dict.fromkeys([signal + \"In\" if signal != \"I\" else signal for signal in signals] + [\"I\"])):\n position = np.arange(size_of_maze)\n value = np.zeros(size_of_maze)\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n lab = \"WM:\" + wm + \"*Signal:\" + signal + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state), x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[y_for_no_rwd].title.set_text(wm + \" Atr: \" + str(x))\n axes[y_for_no_rwd].plot(position, value, label=lab)\n axes[y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_no_rwd += 1 \n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n ani = animation.FuncAnimation(fig, animate, interval=60000)\n plt.show()\n plt.suptitle(\"{0} Non-Observable tasks and {1} Observable tasks with goals: {2}\".format(num_non_obs_tasks, num_obs_tasks, goals), fontsize=30)\n \ndef animate(i):\n if num_of_atrs > 1:\n for x in range(num_of_atrs):\n x_ind = x\n y_for_no_rwd = 0\n for wm in list(dict.fromkeys([signal + \"In\" if signal != \"I\" else signal for signal in signals] + [\"I\"])):\n position = np.arange(size_of_maze)\n value = np.zeros(size_of_maze)\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n lab = \"WM:\" + wm + \"*Signal:\" + signal + \"*Atr:\" + str(x)\n for state in 
range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state), x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[x_ind,y_for_no_rwd].title.set_text(wm + \" Atr: \" + str(x))\n axes[x_ind,y_for_no_rwd].plot(position, value, label=lab)\n axes[x_ind,y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[x_ind,y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_no_rwd += 1\n else:\n for x in range(num_of_atrs):\n x_ind = x\n y_for_no_rwd = 0\n for wm in list(dict.fromkeys([signal + \"In\" if signal != \"I\" else signal for signal in signals] + [\"I\"])):\n position = np.arange(size_of_maze)\n value = np.zeros(size_of_maze)\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n lab = \"WM:\" + wm + \"*Signal:\" + signal + \"*Atr:\" + str(x)\n for state in range(size_of_maze):\n encode_str = build_hrr_string(wm, signal, str(state), x)\n value[state] = np.dot(weights, ltm.encode(encode_str)) + bias\n axes[y_for_no_rwd].title.set_text(wm + \" Atr: \" + str(x))\n axes[y_for_no_rwd].plot(position, value, label=lab)\n axes[y_for_no_rwd].tick_params(direction='out', length=6, width=2,\n grid_color='r', grid_alpha=0.5)\n axes[y_for_no_rwd].legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=1, prop={'size': 10})\n y_for_no_rwd += 1\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n\n# In[ ]:\n\n\ndef update_progress(progress, episode):\n bar_length = 50\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n if progress < 0:\n progress = 0\n if progress >= 1:\n progress = 1\n\n block = int(round(bar_length * progress))\n\n clear_output(wait = True)\n text = \"Episode {0}, Progress: [{1}] {2:.1f}%\".format(episode, \"=\" * block + \".\" * (bar_length - block), progress * 100)\n print(text)\n\n\n# In[ ]:\n\n\ndef get_moves(state, size_of_maze):\n if(state == 0):\n return size_of_maze - 1, 1\n elif(state == size_of_maze - 1):\n return size_of_maze - 2, 0\n else:\n return state - 1, state + 1\n\n\n# In[ ]:\n\n\ndef build_hrr_string(wm, signal, state, atr):\n if wm == \"I\" and signal == \"I\":\n return \"State:\" + str(state) + \"*\" + \"Atr:\" + str(atr)\n elif wm == \"I\":\n return \"Signal:\" + str(signal) + \"*\" + \"State:\" + str(state) + \"*\" + \"Atr:\" + str(atr)\n elif signal == \"I\":\n return \"WM:\" + str(wm) + \"*\" + \"State:\" + str(state) + \"*\" + \"Atr:\" + str(atr)\n else:\n return \"WM:\" + str(wm) + \"*\" + \"Signal:\" + str(signal) + \"*\" + \"State:\" + str(state) + \"*\" + \"Atr:\" + str(atr)\n\n\n# In[ ]:\n\n\ndef context_policy_negative(atr):\n return (atr + 1)%num_of_atrs\n\ndef context_policy_positive(wm, signal, state, atr):\n val = -9999\n for atr in range(0, num_of_atrs):\n encode_str = build_hrr_string(wm, signal, state, atr)\n temp = np.dot(weights, ltm.encode(encode_str)) + bias\n if temp > val:\n val = temp\n s_atr = atr\n return s_atr\n\n\n# In[ ]:\n\n\ndef reward_tkn():\n return \"*rewardTkn\"\n\n\n# In[ ]:\n\n\ndef move_policy(goal, moves, wms, signals, atr, rand_on):\n val = -9999\n for move in moves:\n for wm in list(dict.fromkeys(wms + [\"I\"])):\n for signal in list(dict.fromkeys(signals + [\"I\"])):\n if move == goal:\n encode_str = build_hrr_string(wm, signal, str(move) + reward_tkn(), atr)\n else:\n encode_str = build_hrr_string(wm, signal, move, atr)\n if (debug):\n print(encode_str)\n temp = 
np.dot(weights, ltm.encode(encode_str)) + bias\n if debug:\n if signal != \"I\":\n print(\"Move: {0}, WM: {1}, Signal: {2}In, Atr: {3}, Value: {4}\".format(move, wm, signal, atr, temp))\n else:\n print(\"Move: {0}, WM: {1}, Signal: {2}, Atr: {3}, Value: {4}\".format(move, wm, signal, atr, temp))\n if temp > val:\n val = temp\n s_move = move\n if signal != \"I\":\n s_wm = signal + \"In\"\n else:\n s_wm = wm\n if(np.random.random_sample() < e_soft) and rand_on:\n if(debug):\n print(\"RANDOM MOVE\")\n return (np.random.choice(moves), wm, atr, True)\n \n return (s_move, s_wm, atr, False)\n\n\n# In[ ]:\n\n\ndef logmod(x):\n return np.sign(x)*np.log(abs(x)+1)\n\n\n# In[ ]:\n\n\ndef get_opt_steps(start, goal, size_of_maze):\n opt = abs(goal - start)\n if opt > size_of_maze / 2:\n opt = size_of_maze - opt\n return opt\n\n\n# In[ ]:\n\n\ndef start_testing(testing, rand_on, alpha, threshold_alpha, atr_alpha):\n testing = True\n rand_on = 0\n alpha = 0.01\n threshold_alpha = 0\n atr_alpha = 0\n return testing, rand_on, alpha, threshold_alpha, atr_alpha\n\n\n# In[ ]:\n\n\ndef reset(num_of_atrs, atr_values, threshold, hrr_length, ltm, weights, eligibility):\n num_of_atrs += 1\n atr_values = [1 * reward_good] * num_of_atrs\n if dynamic_threshold:\n threshold = 1\n hrr_length = (num_of_atrs * hrr_length) / (num_of_atrs - 1)\n del ltm\n ltm = LTM(int(hrr_length), normalized)\n weights = hrr(int(hrr_length), normalized)\n eligibility = np.zeros(int(hrr_length))\n return num_of_atrs, atr_values, threshold, hrr_length, ltm, weights, eligibility\n\n\n# In[ ]:\n\n\n# Number of training cycles\nepisodes = 100000\n\n# Hrr parameters\nhrr_length = 7168\nnormalized = True\n\n# How many steps to take before quiting\nsteps_till_quit = 100\n\n# Task\nsignals = [\"I\"]\ngoals = [[0], [4], [7], [10], [13]]\n\n# Maze parameters\nsize_of_maze = 20\nnon_obs_task_switch_rate = 500\nnum_non_obs_tasks = len(goals)\nnum_obs_tasks = len(signals)\n\n# Arguments for neural network\ninput_size = hrr_length\noutput_size = 1\ndiscount = 0.7\nalpha = 0.1\n\n# Reward for temporal difference learning\nreward_bad = -1\nreward_good = 0\n\n# Dynamic atrs hyperparameters\nnum_of_atrs = 1\natr_alpha = 0.00063\natr_values = (np.ones(num_of_atrs) * reward_good).tolist()\natr_threshold = -0.5\nthreshold_vals = []\n\n# Threshold for non observable task switching\n# threshold = 0.3\nthreshold = 1\nthreshold_alpha = 0.0001\ndynamic_threshold = True\n\n# Expolration rate\ne_soft = 0.00001\nrand_on = 1\n\n# Eligibility trace rate\neli_lambda = 0.0\n\n# Neural network\nweights = hrr(hrr_length, normalized)\nbias = 1\n\n# Eligibility trace\neligibility = np.zeros(hrr_length)\n\n# Accurcay test percentage\npercent_check = 9\n\n# Start values for the agent\nnon_obs = 0\ncurrent_atr = 0\ncurrent_wm = \"I\"\nchanged = False\n\n# Flag for printing values\ndebug = False\ntesting = False\n\ncreate_plots = False\nepisodic_memory = False\n\nstep_store = []\nif create_plots:\n pos_err_store = []\n neg_err_store = []\n total_error = []\n total_goal_error = []\n switch_error = []\n norm_error = []\n\n# Live graph flag\nlive_graph = False\n\n# Ltm is created\nltm = LTM(hrr_length, normalized)\n\n\n# In[ ]:\n\n\n# start_time = time.time()\n\n\n# In[ ]:\n\n\nfor x in range(episodes):\n \n # Initial state\n current_state = random.randint(0, size_of_maze - 1)\n start = current_state\n current_signal = np.random.choice(signals)\n eligibility *= 0.0\n \n if episodic_memory:\n episode_memory = []\n \n changed = False\n \n # Set the goal for the tast\n if 
x%non_obs_task_switch_rate == 0:\n non_obs = choice([i for i in range(len(goals)) if i not in [non_obs]])\n changed = True\n if num_obs_tasks == 1:\n goal = goals[non_obs][0]\n else:\n goal = goals[non_obs][signals.index(current_signal)]\n \n steps = 0\n opt_steps = get_opt_steps(current_state, goal, size_of_maze)\n \n # Start testing phase\n if testing == False and x > ((episodes*percent_check) / 10):\n testing, rand_on, alpha, threshold_alpha, atr_alpha = start_testing(testing, rand_on, alpha, threshold_alpha, atr_alpha)\n \n for y in range(steps_till_quit):\n if create_plots:\n threshold_vals += [threshold]\n if (current_state == goal):\n encode_str = build_hrr_string(current_wm, current_signal, str(current_state) + reward_tkn(), current_atr)\n goal_hrr = ltm.encode(encode_str)\n goal_value = np.dot(weights, goal_hrr) + bias\n \n if episodic_memory:\n episode_memory += [[current_state, goal_value, goal]]\n\n error = reward_good - goal_value\n eligibility *= eli_lambda\n eligibility = eligibility + goal_hrr\n weights = np.add(weights, (alpha * logmod(error) * eligibility))\n \n if dynamic_threshold:\n threshold += threshold_alpha * logmod(error)\n \n atr_values[current_atr] += atr_alpha * logmod(error)\n \n if create_plots:\n total_goal_error += [error]\n \n if(debug):\n print(\"In goal with value {0}\".format(goal_value))\n \n break\n \n # Store info about previous state \n previous_wm = current_wm\n previous_signal = current_signal\n previous_state = current_state\n previous_atr = current_atr\n \n if debug:\n print(\"Previous WM:, {0}, Signal:, {1}, State, {2}, ATR:, {3}\".format(previous_wm, previous_signal, previous_state, previous_atr))\n \n encode_str = build_hrr_string(previous_wm, previous_signal, previous_state, previous_atr)\n previous_state_hrr = ltm.encode(encode_str)\n previous_value = np.dot(weights, previous_state_hrr) + bias\n \n if debug:\n print(\"Started with state: {0}, State Value: {1}, WM: {2}, Atr: {3}\".format(previous_state, previous_value, previous_wm, previous_atr))\n \n current_signal = \"I\"\n left, right = get_moves(previous_state, size_of_maze)\n if previous_signal != \"I\":\n previous_signal += \"In\"\n \n # Make the move\n move, wm, current_atr, random_move = move_policy(goal, [left, right], [previous_wm, previous_signal], [current_signal], previous_atr, rand_on)\n steps += 1\n current_wm = wm\n current_state = move\n \n if random_move:\n eligibility *= 0.0\n \n if(debug):\n print(\"Moves {0}, taken {1}\".format([left, right], move))\n \n if debug:\n print(\"Current WM {0}, Current Signal {1}, Current state {2}, Current ATR {3}\".format(current_wm, current_signal, current_state, current_atr))\n \n if current_state == goal:\n encode_str = build_hrr_string(current_wm, current_signal, str(current_state) + reward_tkn(), current_atr) \n if debug:\n print(\"In goal: WM: {1}, ATR: {2}\".format(current_wm, current_atr))\n else:\n encode_str = build_hrr_string(current_wm, current_signal, current_state, current_atr)\n \n current_state_hrr = ltm.encode(encode_str)\n current_value = np.dot(weights, current_state_hrr) + bias\n \n sarsa_error = (reward_bad + discount * current_value) - previous_value\n eligibility *= eli_lambda\n eligibility = eligibility + previous_state_hrr\n weights = np.add(weights, (alpha * logmod(sarsa_error) * eligibility))\n\n atr_values[current_atr] += atr_alpha * logmod(sarsa_error)\n \n if dynamic_threshold:\n threshold += threshold_alpha * logmod(sarsa_error)\n \n if create_plots:\n total_error += [sarsa_error]\n norm_error += 
[sarsa_error]\n \n if sarsa_error > fabs(threshold) or sarsa_error < -fabs(threshold):\n \n if np.mean(atr_values) < atr_threshold:\n num_of_atrs, atr_values, threshold, hrr_length, ltm, weights, eligibility = reset(num_of_atrs, atr_values, threshold, hrr_length, ltm, weights, eligibility)\n \n if create_plots:\n switch_error += [sarsa_error]\n \n if create_plots:\n if testing and sarsa_error > fabs(threshold):\n pos_err_store += [sarsa_error]\n elif testing and sarsa_error < -fabs(threshold):\n neg_err_store += [sarsa_error]\n \n if sarsa_error > fabs(threshold):\n current_atr = context_policy_positive(current_wm, current_signal, current_state, current_atr)\n elif sarsa_error < -fabs(threshold):\n current_atr = context_policy_negative(previous_atr)\n \n eligibility *= 0.0\n \n if changed:\n steps = 0\n start = current_state\n opt_steps = get_opt_steps(current_state, goal, size_of_maze)\n \n if(debug):\n print(\"Changed atr from {0} to {1}\".format(previous_atr, current_atr))\n \n if debug:\n input(\"\")\n \n if testing:\n if current_state == goal:\n step_store += [steps - opt_steps]\n else:\n step_store += [steps_till_quit]\n \n# update_progress(x / episodes, x)\n \n if live_graph:\n plt.pause(0.001)\n \n#update_progress(1, episodes)\n\n\n# In[ ]:\n\n\n# end_time = time.time()\n# print(\"Total time: {0} minutes\".format((end_time - start_time)/60))\n\n\n# In[ ]:\n\n\n# plot_graph(step_store)\naccuracy = (len(step_store)-np.count_nonzero(step_store))*100.0 / len(step_store)\nprint(accuracy)\n\n\n# In[ ]:\n\n\n# plot_all_graphs()\n\n\n# In[ ]:\n\n\nif create_plots:\n plot_graph(pos_err_store)\n\n\n# In[ ]:\n\n\nif create_plots:\n plot_graph(neg_err_store)\n\n\n# In[ ]:\n\n\nif create_plots:\n plot_graph(total_error)\n\n\n# In[ ]:\n\n\nif create_plots:\n plot_graph(total_goal_error)\n\n\n# In[ ]:\n\n\nif create_plots:\n plt.plot(switch_error)\n\n\n# In[ ]:\n\n\nif create_plots:\n plot_graph(norm_error)\n\n\n# In[ ]:\n\n\n# threshold\n\n\n# In[ ]:\n\n\n# print(atr_values)\n\n\n# In[ ]:\n\n\n# plot_graph(threshold_vals)\n\n\n# In[ ]:\n\n\n# hrr_length\n\n\n# In[ ]:\n\n\n# ltm.count()\n\n\n# In[ ]:\n\n\n# seed_val\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "numpy.random.choice", "numpy.arange", "numpy.set_printoptions", "matplotlib.pyplot.subplots", "numpy.random.random_sample", "numpy.ones", "matplotlib.pyplot.plot", "numpy.sign", "numpy.mean", "numpy.count_nonzero", "matplotlib.cycler", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.pause" ] ]
AhmedYounes94/Keras-Self-Attention-Seq
[ "0ae0c32df3e73e6923e90894c6a901066eddf379" ]
[ "keras_self_attention/seq_self_attention.py" ]
[ "import keras\nimport keras.backend as K\nimport tensorflow as tf\n\n\nclass SeqSelfAttention(keras.layers.Layer):\n\n ATTENTION_TYPE_ADD = 'additive'\n ATTENTION_TYPE_MUL = 'multiplicative'\n\n def __init__(self,\n units=32,\n attention_width=None,\n attention_type=ATTENTION_TYPE_ADD,\n return_attention=False,\n history_only=False,\n kernel_initializer='glorot_normal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_additive_bias=True,\n use_attention_bias=True,\n attention_activation=None,\n attention_regularizer_weight=0.0,\n **kwargs):\n \"\"\"Layer initialization.\n\n :param units: The dimension of the vectors that used to calculate the attention weights.\n :param attention_width: The width of local attention.\n :param attention_type: 'additive' or 'multiplicative'.\n :param return_attention: Whether to return the attention weights for visualization.\n :param history_only: Only use historical pieces of data.\n :param kernel_initializer: The initializer for weight matrices.\n :param bias_initializer: The initializer for biases.\n :param kernel_regularizer: The regularization for weight matrices.\n :param bias_regularizer: The regularization for biases.\n :param kernel_constraint: The constraint for weight matrices.\n :param bias_constraint: The constraint for biases.\n :param use_additive_bias: Whether to use bias while calculating the relevance of inputs features\n in additive mode.\n :param use_attention_bias: Whether to use bias while calculating the weights of attention.\n :param attention_activation: The activation used for calculating the weights of attention.\n :param attention_regularizer_weight: The weights of attention regularizer.\n :param kwargs: Parameters for parent class.\n \"\"\"\n self.supports_masking = True\n self.units = units\n self.attention_width = attention_width\n self.attention_type = attention_type\n self.return_attention = return_attention\n self.history_only = history_only\n if history_only and attention_width is None:\n self.attention_width = int(1e10)\n\n self.use_additive_bias = use_additive_bias\n self.use_attention_bias = use_attention_bias\n self.kernel_initializer = keras.initializers.get(kernel_initializer)\n self.bias_initializer = keras.initializers.get(bias_initializer)\n self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = keras.constraints.get(kernel_constraint)\n self.bias_constraint = keras.constraints.get(bias_constraint)\n self.attention_activation = keras.activations.get(attention_activation)\n self.attention_regularizer_weight = attention_regularizer_weight\n self._backend = keras.backend.backend()\n\n if attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n self.Wx, self.Wt, self.bh = None, None, None\n self.Wa, self.ba = None, None\n elif attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n self.Wa, self.ba = None, None\n else:\n raise NotImplementedError('No implementation for attention type : ' + attention_type)\n\n super(SeqSelfAttention, self).__init__(**kwargs)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'attention_width': self.attention_width,\n 'attention_type': self.attention_type,\n 'return_attention': self.return_attention,\n 'history_only': self.history_only,\n 'use_additive_bias': self.use_additive_bias,\n 'use_attention_bias': self.use_attention_bias,\n 'kernel_initializer': 
keras.regularizers.serialize(self.kernel_initializer),\n 'bias_initializer': keras.regularizers.serialize(self.bias_initializer),\n 'kernel_regularizer': keras.regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': keras.regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': keras.constraints.serialize(self.kernel_constraint),\n 'bias_constraint': keras.constraints.serialize(self.bias_constraint),\n 'attention_activation': keras.activations.serialize(self.attention_activation),\n 'attention_regularizer_weight': self.attention_regularizer_weight,\n }\n base_config = super(SeqSelfAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n self._build_additive_attention(input_shape)\n elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n self._build_multiplicative_attention(input_shape)\n super(SeqSelfAttention, self).build(input_shape)\n\n def _build_additive_attention(self, input_shape):\n feature_dim = input_shape[2]\n\n self.Wt = self.add_weight(shape=(feature_dim, self.units),\n name='{}_Add_Wt'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.Wx = self.add_weight(shape=(feature_dim, self.units),\n name='{}_Add_Wx'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_additive_bias:\n self.bh = self.add_weight(shape=(self.units,),\n name='{}_Add_bh'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n self.Wa = self.add_weight(shape=(self.units, 1),\n name='{}_Add_Wa'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_attention_bias:\n self.ba = self.add_weight(shape=(1,),\n name='{}_Add_ba'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n def _build_multiplicative_attention(self, input_shape):\n feature_dim = input_shape[2]\n\n self.Wa = self.add_weight(shape=(feature_dim, feature_dim),\n name='{}_Mul_Wa'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_attention_bias:\n self.ba = self.add_weight(shape=(1,),\n name='{}_Mul_ba'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n def call(self, inputs, mask=None, **kwargs):\n if isinstance(inputs, list):\n inputs, positions = inputs\n positions = K.cast(positions, 'int32')\n mask = mask[1]\n else:\n positions = None\n\n input_len = K.shape(inputs)[1]\n\n if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n e = self._call_additive_emission(inputs)\n elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n e = self._call_multiplicative_emission(inputs)\n\n if self.attention_activation is not None:\n e = self.attention_activation(e)\n e = K.exp(e)\n if self.attention_width is not None:\n ones = tf.ones((input_len, input_len))\n if self.history_only:\n local = tf.matrix_band_part(\n ones,\n K.minimum(input_len, self.attention_width - 1),\n 0,\n )\n else:\n local = 
tf.matrix_band_part(\n ones,\n K.minimum(input_len, self.attention_width // 2),\n K.minimum(input_len, (self.attention_width - 1) // 2),\n )\n e = e * K.expand_dims(local, 0)\n if mask is not None:\n mask = K.cast(mask, K.floatx())\n mask = K.expand_dims(mask)\n e = K.permute_dimensions(K.permute_dimensions(e * mask, (0, 2, 1)) * mask, (0, 2, 1))\n\n # a_{t} = \\text{softmax}(e_t)\n s = K.sum(e, axis=-1)\n s = K.tile(K.expand_dims(s, axis=-1), K.stack([1, 1, input_len]))\n a = e / (s + K.epsilon())\n\n # l_t = \\sum_{t'} a_{t, t'} x_{t'}\n inputs = K.permute_dimensions(inputs, (0, 2, 1))\n v = K.permute_dimensions(K.batch_dot(inputs, K.permute_dimensions(a, (0, 2, 1))), (0, 2, 1))\n if self.attention_regularizer_weight > 0.0:\n self.add_loss(self._attention_regularizer(a))\n\n if positions is not None:\n pos_num = K.shape(positions)[1]\n batch_indices = K.tile(K.expand_dims(K.arange(K.shape(inputs)[0]), axis=-1), K.stack([1, pos_num]))\n pos_indices = K.stack([batch_indices, positions], axis=-1)\n v = tf.gather_nd(v, pos_indices)\n a = tf.gather_nd(a, pos_indices)\n\n if self.return_attention:\n return [v, a]\n return v\n\n def _call_additive_emission(self, inputs):\n input_shape = K.shape(inputs)\n batch_size, input_len = input_shape[0], input_shape[1]\n\n # h_{t, t'} = \\tanh(x_t^T W_t + x_{t'}^T W_x + b_h)\n q, k = K.dot(inputs, self.Wt), K.dot(inputs, self.Wx)\n q = K.tile(K.expand_dims(q, 2), K.stack([1, 1, input_len, 1]))\n k = K.tile(K.expand_dims(k, 1), K.stack([1, input_len, 1, 1]))\n if self.use_additive_bias:\n h = K.tanh(q + k + self.bh)\n else:\n h = K.tanh(q + k)\n\n # e_{t, t'} = W_a h_{t, t'} + b_a\n if self.use_attention_bias:\n e = K.reshape(K.dot(h, self.Wa) + self.ba, (batch_size, input_len, input_len))\n else:\n e = K.reshape(K.dot(h, self.Wa), (batch_size, input_len, input_len))\n return e\n\n def _call_multiplicative_emission(self, inputs):\n # e_{t, t'} = x_t^T W_a x_{t'} + b_a\n e = K.batch_dot(K.dot(inputs, self.Wa), K.permute_dimensions(inputs, (0, 2, 1)))\n if self.use_attention_bias:\n e = e + self.ba\n return e\n\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape, pos_shape = input_shape\n output_shape = (input_shape[0], pos_shape[1], input_shape[2])\n else:\n output_shape = input_shape\n if self.return_attention:\n attention_shape = (input_shape[0], output_shape[1], input_shape[1])\n return [output_shape, attention_shape]\n return output_shape\n\n def compute_mask(self, inputs, mask=None):\n if isinstance(inputs, list):\n mask = mask[1]\n if self.return_attention:\n return [mask, None]\n return mask\n\n def _attention_regularizer(self, attention):\n batch_size = K.cast(K.shape(attention)[0], K.floatx())\n input_len = K.shape(attention)[-1]\n return self.attention_regularizer_weight * K.sum(K.square(K.batch_dot(\n attention,\n K.permute_dimensions(attention, (0, 2, 1))) - tf.eye(input_len))) / batch_size\n\n @staticmethod\n def get_custom_objects():\n return {'SeqSelfAttention': SeqSelfAttention}\n" ]
[ [ "tensorflow.gather_nd", "tensorflow.ones", "tensorflow.eye" ] ]
yinzhangyue/PDF_tool
[ "ff1c689478e0d40370724ad88da78ef8bd0bf3d1", "ff1c689478e0d40370724ad88da78ef8bd0bf3d1" ]
[ "Model/mmdet/core/post_processing/bbox_nms.py", "frontend/tool/a.py" ]
[ "import torch\n\nfrom mmdet.ops.nms import nms_wrapper\n\n\ndef multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None):\n \"\"\"NMS for multi-class bboxes.\n\n Args:\n multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n multi_scores (Tensor): shape (n, #class), where the 0th column\n contains scores of the background class, but this will be ignored.\n score_thr (float): bbox threshold, bboxes with scores lower than it\n will not be considered.\n nms_thr (float): NMS IoU threshold\n max_num (int): if there are more than max_num bboxes after NMS,\n only top max_num will be kept.\n score_factors (Tensor): The factors multiplied to scores before\n applying NMS\n\n Returns:\n tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels\n are 0-based.\n \"\"\"\n #print('calling this')\n num_classes = multi_scores.shape[1]\n bboxes, labels = [], []\n nms_cfg_ = nms_cfg.copy()\n nms_type = nms_cfg_.pop('type', 'nms')\n nms_op = getattr(nms_wrapper, nms_type)\n #print(nms_op)\n for i in range(1, num_classes):\n cls_inds = multi_scores[:, i] > score_thr\n if not cls_inds.any():\n continue\n # get bboxes and scores of this class\n if multi_bboxes.shape[1] == 4:\n _bboxes = multi_bboxes[cls_inds, :]\n else:\n _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]\n _scores = multi_scores[cls_inds, i]\n if score_factors is not None:\n _scores *= score_factors[cls_inds]\n cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)\n cls_dets, _ = nms_op(cls_dets, **nms_cfg_)\n cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ),\n i - 1,\n dtype=torch.long)\n bboxes.append(cls_dets)\n labels.append(cls_labels)\n if bboxes:\n bboxes = torch.cat(bboxes)\n labels = torch.cat(labels)\n if bboxes.shape[0] > max_num:\n _, inds = bboxes[:, -1].sort(descending=True)\n inds = inds[:max_num]\n bboxes = bboxes[inds]\n labels = labels[inds]\n else:\n bboxes = multi_bboxes.new_zeros((0, 5))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n\n return bboxes, labels\n", "# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2.cv2 as cv2\nfrom numpy import float32\n\nif __name__ == \"__main__\":\n # Read image\n img = cv2.imread(\"./c73.png\")\n a = np.array([[\n 1458.4429931640625, 145.316650390625, 1554.5313720703125,\n 176.924560546875, 1\n ]],\n dtype=float32)\n b = np.array([[\n 1734.0457763671875, 191.89208984375, 1829.681640625, 222.283935546875,\n 1\n ]],\n dtype=float32)\n # Draw rectangle\n j = 0\n for i in a:\n if i[4] > 0.85:\n cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),\n (50, 205, 50), 4)\n # cut = img[int(i[0]):int(i[2]), int(i[1]):int(i[3])]\n # cv2.imwrite('./pic/' + str(j) + '.png', cut)\n # j += 1\n for i in b:\n if i[4] > 0.85:\n cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),\n (254, 67, 101), 4)\n # Display cropped image\n width = int(img.shape[1] / 4)\n height = int(img.shape[0] / 4)\n dim = (width, height)\n # resize image\n resized = cv2.resize(img, dim)\n\n # save the image\n cv2.imshow(\"Image\", resized)\n cv2.waitKey(0)\n\n cv2.imwrite('./c73_.png', img)\n" ]
[ [ "torch.cat" ], [ "numpy.array" ] ]
Drob-AI/The-Observer
[ "80540be5fa5e9a6a7b9123702b701998105a48a6" ]
[ "src/mod_suggest/tree_trainer.py" ]
[ "import random\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn import neighbors\nfrom sklearn import svm\nfrom sklearn import grid_search\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn import cross_validation\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import mean_squared_error\n\n\ndef split_sets(dataset, test_set_len):\n test_set = []\n original_dataset_len = len(dataset)\n dataset = list(dataset)\n while(len(test_set) < original_dataset_len * test_set_len):\n index = random.randint(0, len(dataset) - 1)\n test_set.append(dataset[index])\n del dataset[index]\n\n return ( test_set, dataset)\n\ndef split_train_result_set(set, feature_index):\n transponed = np.array(set).T\n results_set = transponed[feature_index]\n\n transponed = list(transponed)\n del transponed[feature_index]\n\n x_set = np.array(transponed).T\n results_set = results_set.T\n\n return(x_set, results_set)\n\n\ndef train_classifier_tree(dataset, feature_index, stats):\n # print(dataset, feature_index)\n test_set, train_set = split_sets(dataset, 0.1)\n x_train, y_train = split_train_result_set(train_set, feature_index)\n x_train_test, y_train_test = split_train_result_set(test_set, feature_index)\n\n # if( stats[feature_index]['type'] == 'string'):\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(x_train, y_train)\n print(accuracy_score(clf.predict(x_train_test), y_train_test))\n print(len(test_set), len(train_set))\n # else:\n # clf = tree.DecisionTreeClassifier()\n # print(x_train, y_train)\n # clf = clf.fit(x_train, y_train)\n # print(\"MSE\", mean_squared_error(clf.predict(x_train_test), y_train_test))\n # print(len(test_set), len(train_set))\n\ndef train_classifier_tree2(dataset, feature_index, stats):\n # print(dataset, feature_index)\n test_set, train_set = split_sets(dataset, 0.1)\n x_train, y_train = split_train_result_set(train_set, feature_index)\n x_train_test, y_train_test = split_train_result_set(test_set, feature_index)\n\n n_estimators=[100, 180]\n min_samples_split=[2, 10]\n\n clf = RandomForestClassifier()\n # clf = tree.DecisionTreeClassifier()\n # clf = clf.fit(x_train, y_train)\n\n nFolds = 5\n param_grid = dict(n_estimators=n_estimators, min_samples_split=min_samples_split)\n\n # param_grid = dict(max_depth=[10, 11, 12, 15],)\n\n cv = cross_validation.StratifiedKFold(y_train, nFolds)\n grid = GridSearchCV(clf, param_grid=param_grid,cv=cv)\n grid.fit(x_train, y_train)\n\n print(accuracy_score(grid.predict(x_train_test), y_train_test))\n\n\n print(len(test_set), len(train_set))\n\n\ndef train_knn(dataset, feature_index, stats):\n test_set, train_set = split_sets(dataset, 0.1)\n x_train, y_train = split_train_result_set(train_set, feature_index)\n x_train_test, y_train_test = split_train_result_set(test_set, feature_index)\n\n knn = neighbors.KNeighborsClassifier()\n knn = knn.fit(x_train, y_train)\n\n\n print(accuracy_score(knn.predict(x_train_test), y_train_test))\n\n print(len(test_set), len(train_set))\n\n\ndef train_knn2(dataset, feature_index, stats):\n test_set, train_set = split_sets(dataset, 0.1)\n x_train, y_train = split_train_result_set(train_set, feature_index)\n x_train_test, y_train_test = split_train_result_set(test_set, feature_index)\n\n nFolds = 4\n metrics = ['minkowski','euclidean','manhattan']\n weights = ['uniform','distance']\n numNeighbors = np.arange(5, 10)\n param_grid = dict(metric=metrics,weights=weights,n_neighbors=numNeighbors)\n cv = 
cross_validation.StratifiedKFold(y_train, nFolds)\n grid = GridSearchCV(neighbors.KNeighborsClassifier(), param_grid=param_grid,cv=cv)\n grid.fit(x_train, y_train)\n\n print(accuracy_score(grid.predict(x_train_test), y_train_test))\n\ndef train_classifier_svm(dataset, feature_index, stats):\n test_set, train_set = split_sets(dataset, 0.1)\n x_train, y_train = split_train_result_set(train_set, feature_index)\n x_train_test, y_train_test = split_train_result_set(test_set, feature_index)\n\n clf = svm.SVC()\n clf = clf.fit(x_train, y_train)\n print(accuracy_score(clf.predict(x_train_test), y_train_test))\n print(len(test_set), len(train_set))\n\ndef train_classifier_svm2(dataset, feature_index, stats):\n test_set, train_set = split_sets(dataset, 0.1)\n x_train, y_train = split_train_result_set(train_set, feature_index)\n x_train_test, y_train_test = split_train_result_set(test_set, feature_index)\n\n param_grid = [{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}]\n grid = GridSearchCV(svm.SVC(), param_grid=param_grid)\n\n grid.fit(x_train, y_train)\n\n print(accuracy_score(grid.predict(x_train_test), y_train_test))" ]
[ [ "sklearn.cross_validation.StratifiedKFold", "sklearn.ensemble.RandomForestClassifier", "numpy.arange", "sklearn.neighbors.KNeighborsClassifier", "sklearn.grid_search.GridSearchCV", "sklearn.tree.DecisionTreeClassifier", "sklearn.svm.SVC", "numpy.array" ] ]
Chop1/mmtracking
[ "fd62f33e399f9931b94dbdb33e201ae348af2cd8", "fd62f33e399f9931b94dbdb33e201ae348af2cd8", "fd62f33e399f9931b94dbdb33e201ae348af2cd8" ]
[ "mmtrack/datasets/sot_imagenet_vid_dataset.py", "mmtrack/datasets/builder.py", "mmtrack/models/track_heads/stark_head.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nfrom mmdet.datasets import DATASETS\n\nfrom mmtrack.datasets.parsers import CocoVID\nfrom .base_sot_dataset import BaseSOTDataset\n\n\[email protected]_module()\nclass SOTImageNetVIDDataset(BaseSOTDataset):\n \"\"\"ImageNet VID dataset of single object tracking.\n\n The dataset only support training mode.\n \"\"\"\n\n def __init__(self, ann_file, *args, **kwargs):\n \"\"\"Initialization of SOT dataset class.\n\n Args:\n ann_file (str): The coco-format annotation file of ImageNet VID\n Dataset. It will be loaded and parsed in the\n `self.load_data_infos` function.\n \"\"\"\n self.coco = CocoVID(ann_file)\n super().__init__(*args, **kwargs)\n\n def load_data_infos(self, split='train'):\n \"\"\"Load dataset information.\n\n Args:\n split (str, optional): The split of dataset. Defaults to 'train'.\n\n Returns:\n list[int]: The length of the list is the number of instances. The\n elemment in the list is instance ID in coco API.\n \"\"\"\n data_infos = list(self.coco.instancesToImgs.keys())\n return data_infos\n\n def get_bboxes_from_video(self, video_ind):\n \"\"\"Get bbox annotation about the instance in a video. Considering\n `get_bboxes_from_video` in `SOTBaseDataset` is not compatible with\n `SOTImageNetVIDDataset`, we oveload this function though it's not\n called by `self.get_ann_infos_from_video`.\n\n Args:\n video_ind (int): video index. Each video_ind denotes an instance.\n\n Returns:\n ndarray: in [N, 4] shape. The bbox is in (x, y, w, h) format.\n \"\"\"\n instance_id = self.data_infos[video_ind]\n img_ids = self.coco.instancesToImgs[instance_id]\n bboxes = []\n for img_id in img_ids:\n for ann in self.coco.imgToAnns[img_id]:\n if ann['instance_id'] == instance_id:\n bboxes.append(ann['bbox'])\n bboxes = np.array(bboxes).reshape(-1, 4)\n return bboxes\n\n def get_img_infos_from_video(self, video_ind):\n \"\"\"Get image information in a video.\n\n Args:\n video_ind (int): video index\n\n Returns:\n dict: {'filename': list[str], 'frame_ids':ndarray, 'video_id':int}\n \"\"\"\n instance_id = self.data_infos[video_ind]\n img_ids = self.coco.instancesToImgs[instance_id]\n frame_ids = []\n img_names = []\n # In ImageNetVID dataset, frame_ids may not be continuous.\n for img_id in img_ids:\n frame_ids.append(self.coco.imgs[img_id]['frame_id'])\n img_names.append(self.coco.imgs[img_id]['file_name'])\n img_infos = dict(\n filename=img_names, frame_ids=frame_ids, video_id=video_ind)\n return img_infos\n\n def get_ann_infos_from_video(self, video_ind):\n \"\"\"Get annotation information in a video.\n Note: We overload this function for speed up loading video information.\n\n Args:\n video_ind (int): video index. Each video_ind denotes an instance.\n\n Returns:\n dict: {'bboxes': ndarray in (N, 4) shape, 'bboxes_isvalid':\n ndarray, 'visible':ndarray}. 
The bbox is in\n (x1, y1, x2, y2) format.\n \"\"\"\n instance_id = self.data_infos[video_ind]\n img_ids = self.coco.instancesToImgs[instance_id]\n bboxes = []\n visible = []\n for img_id in img_ids:\n for ann in self.coco.imgToAnns[img_id]:\n if ann['instance_id'] == instance_id:\n bboxes.append(ann['bbox'])\n visible.append(not ann.get('occluded', False))\n bboxes = np.array(bboxes).reshape(-1, 4)\n bboxes_isvalid = (bboxes[:, 2] > self.bbox_min_size) & (\n bboxes[:, 3] > self.bbox_min_size)\n bboxes[:, 2:] += bboxes[:, :2]\n visible = np.array(visible, dtype=np.bool_) & bboxes_isvalid\n ann_infos = ann_infos = dict(\n bboxes=bboxes, bboxes_isvalid=bboxes_isvalid, visible=visible)\n return ann_infos\n\n def get_visibility_from_video(self, video_ind):\n \"\"\"Get the visible information in a video.\n\n Considering `get_visibility_from_video` in `SOTBaseDataset` is not\n compatible with `SOTImageNetVIDDataset`, we oveload this function\n though it's not called by `self.get_ann_infos_from_video`.\n \"\"\"\n instance_id = self.data_infos[video_ind]\n img_ids = self.coco.instancesToImgs[instance_id]\n visible = []\n for img_id in img_ids:\n for ann in self.coco.imgToAnns[img_id]:\n if ann['instance_id'] == instance_id:\n visible.append(not ann.get('occluded', False))\n visible_info = dict(visible=np.array(visible, dtype=np.bool_))\n return visible_info\n\n def get_len_per_video(self, video_ind):\n \"\"\"Get the number of frames in a video.\"\"\"\n instance_id = self.data_infos[video_ind]\n return len(self.coco.instancesToImgs[instance_id])\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport random\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import TORCH_VERSION, digit_version\nfrom mmdet.datasets.samplers import (DistributedGroupSampler,\n DistributedSampler, GroupSampler)\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import ConcatDataset\nfrom torch.utils.data.sampler import RandomSampler\n\nfrom mmtrack.datasets.samplers.quota_sampler import DistributedQuotaSampler\nfrom .base_sot_dataset import BaseSOTDataset\nfrom .samplers import DistributedVideoSampler, SOTVideoSampler\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n samples_per_epoch=None,\n dist=True,\n shuffle=True,\n seed=None,\n persistent_workers=False,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n samples_per_epoch (int | None, Optional): The number of samples per\n epoch. If equal to -1, using all samples in the datasets per epoch.\n Otherwise, using the `samples_per_epoch` samples. Default: None.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int, Optional): Seed to be used. 
Default: None.\n persistent_workers (bool): If True, the data loader will not shutdown\n the worker processes after a dataset has been consumed once.\n This allows to maintain the workers `Dataset` instances alive.\n This argument is only valid when PyTorch>=1.7.0. Default: False.\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n # We set specific data sampler for SOT datasets.\n is_sot_dataset = isinstance(dataset, BaseSOTDataset) or (\n isinstance(dataset, ConcatDataset)\n and isinstance(dataset.datasets[0], BaseSOTDataset))\n if dist:\n # ----- distributed train mode ------\n if shuffle:\n if is_sot_dataset:\n if samples_per_epoch is None:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=True)\n else:\n # get fixed number of samples per epoch to train\n # sampling with no-replacement mode\n sampler = DistributedQuotaSampler(\n dataset,\n samples_per_epoch,\n world_size,\n rank,\n replacement=False)\n else:\n sampler = DistributedGroupSampler(dataset, samples_per_gpu,\n world_size, rank)\n # ----- distributed test mode ------\n else:\n if hasattr(dataset, 'load_as_video') and dataset.load_as_video:\n # sample videos\n sampler = DistributedVideoSampler(\n dataset, world_size, rank, shuffle=False)\n else:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=False)\n\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n # ----- non-distributed train mode ------\n if shuffle:\n if is_sot_dataset:\n if samples_per_epoch is None:\n sampler = RandomSampler(dataset)\n else:\n # get fixed number of samples per epoch to train\n # sampling with replacement mode\n sampler = RandomSampler(\n dataset,\n replacement=True,\n num_samples=samples_per_epoch)\n else:\n sampler = GroupSampler(dataset, samples_per_gpu)\n # ----- non-distributed test mode ------\n else:\n sampler = SOTVideoSampler(dataset) if is_sot_dataset else None\n\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n if (TORCH_VERSION != 'parrots'\n and digit_version(TORCH_VERSION) >= digit_version('1.7.0')):\n kwargs['persistent_workers'] = persistent_workers\n elif persistent_workers is True:\n warnings.warn('persistent_workers is invalid because your pytorch '\n 'version is lower than 1.7.0')\n\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=False,\n worker_init_fn=init_fn,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n torch.manual_seed(worker_seed)\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn.bricks import ConvModule\nfrom mmcv.cnn.bricks.transformer import build_positional_encoding\nfrom mmcv.runner.base_module import BaseModule\nfrom mmdet.models import HEADS\nfrom mmdet.models.builder import build_head, build_loss\nfrom mmdet.models.utils import Transformer, build_transformer\nfrom mmdet.models.utils.builder import TRANSFORMER\nfrom torch import nn\n\n\[email protected]_module()\nclass CornerPredictorHead(BaseModule):\n \"\"\"Corner Predictor head.\n\n Args:\n inplanes (int): number of input channels\n channel (int): the output channel of the first conv block\n feat_size (int): the size of the feature map\n stride (int): the stride of the feature map from the backbone\n \"\"\"\n\n def __init__(self, inplanes, channel, feat_size=20, stride=16):\n super(CornerPredictorHead, self).__init__()\n self.feat_size = feat_size\n self.stride = stride\n self.img_size = self.feat_size * self.stride\n\n def conv_module(in_planes, out_planes, kernel_size=3, padding=1):\n # The module's pipeline: Conv -> BN -> ReLU.\n return ConvModule(\n in_channels=in_planes,\n out_channels=out_planes,\n kernel_size=kernel_size,\n padding=padding,\n bias=True,\n norm_cfg=dict(type='BN', requires_grad=True),\n act_cfg=dict(type='ReLU'),\n inplace=True)\n\n # top-left corner\n self.tl_corner_pred = nn.Sequential(\n conv_module(inplanes, channel), conv_module(channel, channel // 2),\n conv_module(channel // 2, channel // 4),\n conv_module(channel // 4, channel // 8),\n nn.Conv2d(channel // 8, 1, kernel_size=1))\n # bottom-right corner\n self.br_corner_pred = nn.Sequential(\n conv_module(inplanes, channel), conv_module(channel, channel // 2),\n conv_module(channel // 2, channel // 4),\n conv_module(channel // 4, channel // 8),\n nn.Conv2d(channel // 8, 1, kernel_size=1))\n\n def forward(self, x):\n \"\"\"Forward pass with input x.\n\n Args:\n x (Tensor): of shape (bs, C, H, W).\n Returns:\n (Tensor): bbox of shape (bs, 4) in (tl_x, tl_y, br_x, br_y) format.\n \"\"\"\n score_map_tl, score_map_br = self.get_score_map(x)\n coorx_tl, coory_tl = self.soft_argmax(score_map_tl)\n coorx_br, coory_br = self.soft_argmax(score_map_br)\n return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1)\n\n def get_score_map(self, x):\n \"\"\"Score map branch.\n\n Args:\n x (Tensor): of shape (bs, C, H, W).\n Returns:\n score_map_tl (Tensor): of shape (bs, 1, H, W). The score map of top\n left corner of tracking bbox.\n score_map_br (Tensor): of shape (bs, 1, H, W). The score map of\n bottom right corner of tracking bbox.\n \"\"\"\n score_map_tl = self.tl_corner_pred(x)\n score_map_br = self.br_corner_pred(x)\n return score_map_tl, score_map_br\n\n def soft_argmax(self, score_map):\n \"\"\"Get soft-argmax coordinate for the given score map.\n\n Args:\n score_map (Tensor): of shape (bs, 1, feat_size, feat_size). The\n last score map in the bbox_head branch.\n\n Returns:\n soft_argmax_x (Tensor): of shape (bs,). The values are in range\n [0, self.feat_size * self.stride]\n soft_argmax_y (Tensor): of shape (bs,). The values are in range\n [0, self.feat_size * self.stride]\n \"\"\"\n # (bs, feat_size * feat_size)\n score_vec = score_map.view((-1, self.feat_size * self.feat_size))\n prob_vec = nn.functional.softmax(score_vec, dim=1)\n\n if not hasattr(self, 'coord_x'):\n # generate coordinates and indexes\n self.indice = torch.arange(\n 0, self.feat_size, device=score_map.device).view(\n -1, 1) * self.stride\n # generate mesh-grid\n self.coord_x = self.indice.repeat((self.feat_size, 1)) \\\n .view((self.feat_size * self.feat_size,)).float()\n self.coord_y = self.indice.repeat((1, self.feat_size)) \\\n .view((self.feat_size * self.feat_size,)).float()\n\n soft_argmax_x = torch.sum((self.coord_x * prob_vec), dim=1)\n soft_argmax_y = torch.sum((self.coord_y * prob_vec), dim=1)\n return soft_argmax_x, soft_argmax_y\n\n\[email protected]_module()\nclass ScoreHead(nn.Module):\n \"\"\"Predict the confidence score of the target in the current frame.\n\n Cascades multiple FC layers and applies ReLU to the output of every\n layer except the last.\n\n Args:\n input_dim (int): the dim of input.\n hidden_dim (int): the dim of hidden layers.\n output_dim (int): the dim of output.\n num_layers (int): the number of FC layers.\n use_bn (bool, optional): whether to use BN after each FC layer.\n Defaults to False.\n \"\"\"\n\n def __init__(self,\n input_dim,\n hidden_dim,\n output_dim,\n num_layers,\n use_bn=False):\n super(ScoreHead, self).__init__()\n self.num_layers = num_layers\n hidden_dims = [hidden_dim] * (num_layers - 1)\n if use_bn:\n self.layers = nn.ModuleList(\n nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\n for n, k in zip([input_dim] + hidden_dims, hidden_dims +\n [output_dim]))\n else:\n self.layers = nn.ModuleList(\n nn.Linear(n, k)\n for n, k in zip([input_dim] + hidden_dims, hidden_dims +\n [output_dim]))\n\n def forward(self, x):\n \"\"\"Forward function for `ScoreHead`.\n\n Args:\n x (Tensor): of shape (1, bs, num_query, c).\n\n Returns:\n Tensor: of shape (bs, num_query, 1).\n \"\"\"\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x.squeeze(0)\n\n\[email protected]_module()\nclass StarkTransformer(Transformer):\n \"\"\"The transformer head used in `STARK\n <https://arxiv.org/abs/2103.17154>`_.\n\n This module follows the official DETR implementation.\n See `paper: End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for details.\n\n Args:\n encoder (`mmcv.ConfigDict` | Dict): Config of\n TransformerEncoder. Defaults to None.\n decoder (`mmcv.ConfigDict` | Dict): Config of\n TransformerDecoder. Defaults to None.\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n Defaults to None.\n \"\"\"\n\n def __init__(self, encoder=None, decoder=None, init_cfg=None):\n super(StarkTransformer, self).__init__(\n encoder=encoder, decoder=decoder, init_cfg=init_cfg)\n\n def forward(self, x, mask, query_embed, pos_embed):\n \"\"\"Forward function for `StarkTransformer`.\n\n The difference from the transformer module in `MMCV` is the input\n shape. The sizes of template feature maps and search feature maps are\n different. Thus, we must flatten and concatenate them outside this\n module. The transformer module in `MMCV` flattens the input features\n internally.\n\n Args:\n x (Tensor): Input query with shape (feats_flatten_len, bs, c)\n where c = embed_dims.\n mask (Tensor): The key_padding_mask used for encoder and decoder,\n with shape (bs, feats_flatten_len).\n query_embed (Tensor): The query embedding for decoder, with shape\n (num_query, c).\n pos_embed (Tensor): The positional encoding for encoder and\n decoder, with shape (feats_flatten_len, bs, c).\n\n Here, 'feats_flatten_len' = z_feat_h*z_feat_w*2 + \\\n x_feat_h*x_feat_w.\n 'z_feat_h' and 'z_feat_w' denote the height and width of the\n template features respectively.\n 'x_feat_h' and 'x_feat_w' denote the height and width of search\n features respectively.\n Returns:\n tuple[Tensor]: results of decoder containing the following tensor.\n - out_dec: Output from decoder. If return_intermediate_dec \\\n is True, output has shape [num_dec_layers, bs,\n num_query, embed_dims], else has shape [1, bs, \\\n num_query, embed_dims].\n Here, return_intermediate_dec=False\n - enc_mem: Output results from encoder, with shape \\\n (feats_flatten_len, bs, embed_dims).\n \"\"\"\n _, bs, _ = x.shape\n query_embed = query_embed.unsqueeze(1).repeat(\n 1, bs, 1) # [num_query, embed_dims] -> [num_query, bs, embed_dims]\n\n enc_mem = self.encoder(\n query=x,\n key=None,\n value=None,\n query_pos=pos_embed,\n query_key_padding_mask=mask)\n target = torch.zeros_like(query_embed)\n # out_dec: [num_dec_layers, num_query, bs, embed_dims]\n out_dec = self.decoder(\n query=target,\n key=enc_mem,\n value=enc_mem,\n key_pos=pos_embed,\n query_pos=query_embed,\n key_padding_mask=mask)\n out_dec = out_dec.transpose(1, 2)\n return out_dec, enc_mem\n\n\[email protected]_module()\nclass StarkHead(BaseModule):\n \"\"\"STARK head module for bounding box regression and prediction of\n confidence score of tracking bbox.\n\n This module is proposed in\n \"Learning Spatio-Temporal Transformer for Visual Tracking\".\n `STARK <https://arxiv.org/abs/2103.17154>`_.\n\n Args:\n num_query (int): Number of queries in the transformer.\n transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer.\n Default: None.\n positional_encoding (obj:`mmcv.ConfigDict`|dict):\n Config for position encoding.\n bbox_head (obj:`mmcv.ConfigDict`|dict, optional): Config for bbox head.\n Defaults to None.\n cls_head (obj:`mmcv.ConfigDict`|dict, optional): Config for\n classification head. Defaults to None.\n loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the\n classification loss. Default `CrossEntropyLoss`.\n loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the bbox\n regression loss. Default `L1Loss`.\n loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the bbox\n regression iou loss. Default `GIoULoss`.\n train_cfg (obj:`mmcv.ConfigDict`|dict): Training config of\n transformer head.\n test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of\n transformer head.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None\n \"\"\"\n\n def __init__(self,\n num_query=1,\n transformer=None,\n positional_encoding=dict(\n type='SinePositionalEncoding',\n num_feats=128,\n normalize=True),\n bbox_head=None,\n cls_head=None,\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0,\n ),\n loss_bbox=dict(type='L1Loss', loss_weight=5.0),\n loss_iou=dict(type='GIoULoss', loss_weight=2.0),\n train_cfg=None,\n test_cfg=None,\n init_cfg=None,\n frozen_modules=None,\n **kwargs):\n super(StarkHead, self).__init__(init_cfg=init_cfg)\n self.transformer = build_transformer(transformer)\n self.positional_encoding = build_positional_encoding(\n positional_encoding)\n assert bbox_head is not None\n self.bbox_head = build_head(bbox_head)\n if cls_head is None:\n # the stage-1 training\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_iou = build_loss(loss_iou)\n self.cls_head = None\n else:\n # the stage-2 training\n self.cls_head = build_head(cls_head)\n self.loss_cls = build_loss(loss_cls)\n self.embed_dims = self.transformer.embed_dims\n self.num_query = num_query\n self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.fp16_enabled = False\n\n if frozen_modules is not None:\n assert isinstance(frozen_modules, list)\n for module in frozen_modules:\n m = getattr(self, module)\n # TODO: Study the influence of freezing BN running_mean and\n # running_variance of `frozen_modules` in the 2nd training\n # stage. The official code doesn't freeze these.\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self):\n \"\"\"Parameters initialization.\"\"\"\n self.transformer.init_weights()\n\n def _merge_template_search(self, inputs):\n \"\"\"Merge the data of template and search images.\n The merge includes 3 steps: flatten, permute and concatenate.\n Note: the data of the search image must come last.\n\n Args:\n inputs (list[dict(Tensor)]):\n The list contains the data of template and search images.\n The dict is in the following format:\n - 'feat': (N, C, H, W)\n - 'mask': (N, H, W)\n - 'pos_embed': (N, C, H, W)\n\n Returns:\n dict(Tensor):\n - 'feat': in [data_flatten_len, N, C] format\n - 'mask': in [N, data_flatten_len] format\n - 'pos_embed': in [data_flatten_len, N, C]\n format\n\n Here, 'data_flatten_len' = z_h*z_w*2 + x_h*x_w.\n 'z_h' and 'z_w' denote the height and width of the\n template images respectively.\n 'x_h' and 'x_w' denote the height and width of search image\n respectively.\n \"\"\"\n seq_dict = defaultdict(list)\n # flatten and permute\n for input_dic in inputs:\n for name, x in input_dic.items():\n if name == 'mask':\n seq_dict[name].append(x.flatten(1))\n else:\n seq_dict[name].append(\n x.flatten(2).permute(2, 0, 1).contiguous())\n # concatenate\n for name, x in seq_dict.items():\n if name == 'mask':\n seq_dict[name] = torch.cat(x, dim=1)\n else:\n seq_dict[name] = torch.cat(x, dim=0)\n return seq_dict\n\n def forward_bbox_head(self, feat, enc_mem):\n \"\"\"\n Args:\n feat: output embeddings of decoder, with shape\n (1, bs, num_query, c).\n enc_mem: output embeddings of encoder, with shape\n (feats_flatten_len, bs, c).\n\n Here, 'feats_flatten_len' = z_feat_h*z_feat_w*2 + \\\n x_feat_h*x_feat_w.\n 'z_feat_h' and 'z_feat_w' denote the height and width of the\n template features respectively.\n 'x_feat_h' and 'x_feat_w' denote the height and width of search\n features respectively.\n Returns:\n Tensor: of shape (bs, num_query, 4). The bbox is in\n [tl_x, tl_y, br_x, br_y] format.\n \"\"\"\n x_feat_len = self.bbox_head.feat_size**2\n # the output of encoder for the search image (search comes last)\n x_feat = enc_mem[-x_feat_len:].transpose(\n 0, 1) # (bs, x_feat_h*x_feat_w, c)\n dec_embed = feat.squeeze(0).transpose(1, 2) # (bs, c, num_query)\n attention = torch.matmul(\n x_feat, dec_embed) # (bs, x_feat_h*x_feat_w, num_query)\n bbox_feat = (x_feat.unsqueeze(-1) * attention.unsqueeze(-2))\n\n # (bs, x_feat_h*x_feat_w, c, num_query) --> (bs, num_query, c, x_feat_h*x_feat_w) # noqa\n bbox_feat = bbox_feat.permute((0, 3, 2, 1)).contiguous()\n bs, num_query, dim, _ = bbox_feat.size()\n bbox_feat = bbox_feat.view(-1, dim, self.bbox_head.feat_size,\n self.bbox_head.feat_size)\n # run the corner prediction head\n outputs_coord = self.bbox_head(bbox_feat)\n outputs_coord = outputs_coord.view(bs, num_query, 4)\n return outputs_coord\n\n def forward(self, inputs):\n \"\"\"Forward function.\n\n Args:\n inputs (list[dict(tuple(Tensor))]): The list contains the\n multi-level features and masks of template or search images.\n - 'feat': (tuple(Tensor)), the Tensor is of shape\n (bs, c, h//stride, w//stride).\n - 'mask': (Tensor), of shape (bs, h, w).\n\n Here, `h` and `w` denote the height and width of input\n image respectively. `stride` is the stride of feature map.\n\n Returns:\n (dict):\n - 'pred_bboxes': (Tensor) of shape (bs, num_query, 4), in\n [tl_x, tl_y, br_x, br_y] format\n - 'pred_logits': (Tensor) of shape (bs, num_query, 1)\n \"\"\"\n # 1. preprocess inputs for transformer\n all_inputs = []\n for input in inputs:\n feat = input['feat'][0]\n feat_size = feat.shape[-2:]\n mask = F.interpolate(\n input['mask'][None].float(), size=feat_size).to(torch.bool)[0]\n pos_embed = self.positional_encoding(mask)\n all_inputs.append(dict(feat=feat, mask=mask, pos_embed=pos_embed))\n all_inputs = self._merge_template_search(all_inputs)\n\n # 2. forward transformer head\n # outs_dec is in (1, bs, num_query, c) shape\n # enc_mem is in (feats_flatten_len, bs, c) shape\n outs_dec, enc_mem = self.transformer(all_inputs['feat'],\n all_inputs['mask'],\n self.query_embedding.weight,\n all_inputs['pos_embed'])\n\n # 3. forward bbox head and classification head\n track_results = {}\n if not self.training:\n if self.cls_head is not None:\n # forward the classification head\n track_results['pred_logits'] = self.cls_head(outs_dec)\n track_results['pred_bboxes'] = self.forward_bbox_head(\n outs_dec, enc_mem)\n else:\n if self.cls_head is not None:\n # stage-2 training: forward the classification head\n track_results['pred_logits'] = self.cls_head(outs_dec)\n else:\n # stage-1 training: forward the box prediction head\n track_results['pred_bboxes'] = self.forward_bbox_head(\n outs_dec, enc_mem)\n return track_results\n\n def loss(self, track_results, gt_bboxes, gt_labels, img_size=None):\n \"\"\"Compute loss.\n\n Args:\n track_results (dict): it may contain the following keys:\n - 'pred_bboxes': bboxes of (N, num_query, 4) shape in\n [tl_x, tl_y, br_x, br_y] format.\n - 'pred_logits': logits of (N, num_query, 1) shape.\n gt_bboxes (list[Tensor]): ground truth bboxes for search images\n with shape (N, 5) in [0., tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): ground truth labels for\n search images with shape (N, 2).\n img_size (tuple, optional): the size (h, w) of original\n search image. Defaults to None.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components.\n \"\"\"\n losses = dict()\n\n if self.cls_head is None:\n # the stage-1 training\n assert img_size is not None\n pred_bboxes = track_results['pred_bboxes'][:, 0] # shape [N, 4]\n pred_bboxes[:, 0:4:2] = pred_bboxes[:, 0:4:2] / float(img_size[1])\n pred_bboxes[:, 1:4:2] = pred_bboxes[:, 1:4:2] / float(img_size[0])\n\n gt_bboxes = torch.cat(gt_bboxes, dim=0).type(torch.float32)[:, 1:]\n gt_bboxes[:, 0:4:2] = gt_bboxes[:, 0:4:2] / float(img_size[1])\n gt_bboxes[:, 1:4:2] = gt_bboxes[:, 1:4:2] / float(img_size[0])\n gt_bboxes = gt_bboxes.clamp(0., 1.)\n\n # regression IoU loss, default GIoU loss\n if (pred_bboxes[:, :2] >= pred_bboxes[:, 2:]).any() or (\n gt_bboxes[:, :2] >= gt_bboxes[:, 2:]).any():\n # the first several iterations of training may return invalid\n # bbox coordinates.\n losses['loss_iou'] = (pred_bboxes - gt_bboxes).sum() * 0.0\n else:\n losses['loss_iou'] = self.loss_iou(pred_bboxes, gt_bboxes)\n # regression L1 loss\n losses['loss_bbox'] = self.loss_bbox(pred_bboxes, gt_bboxes)\n else:\n # the stage-2 training\n assert gt_labels is not None\n pred_logits = track_results['pred_logits'][:, 0].squeeze()\n gt_labels = torch.cat(\n gt_labels, dim=0).type(torch.float32)[:, 1:].squeeze()\n losses['loss_cls'] = self.loss_cls(pred_logits, gt_labels)\n\n return losses\n"
]
[ [ "numpy.array" ], [ "torch.manual_seed", "torch.utils.data.sampler.RandomSampler", "numpy.random.seed" ], [ "torch.nn.BatchNorm1d", "torch.nn.functional.softmax", "torch.cat", "torch.nn.Conv2d", "torch.zeros_like", "torch.sum", "torch.nn.Embedding", "torch.matmul", "torch.nn.Linear", "torch.arange", "torch.stack" ] ]
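Aside: the CornerPredictorHead in the tracking-head file above decodes each corner by softmaxing the flattened score map and taking the probability-weighted average of a pixel-coordinate grid. Below is a minimal, self-contained sketch of that soft-argmax step, assuming the default feat_size=20 and stride=16; the standalone soft_argmax helper is illustrative only and is not part of the file itself.

import torch

def soft_argmax(score_map, feat_size=20, stride=16):
    # (bs, 1, H, W) -> (bs, H*W): probability distribution over locations
    prob = torch.softmax(score_map.view(-1, feat_size * feat_size), dim=1)
    idx = torch.arange(feat_size, dtype=torch.float32) * stride
    coord_x = idx.repeat(feat_size)             # x (width) varies fastest
    coord_y = idx.repeat_interleave(feat_size)  # y (height) varies slowest
    # expected pixel coordinates, each of shape (bs,)
    return (coord_x * prob).sum(dim=1), (coord_y * prob).sum(dim=1)

x, y = soft_argmax(torch.randn(2, 1, 20, 20))
print(x.shape, y.shape)  # torch.Size([2]) torch.Size([2])

Because the expectation is taken over a softmax rather than a hard argmax, the corner coordinates stay differentiable, which is what lets the corner head train end-to-end.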
8M-An/greyatom-python-for-data-science
[ "5c6dd729120aa003de0095cddd2093b0a16ecab9" ]
[ "numpy/code.py" ]
[ "# --------------\n# Importing header files\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n#Reading file\r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\n#Code starts here\r\n\r\ncensus=np.concatenate((data,new_record))\r\n\r\n\r\nage=census[:,0]\r\nprint(age)\r\nmax_age,min_age=age.max(),age.min()\r\nprint(max_age)\r\nprint(min_age)\r\nage_mean=age.mean()\r\nage_std=np.std(age)\r\nprint(age_mean)\r\nprint(age_std)\r\n\r\n\r\nrace_0=census[census[:,2]==0]\r\nrace_1=census[census[:,2]==1]\r\nrace_2=census[census[:,2]==2]\r\nrace_3=census[census[:,2]==3]\r\nrace_4=census[census[:,2]==4]\r\nlen_0,len_1,len_2,len_3,len_4=len(race_0),len(race_1),len(race_2),len(race_3),len(race_4)\r\nprint(len_0,len_1,len_2,len_3,len_4)\r\nlenlist=[len_0,len_1,len_2,len_3,len_4]\r\nminority_race=min(lenlist)\r\nfor i in range(len(lenlist)):\r\n if minority_race==lenlist[i]:\r\n minority_race=i\r\n break\r\nprint(minority_race) \r\n\r\n\r\nsenior_citizens=census[census[:,0]>60]\r\nworking_hours_sum=senior_citizens[:,6].sum()\r\nsenior_citizens_len=len(senior_citizens)\r\navg_working_hours=working_hours_sum/senior_citizens_len\r\nprint(working_hours_sum,avg_working_hours)\r\n\r\n\r\nhigh=census[census[:,1]>10]\r\nlow=census[census[:,1]<=10]\r\navg_pay_high=high[:,7].mean()\r\navg_pay_low=low[:,7].mean()\r\nprint(avg_pay_high,avg_pay_low)\n\n\n" ]
[ [ "numpy.concatenate", "numpy.std", "numpy.genfromtxt" ] ]
eriknw/numpy
[ "d13a4f06ea84da112d5069b3fde148e307e7f94c" ]
[ "numpy/random/tests/test_generator_mt19937.py" ]
[ "import sys\n\nimport pytest\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\nfrom numpy.testing import (\n assert_, assert_raises, assert_equal, assert_allclose,\n assert_warns, assert_no_warnings, assert_array_equal,\n assert_array_almost_equal, suppress_warnings)\n\nfrom numpy.random import Generator, MT19937, SeedSequence\n\nrandom = Generator(MT19937())\n\n\[email protected](scope='module', params=[True, False])\ndef endpoint(request):\n return request.param\n\n\nclass TestSeed:\n def test_scalar(self):\n s = Generator(MT19937(0))\n assert_equal(s.integers(1000), 479)\n s = Generator(MT19937(4294967295))\n assert_equal(s.integers(1000), 324)\n\n def test_array(self):\n s = Generator(MT19937(range(10)))\n assert_equal(s.integers(1000), 465)\n s = Generator(MT19937(np.arange(10)))\n assert_equal(s.integers(1000), 465)\n s = Generator(MT19937([0]))\n assert_equal(s.integers(1000), 479)\n s = Generator(MT19937([4294967295]))\n assert_equal(s.integers(1000), 324)\n\n def test_seedsequence(self):\n s = MT19937(SeedSequence(0))\n assert_equal(s.random_raw(1), 2058676884)\n\n def test_invalid_scalar(self):\n # seed must be an unsigned 32 bit integer\n assert_raises(TypeError, MT19937, -0.5)\n assert_raises(ValueError, MT19937, -1)\n\n def test_invalid_array(self):\n # seed must be an unsigned integer\n assert_raises(TypeError, MT19937, [-0.5])\n assert_raises(ValueError, MT19937, [-1])\n assert_raises(ValueError, MT19937, [1, -2, 4294967296])\n\n def test_noninstantized_bitgen(self):\n assert_raises(ValueError, Generator, MT19937)\n\n\nclass TestBinomial:\n def test_n_zero(self):\n # Tests the corner case of n == 0 for the binomial distribution.\n # binomial(0, p) should be zero for any p in [0, 1].\n # This test addresses issue #3480.\n zeros = np.zeros(2, dtype='int')\n for p in [0, .5, 1]:\n assert_(random.binomial(0, p) == 0)\n assert_array_equal(random.binomial(zeros, p), zeros)\n\n def test_p_is_nan(self):\n # Issue #4571.\n assert_raises(ValueError, random.binomial, 1, np.nan)\n\n\nclass TestMultinomial:\n def test_basic(self):\n random.multinomial(100, [0.2, 0.8])\n\n def test_zero_probability(self):\n random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])\n\n def test_int_negative_interval(self):\n assert_(-5 <= random.integers(-5, -1) < -1)\n x = random.integers(-5, -1, 5)\n assert_(np.all(-5 <= x))\n assert_(np.all(x < -1))\n\n def test_size(self):\n # gh-3173\n p = [0.5, 0.5]\n assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))\n assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))\n assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,\n (2, 2, 2))\n\n assert_raises(TypeError, random.multinomial, 1, p,\n float(1))\n\n def test_invalid_prob(self):\n assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])\n assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])\n\n def test_invalid_n(self):\n assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])\n assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])\n\n def test_p_non_contiguous(self):\n p = np.arange(15.)\n p /= np.sum(p[1::3])\n pvals = p[1::3]\n random = Generator(MT19937(1432985819))\n non_contig = random.multinomial(100, pvals=pvals)\n random = Generator(MT19937(1432985819))\n contig = random.multinomial(100, 
pvals=np.ascontiguousarray(pvals))\n assert_array_equal(non_contig, contig)\n\n def test_multidimensional_pvals(self):\n assert_raises(ValueError, random.multinomial, 10, [[0, 1]])\n assert_raises(ValueError, random.multinomial, 10, [[0], [1]])\n assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])\n assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))\n\n\nclass TestMultivariateHypergeometric:\n\n def setup(self):\n self.seed = 8675309\n\n def test_argument_validation(self):\n # Error cases...\n\n # `colors` must be a 1-d sequence\n assert_raises(ValueError, random.multivariate_hypergeometric,\n 10, 4)\n\n # Negative nsample\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [2, 3, 4], -1)\n\n # Negative color\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [-1, 2, 3], 2)\n\n # nsample exceeds sum(colors)\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [2, 3, 4], 10)\n\n # nsample exceeds sum(colors) (edge case of empty colors)\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [], 1)\n\n # Validation errors associated with very large values in colors.\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [999999999, 101], 5, 1, 'marginals')\n\n int64_info = np.iinfo(np.int64)\n max_int64 = int64_info.max\n max_int64_index = max_int64 // int64_info.dtype.itemsize\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [max_int64_index - 100, 101], 5, 1, 'count')\n\n @pytest.mark.parametrize('method', ['count', 'marginals'])\n def test_edge_cases(self, method):\n # Set the seed, but in fact, all the results in this test are\n # deterministic, so we don't really need this.\n random = Generator(MT19937(self.seed))\n\n x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)\n assert_array_equal(x, [0, 0, 0])\n\n x = random.multivariate_hypergeometric([], 0, method=method)\n assert_array_equal(x, [])\n\n x = random.multivariate_hypergeometric([], 0, size=1, method=method)\n assert_array_equal(x, np.empty((1, 0), dtype=np.int64))\n\n x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)\n assert_array_equal(x, [0, 0, 0])\n\n x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)\n assert_array_equal(x, [3, 0, 0])\n\n colors = [1, 1, 0, 1, 1]\n x = random.multivariate_hypergeometric(colors, sum(colors),\n method=method)\n assert_array_equal(x, colors)\n\n x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,\n method=method)\n assert_array_equal(x, [[3, 4, 5]]*3)\n\n # Cases for nsample:\n # nsample < 10\n # 10 <= nsample < colors.sum()/2\n # colors.sum()/2 < nsample < colors.sum() - 10\n # colors.sum() - 10 < nsample < colors.sum()\n @pytest.mark.parametrize('nsample', [8, 25, 45, 55])\n @pytest.mark.parametrize('method', ['count', 'marginals'])\n @pytest.mark.parametrize('size', [5, (2, 3), 150000])\n def test_typical_cases(self, nsample, method, size):\n random = Generator(MT19937(self.seed))\n\n colors = np.array([10, 5, 20, 25])\n sample = random.multivariate_hypergeometric(colors, nsample, size,\n method=method)\n if isinstance(size, int):\n expected_shape = (size,) + colors.shape\n else:\n expected_shape = size + colors.shape\n assert_equal(sample.shape, expected_shape)\n assert_((sample >= 0).all())\n assert_((sample <= colors).all())\n assert_array_equal(sample.sum(axis=-1),\n np.full(size, fill_value=nsample, dtype=int))\n if isinstance(size, int) and size >= 100000:\n # This sample is large enough to 
compare its mean to\n # the expected values.\n assert_allclose(sample.mean(axis=0),\n nsample * colors / colors.sum(),\n rtol=1e-3, atol=0.005)\n\n def test_repeatability1(self):\n random = Generator(MT19937(self.seed))\n sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,\n method='count')\n expected = np.array([[2, 1, 2],\n [2, 1, 2],\n [1, 1, 3],\n [2, 0, 3],\n [2, 1, 2]])\n assert_array_equal(sample, expected)\n\n def test_repeatability2(self):\n random = Generator(MT19937(self.seed))\n sample = random.multivariate_hypergeometric([20, 30, 50], 50,\n size=5,\n method='marginals')\n expected = np.array([[ 9, 17, 24],\n [ 7, 13, 30],\n [ 9, 15, 26],\n [ 9, 17, 24],\n [12, 14, 24]])\n assert_array_equal(sample, expected)\n\n def test_repeatability3(self):\n random = Generator(MT19937(self.seed))\n sample = random.multivariate_hypergeometric([20, 30, 50], 12,\n size=5,\n method='marginals')\n expected = np.array([[2, 3, 7],\n [5, 3, 4],\n [2, 5, 5],\n [5, 3, 4],\n [1, 5, 6]])\n assert_array_equal(sample, expected)\n\n\nclass TestSetState:\n def setup(self):\n self.seed = 1234567890\n self.rg = Generator(MT19937(self.seed))\n self.bit_generator = self.rg.bit_generator\n self.state = self.bit_generator.state\n self.legacy_state = (self.state['bit_generator'],\n self.state['state']['key'],\n self.state['state']['pos'])\n\n def test_gaussian_reset(self):\n # Make sure the cached every-other-Gaussian is reset.\n old = self.rg.standard_normal(size=3)\n self.bit_generator.state = self.state\n new = self.rg.standard_normal(size=3)\n assert_(np.all(old == new))\n\n def test_gaussian_reset_in_media_res(self):\n # When the state is saved with a cached Gaussian, make sure the\n # cached Gaussian is restored.\n\n self.rg.standard_normal()\n state = self.bit_generator.state\n old = self.rg.standard_normal(size=3)\n self.bit_generator.state = state\n new = self.rg.standard_normal(size=3)\n assert_(np.all(old == new))\n\n def test_negative_binomial(self):\n # Ensure that the negative binomial results take floating point\n # arguments without truncation.\n self.rg.negative_binomial(0.5, 0.5)\n\n\nclass TestIntegers:\n rfunc = random.integers\n\n # valid integer/boolean types\n itype = [bool, np.int8, np.uint8, np.int16, np.uint16,\n np.int32, np.uint32, np.int64, np.uint64]\n\n def test_unsupported_type(self, endpoint):\n assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)\n\n def test_bounds_checking(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, ubnd, lbnd,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,\n dtype=dt)\n\n assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, 1, [0],\n endpoint=endpoint, dtype=dt)\n\n def test_bounds_checking_array(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)\n\n assert_raises(ValueError, self.rfunc, 
[lbnd - 1] * 2, [ubnd] * 2,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [lbnd] * 2,\n [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [1] * 2, 0,\n endpoint=endpoint, dtype=dt)\n\n def test_rng_zero_and_extremes(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n is_open = not endpoint\n\n tgt = ubnd - 1\n assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n\n tgt = lbnd\n assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n\n tgt = (lbnd + ubnd) // 2\n assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n assert_equal(self.rfunc([tgt], [tgt + is_open],\n size=1000, endpoint=endpoint, dtype=dt),\n tgt)\n\n def test_rng_zero_and_extremes_array(self, endpoint):\n size = 1000\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n tgt = ubnd - 1\n assert_equal(self.rfunc([tgt], [tgt + 1],\n size=size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)\n\n tgt = lbnd\n assert_equal(self.rfunc([tgt], [tgt + 1],\n size=size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)\n\n tgt = (lbnd + ubnd) // 2\n assert_equal(self.rfunc([tgt], [tgt + 1],\n size=size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)\n\n def test_full_range(self, endpoint):\n # Test for ticket #1690\n\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n try:\n self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n except Exception as e:\n raise AssertionError(\"No error should have been raised, \"\n \"but one was with the following \"\n \"message:\\n\\n%s\" % str(e))\n\n def test_full_range_array(self, endpoint):\n # Test for ticket #1690\n\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n try:\n self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)\n except Exception as e:\n raise AssertionError(\"No error should have been raised, \"\n \"but one was with the following \"\n \"message:\\n\\n%s\" % str(e))\n\n def test_in_bounds_fuzz(self, endpoint):\n # Don't use fixed seed\n random = Generator(MT19937())\n\n for dt in self.itype[1:]:\n for ubnd in [4, 8, 16]:\n vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,\n endpoint=endpoint, dtype=dt)\n assert_(vals.max() < ubnd)\n assert_(vals.min() >= 2)\n\n vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,\n dtype=bool)\n assert_(vals.max() < 2)\n 
assert_(vals.min() >= 0)\n\n def test_scalar_array_equiv(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n size = 1000\n random = Generator(MT19937(1234))\n scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,\n dtype=dt)\n\n random = Generator(MT19937(1234))\n scalar_array = random.integers([lbnd], [ubnd], size=size,\n endpoint=endpoint, dtype=dt)\n\n random = Generator(MT19937(1234))\n array = random.integers([lbnd] * size, [ubnd] *\n size, size=size, endpoint=endpoint, dtype=dt)\n assert_array_equal(scalar, scalar_array)\n assert_array_equal(scalar, array)\n\n def test_repeatability(self, endpoint):\n import hashlib\n # We use a md5 hash of generated sequences of 1000 samples\n # in the range [0, 6) for all but bool, where the range\n # is [0, 2). Hashes are for little endian numbers.\n tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',\n 'int16': '39624ead49ad67e37545744024d2648b',\n 'int32': '5c4810373f979336c6c0c999996e47a1',\n 'int64': 'ab126c15edff26f55c50d2b7e37391ac',\n 'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',\n 'uint16': '39624ead49ad67e37545744024d2648b',\n 'uint32': '5c4810373f979336c6c0c999996e47a1',\n 'uint64': 'ab126c15edff26f55c50d2b7e37391ac',\n 'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}\n\n for dt in self.itype[1:]:\n random = Generator(MT19937(1234))\n\n # view as little endian for hash\n if sys.byteorder == 'little':\n val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,\n dtype=dt)\n else:\n val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,\n dtype=dt).byteswap()\n\n res = hashlib.md5(val.view(np.int8)).hexdigest()\n assert_(tgt[np.dtype(dt).name] == res)\n\n # bools do not depend on endianness\n random = Generator(MT19937(1234))\n val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,\n dtype=bool).view(np.int8)\n res = hashlib.md5(val).hexdigest()\n assert_(tgt[np.dtype(bool).name] == res)\n\n def test_repeatability_broadcasting(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min\n ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n # view as little endian for hash\n random = Generator(MT19937(1234))\n val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,\n dtype=dt)\n\n random = Generator(MT19937(1234))\n val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,\n dtype=dt)\n\n assert_array_equal(val, val_bc)\n\n random = Generator(MT19937(1234))\n val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,\n endpoint=endpoint, dtype=dt)\n\n assert_array_equal(val, val_bc)\n\n @pytest.mark.parametrize(\n 'bound, expected',\n [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,\n 3769704066, 1170797179, 4108474671])),\n (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,\n 3769704067, 1170797180, 4108474672])),\n (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,\n 1831631863, 1215661561, 3869512430]))]\n )\n def test_repeatability_32bit_boundary(self, bound, expected):\n for size in [None, len(expected)]:\n random = Generator(MT19937(1234))\n x = random.integers(bound, size=size)\n assert_equal(x, expected if size is not None else expected[0])\n\n def test_repeatability_32bit_boundary_broadcasting(self):\n desired = np.array([[[1622936284, 3620788691, 1659384060],\n [1417365545, 760222891, 1909653332],\n 
[3788118662, 660249498, 4092002593]],\n [[3625610153, 2979601262, 3844162757],\n [ 685800658, 120261497, 2694012896],\n [1207779440, 1586594375, 3854335050]],\n [[3004074748, 2310761796, 3012642217],\n [2067714190, 2786677879, 1363865881],\n [ 791663441, 1867303284, 2169727960]],\n [[1939603804, 1250951100, 298950036],\n [1040128489, 3791912209, 3317053765],\n [3155528714, 61360675, 2305155588]],\n [[ 817688762, 1335621943, 3288952434],\n [1770890872, 1102951817, 1957607470],\n [3099996017, 798043451, 48334215]]])\n for size in [None, (5, 3, 3)]:\n random = Generator(MT19937(12345))\n x = random.integers([[-1], [0], [1]],\n [2**32 - 1, 2**32, 2**32 + 1],\n size=size)\n assert_array_equal(x, desired if size is not None else desired[0])\n\n def test_int64_uint64_broadcast_exceptions(self, endpoint):\n configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),\n np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),\n (-2**63-1, -2**63-1))}\n for dtype in configs:\n for config in configs[dtype]:\n low, high = config\n high = high - endpoint\n low_a = np.array([[low]*10])\n high_a = np.array([high] * 10)\n assert_raises(ValueError, random.integers, low, high,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low_a, high,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low, high_a,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low_a, high_a,\n endpoint=endpoint, dtype=dtype)\n\n low_o = np.array([[low]*10], dtype=object)\n high_o = np.array([high] * 10, dtype=object)\n assert_raises(ValueError, random.integers, low_o, high,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low, high_o,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low_o, high_o,\n endpoint=endpoint, dtype=dtype)\n\n def test_int64_uint64_corner_case(self, endpoint):\n # When stored in Numpy arrays, `lbnd` is cast\n # as np.int64, and `ubnd` is cast as np.uint64.\n # Checking whether `lbnd` >= `ubnd` used to be\n # done solely via direct comparison, which is incorrect\n # because when Numpy tries to compare both numbers,\n # it casts both to np.float64 because there is\n # no integer superset of np.int64 and np.uint64. However,\n # `ubnd` is too large to be represented in np.float64,\n # causing it to be rounded down to np.iinfo(np.int64).max,\n # leading to a ValueError because `lbnd` now equals\n # the new `ubnd`.\n\n dt = np.int64\n tgt = np.iinfo(np.int64).max\n lbnd = np.int64(np.iinfo(np.int64).max)\n ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)\n\n # None of these function calls should\n # generate a ValueError now.\n actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n assert_equal(actual, tgt)\n\n def test_respect_dtype_singleton(self, endpoint):\n # See gh-7203\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n dt = np.bool_ if dt is bool else dt\n\n sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n assert_equal(sample.dtype, dt)\n\n for dt in (bool, int, np.compat.long):\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n # gh-7284: Ensure that we get Python data types\n sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n assert not hasattr(sample, 'dtype')\n assert_equal(type(sample), dt)\n\n def test_respect_dtype_array(self, endpoint):\n # See gh-7203\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n dt = np.bool_ if dt is bool else dt\n\n sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)\n assert_equal(sample.dtype, dt)\n sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,\n dtype=dt)\n assert_equal(sample.dtype, dt)\n\n def test_zero_size(self, endpoint):\n # See gh-7203\n for dt in self.itype:\n sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)\n assert sample.shape == (3, 0, 4)\n assert sample.dtype == dt\n assert self.rfunc(0, -10, 0, endpoint=endpoint,\n dtype=dt).shape == (0,)\n assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,\n (3, 0, 4))\n assert_equal(random.integers(0, -10, size=0).shape, (0,))\n assert_equal(random.integers(10, 10, size=0).shape, (0,))\n\n def test_error_byteorder(self):\n other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'\n with pytest.raises(ValueError):\n random.integers(0, 200, size=10, dtype=other_byteord_dt)\n\n # chi2max is the maximum acceptable chi-squared value.\n @pytest.mark.slow\n @pytest.mark.parametrize('sample_size,high,dtype,chi2max',\n [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25\n (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30\n (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25\n (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25\n ])\n def test_integers_small_dtype_chisquared(self, sample_size, high,\n dtype, chi2max):\n # Regression test for gh-14774.\n samples = random.integers(high, size=sample_size, dtype=dtype)\n\n values, counts = np.unique(samples, return_counts=True)\n expected = sample_size / high\n chi2 = ((counts - expected)**2 / expected).sum()\n assert chi2 < chi2max\n\n\nclass TestRandomDist:\n # Make sure the random distribution returns the correct value for a\n # given seed\n\n def setup(self):\n self.seed = 1234567890\n\n def test_integers(self):\n random = Generator(MT19937(self.seed))\n actual = random.integers(-99, 99, size=(3, 2))\n desired = np.array([[-80, -56], [41, 37], [-83, -16]])\n assert_array_equal(actual, desired)\n\n def test_integers_masked(self):\n # Test masked rejection 
sampling algorithm to generate array of\n # uint32 in an interval.\n random = Generator(MT19937(self.seed))\n actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)\n desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)\n assert_array_equal(actual, desired)\n\n def test_integers_closed(self):\n random = Generator(MT19937(self.seed))\n actual = random.integers(-99, 99, size=(3, 2), endpoint=True)\n desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])\n assert_array_equal(actual, desired)\n\n def test_integers_max_int(self):\n # Tests whether integers with closed=True can generate the\n # maximum allowed Python int that can be converted\n # into a C long. Previous implementations of this\n # method have thrown an OverflowError when attempting\n # to generate this integer.\n actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,\n endpoint=True)\n\n desired = np.iinfo('l').max\n assert_equal(actual, desired)\n\n def test_random(self):\n random = Generator(MT19937(self.seed))\n actual = random.random((3, 2))\n desired = np.array([[0.096999199829214, 0.707517457682192],\n [0.084364834598269, 0.767731206553125],\n [0.665069021359413, 0.715487190596693]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n random = Generator(MT19937(self.seed))\n actual = random.random()\n assert_array_almost_equal(actual, desired[0, 0], decimal=15)\n\n def test_random_float(self):\n random = Generator(MT19937(self.seed))\n actual = random.random((3, 2))\n desired = np.array([[0.0969992 , 0.70751746],\n [0.08436483, 0.76773121],\n [0.66506902, 0.71548719]])\n assert_array_almost_equal(actual, desired, decimal=7)\n\n def test_random_float_scalar(self):\n random = Generator(MT19937(self.seed))\n actual = random.random(dtype=np.float32)\n desired = 0.0969992\n assert_array_almost_equal(actual, desired, decimal=7)\n\n def test_random_unsupported_type(self):\n assert_raises(TypeError, random.random, dtype='int32')\n\n def test_choice_uniform_replace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 4)\n desired = np.array([0, 0, 2, 2], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_nonuniform_replace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])\n desired = np.array([0, 1, 0, 1], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_uniform_noreplace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 3, replace=False)\n desired = np.array([2, 0, 3], dtype=np.int64)\n assert_array_equal(actual, desired)\n actual = random.choice(4, 4, replace=False, shuffle=False)\n desired = np.arange(4, dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_nonuniform_noreplace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])\n desired = np.array([0, 2, 3], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_noninteger(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(['a', 'b', 'c', 'd'], 4)\n desired = np.array(['a', 'a', 'c', 'c'])\n assert_array_equal(actual, desired)\n\n def test_choice_multidimensional_default_axis(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)\n desired = np.array([[0, 1], [0, 1], [4, 5]])\n assert_array_equal(actual, desired)\n\n def test_choice_multidimensional_custom_axis(self):\n random = Generator(MT19937(self.seed))\n 
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)\n desired = np.array([[0], [2], [4], [6]])\n assert_array_equal(actual, desired)\n\n def test_choice_exceptions(self):\n sample = random.choice\n assert_raises(ValueError, sample, -1, 3)\n assert_raises(ValueError, sample, 3., 3)\n assert_raises(ValueError, sample, [], 3)\n assert_raises(ValueError, sample, [1, 2, 3, 4], 3,\n p=[[0.25, 0.25], [0.25, 0.25]])\n assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])\n assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])\n assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])\n assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)\n # gh-13087\n assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)\n assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)\n assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)\n assert_raises(ValueError, sample, [1, 2, 3], 2,\n replace=False, p=[1, 0, 0])\n\n def test_choice_return_shape(self):\n p = [0.1, 0.9]\n # Check scalar\n assert_(np.isscalar(random.choice(2, replace=True)))\n assert_(np.isscalar(random.choice(2, replace=False)))\n assert_(np.isscalar(random.choice(2, replace=True, p=p)))\n assert_(np.isscalar(random.choice(2, replace=False, p=p)))\n assert_(np.isscalar(random.choice([1, 2], replace=True)))\n assert_(random.choice([None], replace=True) is None)\n a = np.array([1, 2])\n arr = np.empty(1, dtype=object)\n arr[0] = a\n assert_(random.choice(arr, replace=True) is a)\n\n # Check 0-d array\n s = tuple()\n assert_(not np.isscalar(random.choice(2, s, replace=True)))\n assert_(not np.isscalar(random.choice(2, s, replace=False)))\n assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))\n assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))\n assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))\n assert_(random.choice([None], s, replace=True).ndim == 0)\n a = np.array([1, 2])\n arr = np.empty(1, dtype=object)\n arr[0] = a\n assert_(random.choice(arr, s, replace=True).item() is a)\n\n # Check multi dimensional array\n s = (2, 3)\n p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]\n assert_equal(random.choice(6, s, replace=True).shape, s)\n assert_equal(random.choice(6, s, replace=False).shape, s)\n assert_equal(random.choice(6, s, replace=True, p=p).shape, s)\n assert_equal(random.choice(6, s, replace=False, p=p).shape, s)\n assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)\n\n # Check zero-size\n assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))\n assert_equal(random.integers(0, -10, size=0).shape, (0,))\n assert_equal(random.integers(10, 10, size=0).shape, (0,))\n assert_equal(random.choice(0, size=0).shape, (0,))\n assert_equal(random.choice([], size=(0,)).shape, (0,))\n assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,\n (3, 0, 4))\n assert_raises(ValueError, random.choice, [], 10)\n\n def test_choice_nan_probabilities(self):\n a = np.array([42, 1, 2])\n p = [None, None, None]\n assert_raises(ValueError, random.choice, a, p=p)\n\n def test_choice_p_non_contiguous(self):\n p = np.ones(10) / 5\n p[1::2] = 3.0\n random = Generator(MT19937(self.seed))\n non_contig = random.choice(5, 3, p=p[::2])\n random = Generator(MT19937(self.seed))\n contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))\n assert_array_equal(non_contig, contig)\n\n def test_choice_return_type(self):\n # gh 9867\n p = np.ones(4) / 4.\n actual = random.choice(4, 2)\n assert actual.dtype == np.int64\n actual = random.choice(4, 
2, replace=False)\n assert actual.dtype == np.int64\n actual = random.choice(4, 2, p=p)\n assert actual.dtype == np.int64\n actual = random.choice(4, 2, p=p, replace=False)\n assert actual.dtype == np.int64\n\n def test_choice_large_sample(self):\n import hashlib\n\n choice_hash = 'd44962a0b1e92f4a3373c23222244e21'\n random = Generator(MT19937(self.seed))\n actual = random.choice(10000, 5000, replace=False)\n if sys.byteorder != 'little':\n actual = actual.byteswap()\n res = hashlib.md5(actual.view(np.int8)).hexdigest()\n assert_(choice_hash == res)\n\n def test_bytes(self):\n random = Generator(MT19937(self.seed))\n actual = random.bytes(10)\n desired = b'\\x86\\xf0\\xd4\\x18\\xe1\\x81\\t8%\\xdd'\n assert_equal(actual, desired)\n\n def test_shuffle(self):\n # Test lists, arrays (of various dtypes), and multidimensional versions\n # of both, c-contiguous or not:\n for conv in [lambda x: np.array([]),\n lambda x: x,\n lambda x: np.asarray(x).astype(np.int8),\n lambda x: np.asarray(x).astype(np.float32),\n lambda x: np.asarray(x).astype(np.complex64),\n lambda x: np.asarray(x).astype(object),\n lambda x: [(i, i) for i in x],\n lambda x: np.asarray([[i, i] for i in x]),\n lambda x: np.vstack([x, x]).T,\n # gh-11442\n lambda x: (np.asarray([(i, i) for i in x],\n [(\"a\", int), (\"b\", int)])\n .view(np.recarray)),\n # gh-4270\n lambda x: np.asarray([(i, i) for i in x],\n [(\"a\", object, (1,)),\n (\"b\", np.int32, (1,))])]:\n random = Generator(MT19937(self.seed))\n alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])\n random.shuffle(alist)\n actual = alist\n desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])\n assert_array_equal(actual, desired)\n\n def test_shuffle_custom_axis(self):\n random = Generator(MT19937(self.seed))\n actual = np.arange(16).reshape((4, 4))\n random.shuffle(actual, axis=1)\n desired = np.array([[ 0, 3, 1, 2],\n [ 4, 7, 5, 6],\n [ 8, 11, 9, 10],\n [12, 15, 13, 14]])\n assert_array_equal(actual, desired)\n random = Generator(MT19937(self.seed))\n actual = np.arange(16).reshape((4, 4))\n random.shuffle(actual, axis=-1)\n assert_array_equal(actual, desired)\n\n def test_shuffle_axis_nonsquare(self):\n y1 = np.arange(20).reshape(2, 10)\n y2 = y1.copy()\n random = Generator(MT19937(self.seed))\n random.shuffle(y1, axis=1)\n random = Generator(MT19937(self.seed))\n random.shuffle(y2.T)\n assert_array_equal(y1, y2)\n\n def test_shuffle_masked(self):\n # gh-3263\n a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)\n b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)\n a_orig = a.copy()\n b_orig = b.copy()\n for i in range(50):\n random.shuffle(a)\n assert_equal(\n sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))\n random.shuffle(b)\n assert_equal(\n sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))\n\n def test_shuffle_exceptions(self):\n random = Generator(MT19937(self.seed))\n arr = np.arange(10)\n assert_raises(np.AxisError, random.shuffle, arr, 1)\n arr = np.arange(9).reshape((3, 3))\n assert_raises(np.AxisError, random.shuffle, arr, 3)\n assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))\n arr = [[1, 2, 3], [4, 5, 6]]\n assert_raises(NotImplementedError, random.shuffle, arr, 1)\n\n def test_permutation(self):\n random = Generator(MT19937(self.seed))\n alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n actual = random.permutation(alist)\n desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]\n assert_array_equal(actual, desired)\n\n random = Generator(MT19937(self.seed))\n arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T\n actual = 
random.permutation(arr_2d)\n assert_array_equal(actual, np.atleast_2d(desired).T)\n \n bad_x_str = \"abcd\"\n assert_raises(np.AxisError, random.permutation, bad_x_str)\n\n bad_x_float = 1.2\n assert_raises(np.AxisError, random.permutation, bad_x_float)\n\n random = Generator(MT19937(self.seed))\n integer_val = 10\n desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]\n\n actual = random.permutation(integer_val)\n assert_array_equal(actual, desired)\n\n def test_permutation_custom_axis(self):\n a = np.arange(16).reshape((4, 4))\n desired = np.array([[ 0, 3, 1, 2],\n [ 4, 7, 5, 6],\n [ 8, 11, 9, 10],\n [12, 15, 13, 14]])\n random = Generator(MT19937(self.seed))\n actual = random.permutation(a, axis=1)\n assert_array_equal(actual, desired)\n random = Generator(MT19937(self.seed))\n actual = random.permutation(a, axis=-1)\n assert_array_equal(actual, desired)\n\n def test_permutation_exceptions(self):\n random = Generator(MT19937(self.seed))\n arr = np.arange(10)\n assert_raises(np.AxisError, random.permutation, arr, 1)\n arr = np.arange(9).reshape((3, 3))\n assert_raises(np.AxisError, random.permutation, arr, 3)\n assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))\n\n def test_beta(self):\n random = Generator(MT19937(self.seed))\n actual = random.beta(.1, .9, size=(3, 2))\n desired = np.array(\n [[1.083029353267698e-10, 2.449965303168024e-11],\n [2.397085162969853e-02, 3.590779671820755e-08],\n [2.830254190078299e-04, 1.744709918330393e-01]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_binomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.binomial(100.123, .456, size=(3, 2))\n desired = np.array([[42, 41],\n [42, 48],\n [44, 50]])\n assert_array_equal(actual, desired)\n\n random = Generator(MT19937(self.seed))\n actual = random.binomial(100.123, .456)\n desired = 42\n assert_array_equal(actual, desired)\n\n def test_chisquare(self):\n random = Generator(MT19937(self.seed))\n actual = random.chisquare(50, size=(3, 2))\n desired = np.array([[32.9850547060149, 39.0219480493301],\n [56.2006134779419, 57.3474165711485],\n [55.4243733880198, 55.4209797925213]])\n assert_array_almost_equal(actual, desired, decimal=13)\n\n def test_dirichlet(self):\n random = Generator(MT19937(self.seed))\n alpha = np.array([51.72840233779265162, 39.74494232180943953])\n actual = random.dirichlet(alpha, size=(3, 2))\n desired = np.array([[[0.5439892869558927, 0.45601071304410745],\n [0.5588917345860708, 0.4411082654139292 ]],\n [[0.5632074165063435, 0.43679258349365657],\n [0.54862581112627, 0.45137418887373015]],\n [[0.49961831357047226, 0.5003816864295278 ],\n [0.52374806183482, 0.47625193816517997]]])\n assert_array_almost_equal(actual, desired, decimal=15)\n bad_alpha = np.array([5.4e-01, -1.0e-16])\n assert_raises(ValueError, random.dirichlet, bad_alpha)\n\n random = Generator(MT19937(self.seed))\n alpha = np.array([51.72840233779265162, 39.74494232180943953])\n actual = random.dirichlet(alpha)\n assert_array_almost_equal(actual, desired[0, 0], decimal=15)\n\n def test_dirichlet_size(self):\n # gh-3173\n p = np.array([51.72840233779265162, 39.74494232180943953])\n assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))\n assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))\n assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))\n\n 
assert_raises(TypeError, random.dirichlet, p, float(1))\n\n def test_dirichlet_bad_alpha(self):\n # gh-2089\n alpha = np.array([5.4e-01, -1.0e-16])\n assert_raises(ValueError, random.dirichlet, alpha)\n\n # gh-15876\n assert_raises(ValueError, random.dirichlet, [[5, 1]])\n assert_raises(ValueError, random.dirichlet, [[5], [1]])\n assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])\n assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))\n\n def test_dirichlet_alpha_non_contiguous(self):\n a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])\n alpha = a[::2]\n random = Generator(MT19937(self.seed))\n non_contig = random.dirichlet(alpha, size=(3, 2))\n random = Generator(MT19937(self.seed))\n contig = random.dirichlet(np.ascontiguousarray(alpha),\n size=(3, 2))\n assert_array_almost_equal(non_contig, contig)\n\n def test_exponential(self):\n random = Generator(MT19937(self.seed))\n actual = random.exponential(1.1234, size=(3, 2))\n desired = np.array([[0.098845481066258, 1.560752510746964],\n [0.075730916041636, 1.769098974710777],\n [1.488602544592235, 2.49684815275751 ]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_exponential_0(self):\n assert_equal(random.exponential(scale=0), 0)\n assert_raises(ValueError, random.exponential, scale=-0.)\n\n def test_f(self):\n random = Generator(MT19937(self.seed))\n actual = random.f(12, 77, size=(3, 2))\n desired = np.array([[0.461720027077085, 1.100441958872451],\n [1.100337455217484, 0.91421736740018 ],\n [0.500811891303113, 0.826802454552058]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_gamma(self):\n random = Generator(MT19937(self.seed))\n actual = random.gamma(5, 3, size=(3, 2))\n desired = np.array([[ 5.03850858902096, 7.9228656732049 ],\n [18.73983605132985, 19.57961681699238],\n [18.17897755150825, 18.17653912505234]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_gamma_0(self):\n assert_equal(random.gamma(shape=0, scale=0), 0)\n assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)\n\n def test_geometric(self):\n random = Generator(MT19937(self.seed))\n actual = random.geometric(.123456789, size=(3, 2))\n desired = np.array([[ 1, 10],\n [ 1, 12],\n [ 9, 10]])\n assert_array_equal(actual, desired)\n\n def test_geometric_exceptions(self):\n assert_raises(ValueError, random.geometric, 1.1)\n assert_raises(ValueError, random.geometric, [1.1] * 10)\n assert_raises(ValueError, random.geometric, -0.1)\n assert_raises(ValueError, random.geometric, [-0.1] * 10)\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.geometric, np.nan)\n assert_raises(ValueError, random.geometric, [np.nan] * 10)\n\n def test_gumbel(self):\n random = Generator(MT19937(self.seed))\n actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[ 4.688397515056245, -0.289514845417841],\n [ 4.981176042584683, -0.633224272589149],\n [-0.055915275687488, -0.333962478257953]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_gumbel_0(self):\n assert_equal(random.gumbel(scale=0), 0)\n assert_raises(ValueError, random.gumbel, scale=-0.)\n\n def test_hypergeometric(self):\n random = Generator(MT19937(self.seed))\n actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))\n desired = np.array([[ 9, 9],\n [ 9, 9],\n [10, 9]])\n assert_array_equal(actual, desired)\n\n # Test nbad = 0\n actual = random.hypergeometric(5, 0, 3, size=4)\n desired = np.array([3, 3, 3, 3])\n 
assert_array_equal(actual, desired)\n\n actual = random.hypergeometric(15, 0, 12, size=4)\n desired = np.array([12, 12, 12, 12])\n assert_array_equal(actual, desired)\n\n # Test ngood = 0\n actual = random.hypergeometric(0, 5, 3, size=4)\n desired = np.array([0, 0, 0, 0])\n assert_array_equal(actual, desired)\n\n actual = random.hypergeometric(0, 15, 12, size=4)\n desired = np.array([0, 0, 0, 0])\n assert_array_equal(actual, desired)\n\n def test_laplace(self):\n random = Generator(MT19937(self.seed))\n actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[-3.156353949272393, 1.195863024830054],\n [-3.435458081645966, 1.656882398925444],\n [ 0.924824032467446, 1.251116432209336]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_laplace_0(self):\n assert_equal(random.laplace(scale=0), 0)\n assert_raises(ValueError, random.laplace, scale=-0.)\n\n def test_logistic(self):\n random = Generator(MT19937(self.seed))\n actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[-4.338584631510999, 1.890171436749954],\n [-4.64547787337966 , 2.514545562919217],\n [ 1.495389489198666, 1.967827627577474]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_lognormal(self):\n random = Generator(MT19937(self.seed))\n actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))\n desired = np.array([[ 0.0268252166335, 13.9534486483053],\n [ 0.1204014788936, 2.2422077497792],\n [ 4.2484199496128, 12.0093343977523]])\n assert_array_almost_equal(actual, desired, decimal=13)\n\n def test_lognormal_0(self):\n assert_equal(random.lognormal(sigma=0), 1)\n assert_raises(ValueError, random.lognormal, sigma=-0.)\n\n def test_logseries(self):\n random = Generator(MT19937(self.seed))\n actual = random.logseries(p=.923456789, size=(3, 2))\n desired = np.array([[14, 17],\n [3, 18],\n [5, 1]])\n assert_array_equal(actual, desired)\n\n def test_logseries_exceptions(self):\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.logseries, np.nan)\n assert_raises(ValueError, random.logseries, [np.nan] * 10)\n\n def test_multinomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2))\n desired = np.array([[[1, 5, 1, 6, 4, 3],\n [4, 2, 6, 2, 4, 2]],\n [[5, 3, 2, 6, 3, 1],\n [4, 4, 0, 2, 3, 7]],\n [[6, 3, 1, 5, 3, 2],\n [5, 5, 3, 1, 2, 4]]])\n assert_array_equal(actual, desired)\n\n @pytest.mark.parametrize(\"method\", [\"svd\", \"eigh\", \"cholesky\"])\n def test_multivariate_normal(self, method):\n random = Generator(MT19937(self.seed))\n mean = (.123456789, 10)\n cov = [[1, 0], [0, 1]]\n size = (3, 2)\n actual = random.multivariate_normal(mean, cov, size, method=method)\n desired = np.array([[[-1.747478062846581, 11.25613495182354 ],\n [-0.9967333370066214, 10.342002097029821 ]],\n [[ 0.7850019631242964, 11.181113712443013 ],\n [ 0.8901349653255224, 8.873825399642492 ]],\n [[ 0.7130260107430003, 9.551628690083056 ],\n [ 0.7127098726541128, 11.991709234143173 ]]])\n\n assert_array_almost_equal(actual, desired, decimal=15)\n\n # Check for default size, was raising deprecation warning\n actual = random.multivariate_normal(mean, cov, method=method)\n desired = np.array([0.233278563284287, 9.424140804347195])\n assert_array_almost_equal(actual, desired, decimal=15)\n # Check that non symmetric covariance input raises exception when\n # check_valid='raises' if using default svd method.\n mean = [0, 0]\n cov = [[1, 2], [1, 2]]\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='raise')\n\n # Check that non positive-semidefinite covariance warns with\n # RuntimeWarning\n cov = [[1, 2], [2, 1]]\n assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)\n assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,\n method='eigh')\n assert_raises(LinAlgError, random.multivariate_normal, mean, cov,\n method='cholesky')\n\n # and that it doesn't warn with RuntimeWarning check_valid='ignore'\n assert_no_warnings(random.multivariate_normal, mean, cov,\n check_valid='ignore')\n\n # and that it raises with RuntimeWarning check_valid='raises'\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='raise')\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='raise', method='eigh')\n\n # check degenerate samples from singular covariance matrix\n cov = [[1, 1], [1, 1]]\n if method in ('svd', 'eigh'):\n samples = random.multivariate_normal(mean, cov, size=(3, 2),\n method=method)\n assert_array_almost_equal(samples[..., 0], samples[..., 1],\n decimal=6)\n else:\n assert_raises(LinAlgError, random.multivariate_normal, mean, cov,\n method='cholesky')\n\n cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)\n with suppress_warnings() as sup:\n random.multivariate_normal(mean, cov, method=method)\n w = sup.record(RuntimeWarning)\n assert len(w) == 0\n\n mu = np.zeros(2)\n cov = np.eye(2)\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='other')\n assert_raises(ValueError, random.multivariate_normal,\n np.zeros((2, 1, 1)), cov)\n assert_raises(ValueError, random.multivariate_normal,\n mu, np.empty((3, 2)))\n assert_raises(ValueError, random.multivariate_normal,\n mu, np.eye(3))\n\n @pytest.mark.parametrize(\"method\", [\"svd\", \"eigh\", \"cholesky\"])\n def test_multivariate_normal_basic_stats(self, method):\n random = Generator(MT19937(self.seed))\n n_s = 1000\n mean = np.array([1, 2])\n cov = np.array([[2, 1], [1, 2]])\n s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)\n s_center = s - mean\n cov_emp = (s_center.T @ s_center) / (n_s - 1)\n # these are pretty loose and are only designed to detect major errors\n assert 
np.all(np.abs(s_center.mean(-2)) < 0.1)\n assert np.all(np.abs(cov_emp - cov) < 0.2)\n\n def test_negative_binomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))\n desired = np.array([[543, 727],\n [775, 760],\n [600, 674]])\n assert_array_equal(actual, desired)\n\n def test_negative_binomial_exceptions(self):\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.negative_binomial, 100, np.nan)\n assert_raises(ValueError, random.negative_binomial, 100,\n [np.nan] * 10)\n\n def test_negative_binomial_p0_exception(self):\n # Verify that p=0 raises an exception.\n with assert_raises(ValueError):\n x = random.negative_binomial(1, 0)\n\n def test_noncentral_chisquare(self):\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))\n desired = np.array([[ 1.70561552362133, 15.97378184942111],\n [13.71483425173724, 20.17859633310629],\n [11.3615477156643 , 3.67891108738029]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))\n desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],\n [1.14554372041263e+00, 1.38187755933435e-03],\n [1.90659181905387e+00, 1.21772577941822e+00]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))\n desired = np.array([[0.82947954590419, 1.80139670767078],\n [6.58720057417794, 7.00491463609814],\n [6.31101879073157, 6.30982307753005]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_noncentral_f(self):\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,\n size=(3, 2))\n desired = np.array([[0.060310671139 , 0.23866058175939],\n [0.86860246709073, 0.2668510459738 ],\n [0.23375780078364, 1.88922102885943]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_noncentral_f_nan(self):\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)\n assert np.isnan(actual)\n\n def test_normal(self):\n random = Generator(MT19937(self.seed))\n actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[-3.618412914693162, 2.635726692647081],\n [-2.116923463013243, 0.807460983059643],\n [ 1.446547137248593, 2.485684213886024]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_normal_0(self):\n assert_equal(random.normal(scale=0), 0)\n assert_raises(ValueError, random.normal, scale=-0.)\n\n def test_pareto(self):\n random = Generator(MT19937(self.seed))\n actual = random.pareto(a=.123456789, size=(3, 2))\n desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],\n [7.2640150889064703e-01, 3.4650454783825594e+05],\n [4.5852344481994740e+04, 6.5851383009539105e+07]])\n # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this\n # matrix differs by 24 nulps. 
Discussion:\n # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html\n # Consensus is that this is probably some gcc quirk that affects\n # rounding but not in any important way, so we just use a looser\n # tolerance on this test:\n np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)\n\n def test_poisson(self):\n random = Generator(MT19937(self.seed))\n actual = random.poisson(lam=.123456789, size=(3, 2))\n desired = np.array([[0, 0],\n [0, 0],\n [0, 0]])\n assert_array_equal(actual, desired)\n\n def test_poisson_exceptions(self):\n lambig = np.iinfo('int64').max\n lamneg = -1\n assert_raises(ValueError, random.poisson, lamneg)\n assert_raises(ValueError, random.poisson, [lamneg] * 10)\n assert_raises(ValueError, random.poisson, lambig)\n assert_raises(ValueError, random.poisson, [lambig] * 10)\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.poisson, np.nan)\n assert_raises(ValueError, random.poisson, [np.nan] * 10)\n\n def test_power(self):\n random = Generator(MT19937(self.seed))\n actual = random.power(a=.123456789, size=(3, 2))\n desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],\n [2.482442984543471e-10, 1.527108843266079e-01],\n [8.188283434244285e-02, 3.950547209346948e-01]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_rayleigh(self):\n random = Generator(MT19937(self.seed))\n actual = random.rayleigh(scale=10, size=(3, 2))\n desired = np.array([[ 4.51734079831581, 15.6802442485758 ],\n [ 4.19850651287094, 17.08718809823704],\n [14.7907457708776 , 15.85545333419775]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_rayleigh_0(self):\n assert_equal(random.rayleigh(scale=0), 0)\n assert_raises(ValueError, random.rayleigh, scale=-0.)\n\n def test_standard_cauchy(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_cauchy(size=(3, 2))\n desired = np.array([[-1.489437778266206, -3.275389641569784],\n [ 0.560102864910406, -0.680780916282552],\n [-1.314912905226277, 0.295852965660225]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_standard_exponential(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_exponential(size=(3, 2), method='inv')\n desired = np.array([[0.102031839440643, 1.229350298474972],\n [0.088137284693098, 1.459859985522667],\n [1.093830802293668, 1.256977002164613]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_standard_expoential_type_error(self):\n assert_raises(TypeError, random.standard_exponential, dtype=np.int32)\n\n def test_standard_gamma(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_gamma(shape=3, size=(3, 2))\n desired = np.array([[0.62970724056362, 1.22379851271008],\n [3.899412530884 , 4.12479964250139],\n [3.74994102464584, 3.74929307690815]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_standard_gammma_scalar_float(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_gamma(3, dtype=np.float32)\n desired = 2.9242148399353027\n assert_array_almost_equal(actual, desired, decimal=6)\n\n def test_standard_gamma_float(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_gamma(shape=3, size=(3, 2))\n desired = np.array([[0.62971, 1.2238 ],\n [3.89941, 4.1248 ],\n [3.74994, 3.74929]])\n assert_array_almost_equal(actual, desired, decimal=5)\n\n def test_standard_gammma_float_out(self):\n actual = np.zeros((3, 2), dtype=np.float32)\n random = 
Generator(MT19937(self.seed))\n random.standard_gamma(10.0, out=actual, dtype=np.float32)\n desired = np.array([[10.14987, 7.87012],\n [ 9.46284, 12.56832],\n [13.82495, 7.81533]], dtype=np.float32)\n assert_array_almost_equal(actual, desired, decimal=5)\n\n random = Generator(MT19937(self.seed))\n random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)\n assert_array_almost_equal(actual, desired, decimal=5)\n\n def test_standard_gamma_unknown_type(self):\n assert_raises(TypeError, random.standard_gamma, 1.,\n dtype='int32')\n\n def test_out_size_mismatch(self):\n out = np.zeros(10)\n assert_raises(ValueError, random.standard_gamma, 10.0, size=20,\n out=out)\n assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),\n out=out)\n\n def test_standard_gamma_0(self):\n assert_equal(random.standard_gamma(shape=0), 0)\n assert_raises(ValueError, random.standard_gamma, shape=-0.)\n\n def test_standard_normal(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_normal(size=(3, 2))\n desired = np.array([[-1.870934851846581, 1.25613495182354 ],\n [-1.120190126006621, 0.342002097029821],\n [ 0.661545174124296, 1.181113712443012]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_standard_normal_unsupported_type(self):\n assert_raises(TypeError, random.standard_normal, dtype=np.int32)\n\n def test_standard_t(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_t(df=10, size=(3, 2))\n desired = np.array([[-1.484666193042647, 0.30597891831161 ],\n [ 1.056684299648085, -0.407312602088507],\n [ 0.130704414281157, -2.038053410490321]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_triangular(self):\n random = Generator(MT19937(self.seed))\n actual = random.triangular(left=5.12, mode=10.23, right=20.34,\n size=(3, 2))\n desired = np.array([[ 7.86664070590917, 13.6313848513185 ],\n [ 7.68152445215983, 14.36169131136546],\n [13.16105603911429, 13.72341621856971]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_uniform(self):\n random = Generator(MT19937(self.seed))\n actual = random.uniform(low=1.23, high=10.54, size=(3, 2))\n desired = np.array([[2.13306255040998 , 7.816987531021207],\n [2.015436610109887, 8.377577533009589],\n [7.421792588856135, 7.891185744455209]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_uniform_range_bounds(self):\n fmin = np.finfo('float').min\n fmax = np.finfo('float').max\n\n func = random.uniform\n assert_raises(OverflowError, func, -np.inf, 0)\n assert_raises(OverflowError, func, 0, np.inf)\n assert_raises(OverflowError, func, fmin, fmax)\n assert_raises(OverflowError, func, [-np.inf], [0])\n assert_raises(OverflowError, func, [0], [np.inf])\n\n # (fmax / 1e17) - fmin is within range, so this should not throw\n # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >\n # DBL_MAX by increasing fmin a bit\n random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)\n\n def test_scalar_exception_propagation(self):\n # Tests that exceptions are correctly propagated in distributions\n # when called with objects that throw exceptions when converted to\n # scalars.\n #\n # Regression test for gh: 8865\n\n class ThrowingFloat(np.ndarray):\n def __float__(self):\n raise TypeError\n\n throwing_float = np.array(1.0).view(ThrowingFloat)\n assert_raises(TypeError, random.uniform, throwing_float,\n throwing_float)\n\n class ThrowingInteger(np.ndarray):\n def __int__(self):\n raise TypeError\n\n throwing_int = 
np.array(1).view(ThrowingInteger)\n assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)\n\n def test_vonmises(self):\n random = Generator(MT19937(self.seed))\n actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))\n desired = np.array([[ 1.107972248690106, 2.841536476232361],\n [ 1.832602376042457, 1.945511926976032],\n [-0.260147475776542, 2.058047492231698]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_vonmises_small(self):\n # check infinite loop, gh-4720\n random = Generator(MT19937(self.seed))\n r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)\n assert_(np.isfinite(r).all())\n\n def test_vonmises_nan(self):\n random = Generator(MT19937(self.seed))\n r = random.vonmises(mu=0., kappa=np.nan)\n assert_(np.isnan(r))\n\n def test_wald(self):\n random = Generator(MT19937(self.seed))\n actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))\n desired = np.array([[0.26871721804551, 3.2233942732115 ],\n [2.20328374987066, 2.40958405189353],\n [2.07093587449261, 0.73073890064369]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_weibull(self):\n random = Generator(MT19937(self.seed))\n actual = random.weibull(a=1.23, size=(3, 2))\n desired = np.array([[0.138613914769468, 1.306463419753191],\n [0.111623365934763, 1.446570494646721],\n [1.257145775276011, 1.914247725027957]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_weibull_0(self):\n random = Generator(MT19937(self.seed))\n assert_equal(random.weibull(a=0, size=12), np.zeros(12))\n assert_raises(ValueError, random.weibull, a=-0.)\n\n def test_zipf(self):\n random = Generator(MT19937(self.seed))\n actual = random.zipf(a=1.23, size=(3, 2))\n desired = np.array([[ 1, 1],\n [ 10, 867],\n [354, 2]])\n assert_array_equal(actual, desired)\n\n\nclass TestBroadcast:\n # tests that functions that broadcast behave\n # correctly when presented with non-scalar arguments\n def setup(self):\n self.seed = 123456789\n\n\n def test_uniform(self):\n random = Generator(MT19937(self.seed))\n low = [0]\n high = [1]\n uniform = random.uniform\n desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])\n\n random = Generator(MT19937(self.seed))\n actual = random.uniform(low * 3, high)\n assert_array_almost_equal(actual, desired, decimal=14)\n\n random = Generator(MT19937(self.seed))\n actual = random.uniform(low, high * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_normal(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n random = Generator(MT19937(self.seed))\n desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])\n\n random = Generator(MT19937(self.seed))\n actual = random.normal(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.normal, loc * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n normal = random.normal\n actual = normal(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, normal, loc, bad_scale * 3)\n\n def test_beta(self):\n a = [1]\n b = [2]\n bad_a = [-1]\n bad_b = [-2]\n desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])\n\n random = Generator(MT19937(self.seed))\n beta = random.beta\n actual = beta(a * 3, b)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, beta, bad_a * 3, b)\n assert_raises(ValueError, beta, a * 3, bad_b)\n\n random = Generator(MT19937(self.seed))\n actual = random.beta(a, b * 3)\n 
assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_exponential(self):\n scale = [1]\n bad_scale = [-1]\n desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])\n\n random = Generator(MT19937(self.seed))\n actual = random.exponential(scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.exponential, bad_scale * 3)\n\n def test_standard_gamma(self):\n shape = [1]\n bad_shape = [-1]\n desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])\n\n random = Generator(MT19937(self.seed))\n std_gamma = random.standard_gamma\n actual = std_gamma(shape * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, std_gamma, bad_shape * 3)\n\n def test_gamma(self):\n shape = [1]\n scale = [2]\n bad_shape = [-1]\n bad_scale = [-2]\n desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])\n\n random = Generator(MT19937(self.seed))\n gamma = random.gamma\n actual = gamma(shape * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gamma, bad_shape * 3, scale)\n assert_raises(ValueError, gamma, shape * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n gamma = random.gamma\n actual = gamma(shape, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gamma, bad_shape, scale * 3)\n assert_raises(ValueError, gamma, shape, bad_scale * 3)\n\n def test_f(self):\n dfnum = [1]\n dfden = [2]\n bad_dfnum = [-1]\n bad_dfden = [-2]\n desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])\n\n random = Generator(MT19937(self.seed))\n f = random.f\n actual = f(dfnum * 3, dfden)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, f, bad_dfnum * 3, dfden)\n assert_raises(ValueError, f, dfnum * 3, bad_dfden)\n\n random = Generator(MT19937(self.seed))\n f = random.f\n actual = f(dfnum, dfden * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, f, bad_dfnum, dfden * 3)\n assert_raises(ValueError, f, dfnum, bad_dfden * 3)\n\n def test_noncentral_f(self):\n dfnum = [2]\n dfden = [3]\n nonc = [4]\n bad_dfnum = [0]\n bad_dfden = [-1]\n bad_nonc = [-2]\n desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])\n\n random = Generator(MT19937(self.seed))\n nonc_f = random.noncentral_f\n actual = nonc_f(dfnum * 3, dfden, nonc)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))\n\n assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)\n assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)\n assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)\n\n random = Generator(MT19937(self.seed))\n nonc_f = random.noncentral_f\n actual = nonc_f(dfnum, dfden * 3, nonc)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)\n assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)\n assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)\n\n random = Generator(MT19937(self.seed))\n nonc_f = random.noncentral_f\n actual = nonc_f(dfnum, dfden, nonc * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)\n assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)\n assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)\n\n def 
test_noncentral_f_small_df(self):\n random = Generator(MT19937(self.seed))\n desired = np.array([0.04714867120827, 0.1239390327694])\n actual = random.noncentral_f(0.9, 0.9, 2, size=2)\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_chisquare(self):\n df = [1]\n bad_df = [-1]\n desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])\n\n random = Generator(MT19937(self.seed))\n actual = random.chisquare(df * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.chisquare, bad_df * 3)\n\n def test_noncentral_chisquare(self):\n df = [1]\n nonc = [2]\n bad_df = [-1]\n bad_nonc = [-2]\n desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])\n\n random = Generator(MT19937(self.seed))\n nonc_chi = random.noncentral_chisquare\n actual = nonc_chi(df * 3, nonc)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)\n assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)\n\n random = Generator(MT19937(self.seed))\n nonc_chi = random.noncentral_chisquare\n actual = nonc_chi(df, nonc * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)\n assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)\n\n def test_standard_t(self):\n df = [1]\n bad_df = [-1]\n desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])\n\n random = Generator(MT19937(self.seed))\n actual = random.standard_t(df * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.standard_t, bad_df * 3)\n\n def test_vonmises(self):\n mu = [2]\n kappa = [1]\n bad_kappa = [-1]\n desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])\n\n random = Generator(MT19937(self.seed))\n actual = random.vonmises(mu * 3, kappa)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)\n\n random = Generator(MT19937(self.seed))\n actual = random.vonmises(mu, kappa * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)\n\n def test_pareto(self):\n a = [1]\n bad_a = [-1]\n desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])\n\n random = Generator(MT19937(self.seed))\n actual = random.pareto(a * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.pareto, bad_a * 3)\n\n def test_weibull(self):\n a = [1]\n bad_a = [-1]\n desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])\n\n random = Generator(MT19937(self.seed))\n actual = random.weibull(a * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.weibull, bad_a * 3)\n\n def test_power(self):\n a = [1]\n bad_a = [-1]\n desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])\n\n random = Generator(MT19937(self.seed))\n actual = random.power(a * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.power, bad_a * 3)\n\n def test_laplace(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])\n\n random = Generator(MT19937(self.seed))\n laplace = random.laplace\n actual = laplace(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, laplace, loc * 3, bad_scale)\n\n random = 
Generator(MT19937(self.seed))\n laplace = random.laplace\n actual = laplace(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, laplace, loc, bad_scale * 3)\n\n def test_gumbel(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])\n\n random = Generator(MT19937(self.seed))\n gumbel = random.gumbel\n actual = gumbel(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gumbel, loc * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n gumbel = random.gumbel\n actual = gumbel(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gumbel, loc, bad_scale * 3)\n\n def test_logistic(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])\n\n random = Generator(MT19937(self.seed))\n actual = random.logistic(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.logistic, loc * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n actual = random.logistic(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.logistic, loc, bad_scale * 3)\n assert_equal(random.logistic(1.0, 0.0), 1.0)\n\n def test_lognormal(self):\n mean = [0]\n sigma = [1]\n bad_sigma = [-1]\n desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])\n\n random = Generator(MT19937(self.seed))\n lognormal = random.lognormal\n actual = lognormal(mean * 3, sigma)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, lognormal, mean * 3, bad_sigma)\n\n random = Generator(MT19937(self.seed))\n actual = random.lognormal(mean, sigma * 3)\n assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)\n\n def test_rayleigh(self):\n scale = [1]\n bad_scale = [-1]\n desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])\n\n random = Generator(MT19937(self.seed))\n actual = random.rayleigh(scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.rayleigh, bad_scale * 3)\n\n def test_wald(self):\n mean = [0.5]\n scale = [1]\n bad_mean = [0]\n bad_scale = [-2]\n desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])\n\n random = Generator(MT19937(self.seed))\n actual = random.wald(mean * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.wald, bad_mean * 3, scale)\n assert_raises(ValueError, random.wald, mean * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n actual = random.wald(mean, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.wald, bad_mean, scale * 3)\n assert_raises(ValueError, random.wald, mean, bad_scale * 3)\n\n def test_triangular(self):\n left = [1]\n right = [3]\n mode = [2]\n bad_left_one = [3]\n bad_mode_one = [4]\n bad_left_two, bad_mode_two = right * 2\n desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])\n\n random = Generator(MT19937(self.seed))\n triangular = random.triangular\n actual = triangular(left * 3, mode, right)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)\n assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)\n assert_raises(ValueError, 
triangular, bad_left_two * 3, bad_mode_two,\n right)\n\n random = Generator(MT19937(self.seed))\n triangular = random.triangular\n actual = triangular(left, mode * 3, right)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)\n assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)\n assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,\n right)\n\n random = Generator(MT19937(self.seed))\n triangular = random.triangular\n actual = triangular(left, mode, right * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)\n assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)\n assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,\n right * 3)\n\n assert_raises(ValueError, triangular, 10., 0., 20.)\n assert_raises(ValueError, triangular, 10., 25., 20.)\n assert_raises(ValueError, triangular, 10., 10., 10.)\n\n def test_binomial(self):\n n = [1]\n p = [0.5]\n bad_n = [-1]\n bad_p_one = [-1]\n bad_p_two = [1.5]\n desired = np.array([0, 0, 1])\n\n random = Generator(MT19937(self.seed))\n binom = random.binomial\n actual = binom(n * 3, p)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, binom, bad_n * 3, p)\n assert_raises(ValueError, binom, n * 3, bad_p_one)\n assert_raises(ValueError, binom, n * 3, bad_p_two)\n\n random = Generator(MT19937(self.seed))\n actual = random.binomial(n, p * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, binom, bad_n, p * 3)\n assert_raises(ValueError, binom, n, bad_p_one * 3)\n assert_raises(ValueError, binom, n, bad_p_two * 3)\n\n def test_negative_binomial(self):\n n = [1]\n p = [0.5]\n bad_n = [-1]\n bad_p_one = [-1]\n bad_p_two = [1.5]\n desired = np.array([0, 2, 1], dtype=np.int64)\n\n random = Generator(MT19937(self.seed))\n neg_binom = random.negative_binomial\n actual = neg_binom(n * 3, p)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, neg_binom, bad_n * 3, p)\n assert_raises(ValueError, neg_binom, n * 3, bad_p_one)\n assert_raises(ValueError, neg_binom, n * 3, bad_p_two)\n\n random = Generator(MT19937(self.seed))\n neg_binom = random.negative_binomial\n actual = neg_binom(n, p * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, neg_binom, bad_n, p * 3)\n assert_raises(ValueError, neg_binom, n, bad_p_one * 3)\n assert_raises(ValueError, neg_binom, n, bad_p_two * 3)\n\n def test_poisson(self):\n\n lam = [1]\n bad_lam_one = [-1]\n desired = np.array([0, 0, 3])\n\n random = Generator(MT19937(self.seed))\n max_lam = random._poisson_lam_max\n bad_lam_two = [max_lam * 2]\n poisson = random.poisson\n actual = poisson(lam * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, poisson, bad_lam_one * 3)\n assert_raises(ValueError, poisson, bad_lam_two * 3)\n\n def test_zipf(self):\n a = [2]\n bad_a = [0]\n desired = np.array([1, 8, 1])\n\n random = Generator(MT19937(self.seed))\n zipf = random.zipf\n actual = zipf(a * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, zipf, bad_a * 3)\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, zipf, np.nan)\n assert_raises(ValueError, zipf, [0, 0, np.nan])\n\n def test_geometric(self):\n p = [0.5]\n bad_p_one = [-1]\n bad_p_two = [1.5]\n desired = np.array([1, 1, 3])\n\n random = Generator(MT19937(self.seed))\n geometric = random.geometric\n actual = geometric(p * 3)\n assert_array_equal(actual, 
desired)\n assert_raises(ValueError, geometric, bad_p_one * 3)\n assert_raises(ValueError, geometric, bad_p_two * 3)\n\n def test_hypergeometric(self):\n ngood = [1]\n nbad = [2]\n nsample = [2]\n bad_ngood = [-1]\n bad_nbad = [-2]\n bad_nsample_one = [-1]\n bad_nsample_two = [4]\n desired = np.array([0, 0, 1])\n\n random = Generator(MT19937(self.seed))\n actual = random.hypergeometric(ngood * 3, nbad, nsample)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)\n assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)\n\n random = Generator(MT19937(self.seed))\n actual = random.hypergeometric(ngood, nbad * 3, nsample)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)\n assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)\n\n random = Generator(MT19937(self.seed))\n hypergeom = random.hypergeometric\n actual = hypergeom(ngood, nbad, nsample * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)\n assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)\n assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)\n assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)\n\n assert_raises(ValueError, hypergeom, -1, 10, 20)\n assert_raises(ValueError, hypergeom, 10, -1, 20)\n assert_raises(ValueError, hypergeom, 10, 10, -1)\n assert_raises(ValueError, hypergeom, 10, 10, 25)\n\n # ValueError for arguments that are too big.\n assert_raises(ValueError, hypergeom, 2**30, 10, 20)\n assert_raises(ValueError, hypergeom, 999, 2**31, 50)\n assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)\n\n def test_logseries(self):\n p = [0.5]\n bad_p_one = [2]\n bad_p_two = [-1]\n desired = np.array([1, 1, 1])\n\n random = Generator(MT19937(self.seed))\n logseries = random.logseries\n actual = logseries(p * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, logseries, bad_p_one * 3)\n assert_raises(ValueError, logseries, bad_p_two * 3)\n\n def test_multinomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))\n desired = np.array([[[0, 0, 2, 1, 2, 0],\n [2, 3, 6, 4, 2, 3]],\n [[1, 0, 1, 0, 2, 1],\n [7, 2, 2, 1, 4, 4]],\n [[0, 2, 0, 1, 2, 0],\n [3, 2, 3, 3, 4, 5]]], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n random = Generator(MT19937(self.seed))\n actual = random.multinomial([5, 20], [1 / 6.] 
* 6)\n desired = np.array([[0, 0, 2, 1, 2, 0],\n [2, 3, 6, 4, 2, 3]], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n\nclass TestThread:\n # make sure each state produces the same sequence even in threads\n def setup(self):\n self.seeds = range(4)\n\n def check_function(self, function, sz):\n from threading import Thread\n\n out1 = np.empty((len(self.seeds),) + sz)\n out2 = np.empty((len(self.seeds),) + sz)\n\n # threaded generation\n t = [Thread(target=function, args=(Generator(MT19937(s)), o))\n for s, o in zip(self.seeds, out1)]\n [x.start() for x in t]\n [x.join() for x in t]\n\n # the same serial\n for s, o in zip(self.seeds, out2):\n function(Generator(MT19937(s)), o)\n\n # these platforms change x87 fpu precision mode in threads\n if np.intp().dtype.itemsize == 4 and sys.platform == \"win32\":\n assert_array_almost_equal(out1, out2)\n else:\n assert_array_equal(out1, out2)\n\n def test_normal(self):\n def gen_random(state, out):\n out[...] = state.normal(size=10000)\n\n self.check_function(gen_random, sz=(10000,))\n\n def test_exp(self):\n def gen_random(state, out):\n out[...] = state.exponential(scale=np.ones((100, 1000)))\n\n self.check_function(gen_random, sz=(100, 1000))\n\n def test_multinomial(self):\n def gen_random(state, out):\n out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)\n\n self.check_function(gen_random, sz=(10000, 6))\n\n\n# See Issue #4263\nclass TestSingleEltArrayInput:\n def setup(self):\n self.argOne = np.array([2])\n self.argTwo = np.array([3])\n self.argThree = np.array([4])\n self.tgtShape = (1,)\n\n def test_one_arg_funcs(self):\n funcs = (random.exponential, random.standard_gamma,\n random.chisquare, random.standard_t,\n random.pareto, random.weibull,\n random.power, random.rayleigh,\n random.poisson, random.zipf,\n random.geometric, random.logseries)\n\n probfuncs = (random.geometric, random.logseries)\n\n for func in funcs:\n if func in probfuncs: # p < 1.0\n out = func(np.array([0.5]))\n\n else:\n out = func(self.argOne)\n\n assert_equal(out.shape, self.tgtShape)\n\n def test_two_arg_funcs(self):\n funcs = (random.uniform, random.normal,\n random.beta, random.gamma,\n random.f, random.noncentral_chisquare,\n random.vonmises, random.laplace,\n random.gumbel, random.logistic,\n random.lognormal, random.wald,\n random.binomial, random.negative_binomial)\n\n probfuncs = (random.binomial, random.negative_binomial)\n\n for func in funcs:\n if func in probfuncs: # p <= 1\n argTwo = np.array([0.5])\n\n else:\n argTwo = self.argTwo\n\n out = func(self.argOne, argTwo)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne[0], argTwo)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne, argTwo[0])\n assert_equal(out.shape, self.tgtShape)\n\n def test_integers(self, endpoint):\n itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,\n np.int32, np.uint32, np.int64, np.uint64]\n func = random.integers\n high = np.array([1])\n low = np.array([0])\n\n for dt in itype:\n out = func(low, high, endpoint=endpoint, dtype=dt)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(low[0], high, endpoint=endpoint, dtype=dt)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(low, high[0], endpoint=endpoint, dtype=dt)\n assert_equal(out.shape, self.tgtShape)\n\n def test_three_arg_funcs(self):\n funcs = [random.noncentral_f, random.triangular,\n random.hypergeometric]\n\n for func in funcs:\n out = func(self.argOne, self.argTwo, self.argThree)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne[0], 
self.argTwo, self.argThree)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne, self.argTwo[0], self.argThree)\n assert_equal(out.shape, self.tgtShape)\n" ]
[ [ "numpy.random.MT19937", "numpy.testing.assert_no_warnings", "numpy.asarray", "numpy.vstack", "numpy.dtype", "numpy.all", "numpy.iinfo", "numpy.nextafter", "numpy.testing.assert_equal", "numpy.uint32", "numpy.unique", "numpy.testing.suppress_warnings", "numpy.arange", "numpy.eye", "numpy.full", "numpy.finfo", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "numpy.isnan", "numpy.testing.assert_array_almost_equal_nulp", "numpy.ascontiguousarray", "numpy.atleast_2d", "numpy.testing.assert_raises", "numpy.testing.assert_", "numpy.errstate", "numpy.array", "numpy.testing.assert_warns", "numpy.sum", "numpy.abs", "numpy.isfinite", "numpy.intp", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.random.SeedSequence", "numpy.empty" ] ]
manoloesparta/deeps
[ "5e073da05305f275b1f5930ea2a2746141cb9d1e" ]
[ "churn_modeling/main.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom train import train_model\nfrom save_model import save_model\nfrom load_model import load_model\nfrom preprocessing import preprocessing\nfrom one_sample import one_sample\n\nfrom pathlib import Path\n\nfrom sklearn.metrics import classification_report\n\ndataset = pd.read_csv(\"Churn_Modelling.csv\")\nX_train,y_train,X_test,y_test,scaler = preprocessing(dataset)\n\nload = int(input(\"Load Model (0/1): \"))\n\nif load == 1:\n\n\tfile = input(\"File (no extensions): \")\n\n\tif not Path(file + '.json').is_file():\n\t\tprint(\"File not found\")\n\t\tload = 0\n\telse:\n\t\tmodel = load_model(file)\n\nif load == 0:\n\n\tprint(\"Creating neural network...\")\n\n\tfile = input(\"Name: \")\n\tepochs = int(input(\"Epcohs: \"))\n\tbatch = int(input(\"Batch Size: \"))\n\n\tmodel = train_model(X_train,y_train,batch,epochs,file)\n\n\nmore_train = int(input(\"More training (0/1): \"))\n\nif more_train == 1:\n\n\tmore_epochs = int(input(\"How much epochs: \"))\n\tbatch = int(input(\"Batch Size: \"))\n\tmodel = train_model(X_train,y_train,batch,more_epochs,file)\n\n\npredict = int(input(\"Predict y_test (0/1): \"))\n\ny_pred = model.predict(X_test)\ny_pred_bool = (y_pred > 0.5)\n\nif predict == 1:\n\n\tprint(\"Probability of each one to quit, sample = 10\")\n\tfor i in y_pred[0:10]:\n\t\tprint(i)\n\n\tprint(\"They will quit in the next 6 months?, sample = 10\")\n\tfor i in y_pred_bool[0:10]:\n\t\tprint(i)\n\nmedir = int(input(\"Check precision (0/1): \"))\n\nif medir == 1:\n\n\taccuracy = classification_report(y_test,y_pred_bool)\n\tprint(accuracy)\n\ntryit = int(input(\"Predict over one sample (0/1): \"))\n\nif tryit == 1:\n\n\tpred = model.predict(one_sample())\n\tprint(\"Probability to get out\")\n\tprint(pred)\n\n\tpred = (pred > 0.5)\n\tprint(\"He will quit in the next 6 months?\")\n\tprint(pred)\n\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.classification_report" ] ]
bryan-brancotte/rank-aggregation-with-ties
[ "15fffb0b1bee3d6cef7090486a7c910e5f51195d" ]
[ "sources/rnt/mediane/algorithms/Schulze/Schulze.py" ]
[ "from mediane.algorithms.median_ranking import MedianRanking # , DistanceNotHandledException\nfrom mediane.distances.enumeration import GENERALIZED_KENDALL_TAU_DISTANCE, GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE, \\\n PSEUDO_METRIC_BASED_ON_GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE, GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION\nfrom typing import List\nfrom numpy import zeros\nimport numpy as np\n\n\nclass Schulze(MedianRanking):\n\n def compute_median_rankings(\n self,\n rankings: List[List[List[int]]],\n distance,\n return_at_most_one_ranking: bool = False)-> List[List[List[int]]]:\n \"\"\"\n :param rankings: A set of rankings\n :type rankings: list\n :param distance: The distance to use/consider\n :type distance: Distance\n :param return_at_most_one_ranking: the algorithm should not return more than one ranking\n :type return_at_most_one_ranking: bool\n :return one or more consensus if the underlying algorithm can find multiple solution as good as each other.\n If the algorithm is not able to provide multiple consensus, or if return_at_most_one_ranking is True then, it\n should return a list made of the only / the first consensus found\n :raise DistanceNotHandledException when the algorithm cannot compute the consensus following the distance given\n as parameter\n \"\"\"\n all_elements = []\n consensus = [all_elements]\n elements = set()\n for ranking in rankings:\n for bucket in ranking:\n for element in bucket:\n elements.add(element)\n if len(elements) == 0:\n return []\n for element in elements:\n all_elements.append(element)\n return [consensus]\n\n @staticmethod\n def prepare_internal_vars(rankings: List[List[List[int]]]):\n elements_id = {}\n id_elements = {}\n id_element = 0\n for ranking in rankings:\n for bucket in ranking:\n print(bucket)\n for element in bucket:\n print(element)\n if element not in elements_id:\n elements_id[element] = id_element\n id_elements[id_element] = element\n id_element += 1\n\n # print(elements)\n\n positions = zeros((len(elements_id), len(rankings)), dtype=int) - 1\n\n # print(positions)\n\n id_ranking = 0\n for ranking in rankings:\n id_bucket = 0\n for bucket in ranking:\n for element in bucket:\n positions[elements_id.get(element)][id_ranking] = id_bucket\n id_bucket += 1\n id_ranking += 1\n return elements_id, id_elements, positions\n\n @staticmethod\n def calculate_pairwise_score(matrix_ranking: np.ndarray, i: int, j: int, v_b: np.ndarray):\n\n a = np.sum(matrix_ranking[i] < matrix_ranking[j])\n b = np.sum(matrix_ranking[i] > matrix_ranking[j])\n c = np.sum(matrix_ranking[i] == -1)\n d = np.sum(matrix_ranking[j] == -1)\n e = np.sum(matrix_ranking[i] + matrix_ranking[j] == -2)\n f = np.sum(matrix_ranking[i] == matrix_ranking[j])\n\n return v_b[0] * (a - c) + v_b[1] * (b - d) + v_b[2] * (f - e) + v_b[3] * (d - e) + v_b[4] * (c - e) + v_b[5] * e\n\n @staticmethod\n def pairwise_preferences(matrix_ranking: np.ndarray, nb_elements: int, vector_b: np.ndarray):\n\n matrix_pairwise_pref = zeros((nb_elements, nb_elements), dtype=int)\n\n i = 0\n while i < nb_elements:\n\n j = i + 1\n while j < nb_elements:\n matrix_pairwise_pref[i][j] = Schulze.calculate_pairwise_score(matrix_ranking, i, j, vector_b)\n matrix_pairwise_pref[j][i] = Schulze.calculate_pairwise_score(matrix_ranking, j, i, vector_b)\n\n j += 1\n\n i += 1\n\n return matrix_pairwise_pref\n\n @staticmethod\n def link_strength(matrix_pairwise_pref: List[List[int]], elements: List, strength_rule_calculation='margin'):\n\n # séparer en différents cas fonctions. 
if avant les boucles+ enum\n\n matrix_p = zeros((len(elements), len(elements)), dtype=int)\n matrix_pred = zeros((len(elements), len(elements)), dtype=int)\n\n i = 0\n while i < len(elements):\n j = 0\n while j < len(elements):\n if i != j:\n if strength_rule_calculation == 'ratio':\n strength = matrix_pairwise_pref[i][j] / matrix_pairwise_pref[j][i]\n elif strength_rule_calculation == 'winning_votes':\n strength = matrix_pairwise_pref[i][j]\n elif strength_rule_calculation == 'losing_votes':\n strength = len(elements) - matrix_pairwise_pref[j][i]\n else: # strength_rule_calculation == 'margin'\n strength = matrix_pairwise_pref[i][j] - matrix_pairwise_pref[j][i]\n\n matrix_p[i][j] = strength\n matrix_pred[i][j] = j\n else:\n matrix_p[i][j] = 0\n matrix_pred[i][j] = -1\n\n j += 1\n\n i += 1\n\n return [matrix_p, matrix_pred]\n\n @staticmethod\n def strength_strongest_path(matrix_p: List[List], matrix_pred: List[List], elements: List):\n\n i = 0\n while i < len(elements):\n j = 0\n while j < len(elements):\n if i != j:\n k = 0\n while k < len(elements):\n if i != k:\n if j != k:\n \"\"\"\n print(\"i : {}, j : {}, k : {}\".format(i, j, k))\n print(Matrix_P[j][k])\n print(Matrix_P[j][i])\n print(Matrix_P[i][k])\n \"\"\"\n if matrix_p[j][k] < min(matrix_p[j][i], matrix_p[i][k]):\n matrix_p[j][k] = min(matrix_p[j][i], matrix_p[i][k])\n if matrix_pred[j][k] != matrix_pred[i][k]:\n matrix_pred[j][k] = matrix_pred[i][k]\n k += 1\n\n j += 1\n\n i += 1\n\n return matrix_p\n\n @staticmethod\n def binary_relation(matrix_p: List[List], elements: List):\n\n elements_score = []\n i = 0\n while i < len(elements):\n score = 0\n j = 0\n while j < len(elements):\n if matrix_p[i][j] > matrix_p[j][i]:\n score += 1\n j += 1\n\n elements_score.append(score)\n i += 1\n\n return elements_score\n\n @staticmethod\n def compute_consensus(elements: List, final_scores: List):\n\n d_consensus = {}\n for i in range(0, len(final_scores)):\n if final_scores[i] not in d_consensus:\n d_consensus[final_scores[i]] = []\n d_consensus[final_scores[i]].append(elements[i])\n print(d_consensus)\n d_consensus_sorted = sorted(d_consensus)\n\n consensus = []\n for element in d_consensus_sorted:\n consensus.append(d_consensus[element])\n\n return consensus[::-1]\n\n def is_breaking_ties_arbitrarily(self) -> bool:\n return False\n\n def is_using_random_value(self) -> bool:\n return False\n\n def get_full_name(self) -> str:\n return \"Schulze method\"\n\n def can_be_executed(self) -> bool:\n \"\"\"\n :return true if the algorithm can be run without any risk of missing lib such as cplex\n \"\"\"\n return True\n\n def get_handled_distances(self) -> List[int]:\n \"\"\"\n\n :return: a list of distances from distance_enumeration\n \"\"\"\n return [\n GENERALIZED_KENDALL_TAU_DISTANCE,\n GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE,\n PSEUDO_METRIC_BASED_ON_GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE,\n GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION\n ]\n" ]
[ [ "numpy.zeros", "numpy.sum" ] ]
huonw/stellargraph
[ "60edf4a6268f29b49b7c768c382e235af4108506" ]
[ "demos/node-classification-graphsage/graphsage-cora-example.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright 2018 Data61, CSIRO\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nGraph node classification using GraphSAGE.\nThis currently is only tested on the CORA dataset, which can be downloaded from https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz\n\nThe following is the description of the dataset:\n> The Cora dataset consists of 2708 scientific publications classified into one of seven classes.\n> The citation network consists of 5429 links. Each publication in the dataset is described by a\n> 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary.\n> The dictionary consists of 1433 unique words. The README file in the dataset provides more details.\n\nDownload and unzip the cora.tgz file to a location on your computer and pass this location\n(which should contain cora.cites and cora.content) as a command line argument to this script.\n\nRun this script as follows:\n python graphsage-cora-example.py -l <path_to_cora_dataset>\n\nOther optional arguments can be seen by running\n python graphsage-cora-example.py --help\n\n\"\"\"\nimport os\nimport argparse\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport keras\nfrom keras import optimizers, losses, layers, metrics\nfrom sklearn import preprocessing, feature_extraction, model_selection\nimport stellargraph as sg\nfrom stellargraph.layer import GraphSAGE, MeanAggregator\nfrom stellargraph.mapper import GraphSAGENodeGenerator\n\n\ndef train(\n edgelist,\n node_data,\n layer_size,\n num_samples,\n batch_size=100,\n num_epochs=10,\n learning_rate=0.005,\n dropout=0.0,\n target_name=\"subject\",\n):\n \"\"\"\n Train a GraphSAGE model on the specified graph G with given parameters, evaluate it, and save the model.\n\n Args:\n edgelist: Graph edgelist\n node_data: Feature and target data for nodes\n layer_size: A list of number of hidden nodes in each layer\n num_samples: Number of neighbours to sample at each layer\n batch_size: Size of batch for inference\n num_epochs: Number of epochs to train the model\n learning_rate: Initial Learning rate\n dropout: The dropout (0->1)\n \"\"\"\n # Extract target and encode as a one-hot vector\n target_encoding = feature_extraction.DictVectorizer(sparse=False)\n node_targets = target_encoding.fit_transform(\n node_data[[target_name]].to_dict(\"records\")\n )\n node_ids = node_data.index\n\n # Extract the feature data. 
These are the feature vectors that the Keras model will use as input.\n    # The CORA dataset contains attributes 'w_x' that correspond to words found in that publication.\n    node_features = node_data[feature_names]\n\n    # Create graph from edgelist and set node features and node type\n    Gnx = nx.from_pandas_edgelist(edgelist)\n\n    # Convert to StellarGraph and prepare for ML\n    G = sg.StellarGraph(Gnx, node_type_name=\"label\", node_features=node_features)\n\n    # Split nodes into train/test using stratification.\n    train_nodes, test_nodes, train_targets, test_targets = model_selection.train_test_split(\n        node_ids, node_targets, train_size=140, test_size=None, stratify=node_targets\n    )\n\n    # Split test set into test and validation\n    val_nodes, test_nodes, val_targets, test_targets = model_selection.train_test_split(\n        test_nodes, test_targets, train_size=500, test_size=None\n    )\n\n    # Create mappers for GraphSAGE that input data from the graph to the model\n    generator = GraphSAGENodeGenerator(\n        G, batch_size, num_samples, seed=42\n    )\n    train_gen = generator.flow(train_nodes, train_targets)\n    val_gen = generator.flow(val_nodes, val_targets)\n\n    # GraphSAGE model\n    model = GraphSAGE(\n        layer_sizes=layer_size, generator=train_gen, bias=True, dropout=dropout\n    )\n    # Expose the input and output sockets of the model:\n    x_inp, x_out = model.default_model(flatten_output=True)\n\n    # Snap the final estimator layer to x_out\n    prediction = layers.Dense(units=train_targets.shape[1], activation=\"softmax\")(x_out)\n\n    # Create Keras model for training\n    model = keras.Model(inputs=x_inp, outputs=prediction)\n    model.compile(\n        optimizer=optimizers.Adam(lr=learning_rate),\n        loss=losses.categorical_crossentropy,\n        metrics=[metrics.categorical_accuracy],\n    )\n\n    # Train model\n    history = model.fit_generator(\n        train_gen,\n        epochs=num_epochs,\n        validation_data=val_gen,\n        verbose=2,\n        shuffle=True,\n    )\n\n    # Evaluate on test set and print metrics\n    test_metrics = model.evaluate_generator(generator.flow(test_nodes, test_targets))\n    print(\"\\nTest Set Metrics:\")\n    for name, val in zip(model.metrics_names, test_metrics):\n        print(\"\\t{}: {:0.4f}\".format(name, val))\n\n    # Get predictions for all nodes\n    all_predictions = model.predict_generator(generator.flow(node_ids))\n\n    # Turn predictions back into the original categories\n    node_predictions = pd.DataFrame(\n        target_encoding.inverse_transform(all_predictions), index=node_ids\n    )\n    accuracy = np.mean(\n        [\n            \"subject=\" + gt_subject == p\n            for gt_subject, p in zip(\n                node_data[\"subject\"], node_predictions.idxmax(axis=1)\n            )\n        ]\n    )\n    print(\"All-node accuracy: {:.3f}\".format(accuracy))\n\n    # TODO: extract the GraphSAGE embeddings from x_out, and save/plot them\n\n    # Save the trained model\n    save_str = \"_n{}_l{}_d{}_r{}\".format(\n        \"_\".join([str(x) for x in num_samples]),\n        \"_\".join([str(x) for x in layer_size]),\n        dropout,\n        learning_rate,\n    )\n    model.save(\"cora_example_model\" + save_str + \".h5\")\n\n    # We must also save the target encoding to convert model predictions\n    with open(\"cora_example_encoding\" + save_str + \".pkl\", \"wb\") as f:\n        pickle.dump([target_encoding], f)\n\n\ndef test(edgelist, node_data, model_file, batch_size, target_name=\"subject\"):\n    \"\"\"\n    Load the serialized model and evaluate on all nodes in the graph.\n\n    Args:\n        edgelist: Graph edgelist\n        node_data: Feature and target data for nodes\n        model_file: Location of Keras model to load\n        batch_size: Size of batch for inference\n        target_name: The target node attribute (categorical)\n    \"\"\"\n    # Extract the feature data. These are the feature vectors that the Keras model will use as input.\n    # The CORA dataset contains attributes 'w_x' that correspond to words found in that publication.\n    node_features = node_data[feature_names]\n\n    # Create graph from edgelist and set node features and node type\n    Gnx = nx.from_pandas_edgelist(edgelist)\n\n    # We must also load the saved target encoding to convert model predictions\n    encoder_file = model_file.replace(\n        \"cora_example_model\", \"cora_example_encoding\"\n    ).replace(\".h5\", \".pkl\")\n    with open(encoder_file, \"rb\") as f:\n        target_encoding = pickle.load(f)[0]\n\n    # Encode targets with pre-trained encoder\n    node_targets = target_encoding.transform(\n        node_data[[target_name]].to_dict(\"records\")\n    )\n    node_ids = node_data.index\n\n    # Convert to StellarGraph and prepare for ML\n    G = sg.StellarGraph(Gnx, node_features=node_features)\n\n    # Load Keras model\n    model = keras.models.load_model(\n        model_file, custom_objects={\"MeanAggregator\": MeanAggregator}\n    )\n    print(\"Loaded model:\")\n    model.summary()\n\n    # Get required samples from model\n    # TODO: Can we move this to the library?\n    num_samples = [\n        int(model.input_shape[ii + 1][1] / model.input_shape[ii][1])\n        for ii in range(len(model.input_shape) - 1)\n    ]\n\n    # Create mappers for GraphSAGE that input data from the graph to the model\n    generator = GraphSAGENodeGenerator(\n        G, batch_size, num_samples, seed=42\n    )\n    all_gen = generator.flow(node_ids, node_targets)\n\n    # Evaluate and print metrics\n    all_metrics = model.evaluate_generator(all_gen)\n\n    print(\"\\nAll-node Evaluation:\")\n    for name, val in zip(model.metrics_names, all_metrics):\n        print(\"\\t{}: {:0.4f}\".format(name, val))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"Graph node classification using GraphSAGE\"\n    )\n    parser.add_argument(\n        \"-c\",\n        \"--checkpoint\",\n        nargs=\"?\",\n        type=str,\n        default=None,\n        help=\"Load a saved checkpoint .h5 file\",\n    )\n    parser.add_argument(\n        \"-b\", \"--batch_size\", type=int, default=20, help=\"Batch size for training\"\n    )\n    parser.add_argument(\n        \"-e\",\n        \"--epochs\",\n        type=int,\n        default=10,\n        help=\"The number of epochs to train the model\",\n    )\n    parser.add_argument(\n        \"-d\",\n        \"--dropout\",\n        type=float,\n        default=0.3,\n        help=\"Dropout rate for the GraphSAGE model, between 0.0 and 1.0\",\n    )\n    parser.add_argument(\n        \"-r\",\n        \"--learningrate\",\n        type=float,\n        default=0.005,\n        help=\"Initial learning rate for model training\",\n    )\n    parser.add_argument(\n        \"-n\",\n        \"--neighbour_samples\",\n        type=int,\n        nargs=\"*\",\n        default=[20, 10],\n        help=\"The number of neighbour nodes sampled at each GraphSAGE layer\",\n    )\n    parser.add_argument(\n        \"-s\",\n        \"--layer_size\",\n        type=int,\n        nargs=\"*\",\n        default=[20, 20],\n        help=\"The number of hidden features at each GraphSAGE layer\",\n    )\n    parser.add_argument(\n        \"-l\",\n        \"--location\",\n        type=str,\n        default=None,\n        help=\"Location of the CORA dataset (directory)\",\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--target\",\n        type=str,\n        default=\"subject\",\n        help=\"The target node attribute (categorical)\",\n    )\n    args, cmdline_args = parser.parse_known_args()\n\n    # Load the dataset - this assumes it is the CORA dataset\n    # Load graph edgelist\n    graph_loc = os.path.expanduser(args.location)\n    edgelist = pd.read_table(\n        os.path.join(graph_loc, \"cora.cites\"), header=None, names=[\"source\", \"target\"]\n    )\n\n    # Load node features\n    
# The CORA dataset contains binary attributes 'w_x' that correspond to whether the corresponding keyword\n # (out of 1433 keywords) is found in the corresponding publication.\n feature_names = [\"w_{}\".format(ii) for ii in range(1433)]\n # Also, there is a \"subject\" column\n column_names = feature_names + [\"subject\"]\n node_data = pd.read_table(\n os.path.join(graph_loc, \"cora.content\"), header=None, names=column_names\n )\n\n if args.checkpoint is None:\n train(\n edgelist,\n node_data,\n args.layer_size,\n args.neighbour_samples,\n args.batch_size,\n args.epochs,\n args.learningrate,\n args.dropout,\n )\n else:\n test(edgelist, node_data, args.checkpoint, args.batch_size)\n" ]
[ [ "sklearn.feature_extraction.DictVectorizer", "sklearn.model_selection.train_test_split" ] ]
feiga/fedlearner
[ "99a19934b872a9fba6d85ae018b0ec145612fbca" ]
[ "test/test_compressed_raw_data_visitor.py" ]
[ "# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\nimport unittest\nimport os\n\nfrom tensorflow.compat.v1 import gfile\nimport tensorflow.compat.v1 as tf\ntf.enable_eager_execution()\n\nfrom fedlearner.common import etcd_client\nfrom fedlearner.common import common_pb2 as common_pb\nfrom fedlearner.common import data_join_service_pb2 as dj_pb\nfrom fedlearner.data_join import raw_data_manifest_manager, raw_data_visitor, common\n\nclass TestRawDataVisitor(unittest.TestCase):\n def test_raw_data_visitor(self):\n self.data_source = common_pb.DataSource()\n self.data_source.data_source_meta.name = 'fclh_test'\n self.data_source.data_source_meta.partition_num = 1\n self.data_source.raw_data_dir = \"./test/compressed_raw_data\"\n self.etcd = etcd_client.EtcdClient('test_cluster', 'localhost:2379',\n 'fedlearner', True)\n self.etcd.delete_prefix(self.data_source.data_source_meta.name)\n self.assertEqual(self.data_source.data_source_meta.partition_num, 1)\n partition_dir = os.path.join(self.data_source.raw_data_dir, common.partition_repr(0))\n self.assertTrue(gfile.Exists(partition_dir))\n manifest_manager = raw_data_manifest_manager.RawDataManifestManager(\n self.etcd, self.data_source)\n add_fpaths = dj_pb.RawDataFilePaths(\n file_paths=[os.path.join(partition_dir, \"0-0.idx\")],\n dedup=True\n )\n manifest_manager.add_raw_data(0, [os.path.join(partition_dir, \"0-0.idx\")], True)\n raw_data_options = dj_pb.RawDataOptions(\n raw_data_iter='TF_DATASET',\n compressed_type='GZIP'\n )\n rdm = raw_data_visitor.RawDataManager(self.etcd, self.data_source,0)\n self.assertTrue(rdm.check_index_meta_by_process_index(0))\n rdv = raw_data_visitor.RawDataVisitor(self.etcd, self.data_source, 0,\n raw_data_options)\n expected_index = 0\n for (index, item) in rdv:\n if index > 0 and index % 32 == 0:\n print(\"{} {}\".format(index, item.example_id))\n self.assertEqual(index, expected_index)\n expected_index += 1\n self.assertGreater(expected_index, 0)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.enable_eager_execution" ] ]
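The visitor in this test is configured with raw_data_iter='TF_DATASET' and compressed_type='GZIP'. Setting fedlearner's wrappers aside, the TensorFlow behaviour underneath can be sketched directly with the two APIs listed for this entry; the record path below is hypothetical:

import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
from tensorflow.compat.v1 import gfile

fpath = "./test/compressed_raw_data/example.tfrecord.gz"  # hypothetical file name
if gfile.Exists(fpath):
    # TFRecordDataset decompresses GZIP-compressed record files on the fly,
    # which is what the TF_DATASET/GZIP raw-data options ultimately rely on.
    dataset = tf.data.TFRecordDataset([fpath], compression_type="GZIP")
    for index, record in enumerate(dataset):
        if index % 32 == 0:
            print(index, len(record.numpy()))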
arthur-e/spotpy
[ "d689678a98fcd26769d581218024d5f058f0d027" ]
[ "spotpy/algorithms/abc.py" ]
# -*- coding: utf-8 -*-\n'''\nCopyright (c) 2018 by Tobias Houska\nThis file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).\n:author: Patrick Lauer\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom . import _algorithm\nimport numpy as np\nimport random\n\n\nclass abc(_algorithm):\n    \"\"\"\n    This class holds the Artificial Bee Colony (ABC) algorithm, based on Karaboga (2007).\n    D. Karaboga, AN IDEA BASED ON HONEY BEE SWARM FOR NUMERICAL OPTIMIZATION, TECHNICAL REPORT-TR06, Erciyes University, Engineering Faculty, Computer Engineering Department, 2005.\n    D. Karaboga, B. Basturk, A powerful and Efficient Algorithm for Numerical Function Optimization: Artificial Bee Colony (ABC) Algorithm, Journal of Global Optimization, Volume:39, Issue:3, pp:459-471, November 2007, ISSN:0925-5001, doi: 10.1007/s10898-007-9149-x\n\n    \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Input\n ----------\n spot_setup: class\n model: function\n Should be callable with a parameter combination of the parameter-function\n and return a list of simulation results (same length as the evaluation list)\n parameter: function\n When called, it should return a random parameter combination, which can\n be e.g. uniform or Gaussian\n objectivefunction: function\n Should return the objectivefunction value for given lists of model simulations and\n observations.\n evaluation: function\n Should return the true values as returned by the model.\n\n dbname: str\n * Name of the database where parameter, objectivefunction value and simulation results will be saved.\n\n dbformat: str\n * ram: fast, suited for short sampling times. No file will be created and results are saved in an array.\n * csv: A csv file will be created, which you can import afterwards.\n\n parallel: str\n * seq: Sequential sampling (default): Normal iterations on one core of your cpu.\n * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).\n\n save_sim: boolean\n * True: Simulation results will be saved\n * False: Simulation results will not be saved\n \"\"\"\n kwargs['optimization_direction'] = 'maximize'\n kwargs['algorithm_name'] = 'Artificial Bee Colony (ABC) algorithm'\n super(abc, self).__init__(*args, **kwargs)\n\n def sample(self, repetitions, eb=48, a=(1 / 10),\n peps=0.0001, ownlimit=False, limit=24):\n \"\"\"\n Parameters\n ----------\n repetitions: int\n maximum number of function evaluations allowed during optimization\n eb: int\n number of employed bees (half of population size)\n a: float\n mutation factor\n peps: float\n Convergence criterion\n ownlimit: boolean\n determines whether a user-defined limit is set or not\n limit: int\n sets the limit\n \"\"\"\n self.set_repetiton(repetitions)\n print(\n 'Starting the ABC algorithm with ' +\n str(repetitions) +\n ' repetitions...')\n # Initialize ABC parameters:\n randompar = self.parameter()['random']\n self.nopt = randompar.size\n random.seed()\n if ownlimit:\n self.limit = limit\n else:\n self.limit = eb\n lb, ub = self.parameter()['minbound'], self.parameter()['maxbound']\n # Initialization\n work = []\n icall = 0\n gnrng = 1e100\n # Calculate the objective function\n param_generator = (\n (rep, self.parameter()['random']) for rep in range(eb))\n for rep, randompar, simulations in self.repeat(param_generator):\n # Calculate fitness\n like = self.postprocessing(\n rep, randompar, simulations, chains=1, negativlike=True)\n 
c = 0\n p = 0\n work.append([like, randompar, like, randompar, c, p])\n icall += 1\n if self.status.stop:\n print('Stopping sampling')\n break\n\n while icall < repetitions and gnrng > peps:\n psum = 0\n # Employed bee phase\n # Generate new input parameters\n for i, val in enumerate(work):\n k = i\n while k == i:\n k = random.randint(0, (eb - 1))\n j = random.randint(0, (self.nopt - 1))\n work[i][3][j] = work[i][1][j] + \\\n random.uniform(-a, a) * (work[i][1][j] - work[k][1][j])\n if work[i][3][j] < lb[j]:\n work[i][3][j] = lb[j]\n if work[i][3][j] > ub[j]:\n work[i][3][j] = ub[j]\n\n # Calculate the objective function\n param_generator = ((rep, work[rep][3]) for rep in range(eb))\n for rep, randompar, simulations in self.repeat(param_generator):\n # Calculate fitness\n clike = self.postprocessing(\n icall + eb, randompar, simulations, chains=2, negativlike=True)\n if clike > work[rep][0]:\n work[rep][1] = work[rep][3]\n work[rep][0] = clike\n work[rep][4] = 0\n else:\n work[rep][4] = work[rep][4] + 1\n icall += 1\n if self.status.stop:\n print('Stopping sampling')\n break\n # Probability distribution for roulette wheel selection\n bn = []\n for i, val in enumerate(work):\n psum = psum + (1 / work[i][0])\n for i, val in enumerate(work):\n work[i][5] = ((1 / work[i][0]) / psum)\n bn.append(work[i][5])\n bounds = np.cumsum(bn)\n # Onlooker bee phase\n # Roulette wheel selection\n for i, val in enumerate(work):\n pn = random.uniform(0, 1)\n k = i\n while k == i:\n k = random.randint(0, eb - 1)\n for t, vol in enumerate(bounds):\n if bounds[t] - pn >= 0:\n z = t\n break\n j = random.randint(0, (self.nopt - 1))\n # Generate new input parameters\n try:\n work[i][3][j] = work[z][1][j] + \\\n random.uniform(-a, a) * (work[z][1][j] - work[k][1][j])\n except UnboundLocalError:\n # z stays unset if rounding left pn above bounds[-1]; fall back to 0.\n z = 0\n work[i][3][j] = work[z][1][j] + \\\n random.uniform(-a, a) * (work[z][1][j] - work[k][1][j])\n if work[i][3][j] < lb[j]:\n work[i][3][j] = lb[j]\n if work[i][3][j] > ub[j]:\n work[i][3][j] = ub[j]\n # Calculate the objective function\n param_generator = ((rep, work[rep][3]) for rep in range(eb))\n for rep, randompar, simulations in self.repeat(param_generator):\n # Calculate fitness\n clike = self.postprocessing(\n icall + eb, randompar, simulations, chains=3, negativlike=True)\n if clike > work[rep][0]:\n work[rep][1] = work[rep][3]\n work[rep][0] = clike\n work[rep][4] = 0\n else:\n work[rep][4] = work[rep][4] + 1\n icall += 1\n if self.status.stop:\n print('Stopping sampling')\n break\n # Scout bee phase\n for i, val in enumerate(work):\n if work[i][4] >= self.limit:\n work[i][1] = self.parameter()['random']\n work[i][4] = 0\n t, work[i][0], simulations = self.simulate(\n (icall, work[i][1]))\n clike = self.postprocessing(\n icall + eb, work[i][1], simulations, chains=4, negativlike=True)\n work[i][0] = clike\n icall += 1\n if self.status.stop:\n print('Stopping sampling')\n break\n gnrng = -self.status.objectivefunction_max\n if icall >= repetitions:\n print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')\n print('ON THE MAXIMUM NUMBER OF TRIALS ')\n print(repetitions)\n print('HAS BEEN EXCEEDED.')\n\n if gnrng < peps:\n print(\n 'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE AT RUN')\n print(icall)\n self.final_call()\n" ]
[ [ "numpy.cumsum" ] ]
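The onlooker-bee phase above turns the inverse fitness values into a cumulative probability array with numpy.cumsum (the one API listed for this entry) and scans it for the first bound that covers a uniform random draw. A minimal sketch of that roulette-wheel step in isolation, with hypothetical fitness values standing in for work[i][0]:

import random
import numpy as np

fitnesses = np.array([2.0, 4.0, 8.0, 16.0])  # hypothetical work[i][0] values
weights = 1.0 / fitnesses                    # better (smaller) fitness gets more weight
probs = weights / weights.sum()              # work[i][5] = (1 / work[i][0]) / psum
bounds = np.cumsum(probs)                    # cumulative distribution; last entry is 1.0

pn = random.uniform(0, 1)
# Same index the loop "if bounds[t] - pn >= 0: z = t; break" would pick; the clamp
# guards the rare case where rounding leaves pn above bounds[-1], which the class
# handles via its UnboundLocalError fallback.
z = min(int(np.searchsorted(bounds, pn)), len(bounds) - 1)
print("selected food source:", z)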
peter9811/meerk40t
[ "370b0bb4df2d310d6d8bed9179ab29960e8b3d62" ]
[ "svgelements.py" ]
# -*- coding: ISO-8859-1 -*-\n\nimport re\n\ntry:\n from collections.abc import MutableSequence # noqa\nexcept ImportError:\n from collections import MutableSequence # noqa\nfrom copy import copy\n\nfrom math import (\n ceil,\n cos,\n radians,\n sin,\n sqrt,\n hypot,\n atan,\n atan2,\n tan,\n degrees,\n acos,\n log,\n)\n\nfrom xml.etree.ElementTree import iterparse\n\ntry:\n from math import tau\nexcept ImportError:\n from math import pi\n\n tau = pi * 2\n\n\"\"\"\nThe path elements are derived from regebro's svg.path project ( https://github.com/regebro/svg.path ) with\nsome of the math from mathandy's svgpathtools project ( https://github.com/mathandy/svgpathtools ).\n\nThe goal is to provide SVG-like path objects and structures. The SVG 1.1 standard, and elements of 2.0,\nguide most of the design decisions within the path objects: if there is a question on implementation and\nthe SVG documentation prescribes a methodology, that methodology is used.\n\nThough not required, the SVGImage class acquires new functionality if PIL/Pillow is available as an import,\nand Arc can do exact arc calculations if scipy is installed.\n\"\"\"\n\nSVGELEMENTS_VERSION = \"1.4.7\"\n\nMIN_DEPTH = 5\nERROR = 1e-12\n\nmax_depth = 0\n\n# SVG STATIC VALUES\nDEFAULT_PPI = 96.0\nSVG_NAME_TAG = \"svg\"\nSVG_ATTR_VERSION = \"version\"\nSVG_VALUE_VERSION = \"1.1\"\nSVG_ATTR_XMLNS = \"xmlns\"\nSVG_VALUE_XMLNS = \"http://www.w3.org/2000/svg\"\nSVG_ATTR_XMLNS_LINK = \"xmlns:xlink\"\nSVG_VALUE_XLINK = \"http://www.w3.org/1999/xlink\"\nSVG_ATTR_XMLNS_EV = \"xmlns:ev\"\nSVG_VALUE_XMLNS_EV = \"http://www.w3.org/2001/xml-events\"\n\nXLINK_HREF = \"{http://www.w3.org/1999/xlink}href\"\nSVG_HREF = \"href\"\nSVG_ATTR_WIDTH = \"width\"\nSVG_ATTR_HEIGHT = \"height\"\nSVG_ATTR_VIEWBOX = \"viewBox\"\nSVG_VIEWBOX_TRANSFORM = \"viewbox_transform\"\nSVG_TAG_PATH = \"path\"\nSVG_TAG_GROUP = \"g\"\nSVG_TAG_RECT = \"rect\"\nSVG_TAG_CIRCLE = \"circle\"\nSVG_TAG_ELLIPSE = \"ellipse\"\nSVG_TAG_LINE = \"line\"\nSVG_TAG_POLYLINE = \"polyline\"\nSVG_TAG_POLYGON = \"polygon\"\nSVG_TAG_TEXT = \"text\"\nSVG_TAG_TSPAN = \"tspan\"\nSVG_TAG_IMAGE = \"image\"\nSVG_TAG_DESC = \"desc\"\nSVG_TAG_TITLE = \"title\"\nSVG_TAG_METADATA = \"metadata\"\nSVG_TAG_STYLE = \"style\"\nSVG_TAG_DEFS = \"defs\"\nSVG_TAG_USE = \"use\"\nSVG_TAG_CLIPPATH = \"clipPath\"\nSVG_TAG_PATTERN = \"pattern\"\n\nSVG_STRUCT_ATTRIB = \"attributes\"\nSVG_ATTR_ID = \"id\"\nSVG_ATTR_DATA = \"d\"\nSVG_ATTR_DISPLAY = \"display\"\nSVG_ATTR_COLOR = \"color\"\nSVG_ATTR_FILL = \"fill\"\nSVG_ATTR_FILL_OPACITY = \"fill-opacity\"\nSVG_ATTR_STROKE = \"stroke\"\nSVG_ATTR_STROKE_OPACITY = \"stroke-opacity\"\nSVG_ATTR_STROKE_WIDTH = \"stroke-width\"\nSVG_ATTR_TRANSFORM = \"transform\"\nSVG_ATTR_STYLE = \"style\"\nSVG_ATTR_CLASS = \"class\"\nSVG_ATTR_CLIP_PATH = \"clip-path\"\nSVG_ATTR_CLIP_RULE = \"clip-rule\"\nSVG_ATTR_CLIP_UNIT_TYPE = \"clipPathUnits\"\nSVG_ATTR_CENTER_X = \"cx\"\nSVG_ATTR_CENTER_Y = \"cy\"\nSVG_ATTR_RADIUS_X = \"rx\"\nSVG_ATTR_RADIUS_Y = \"ry\"\nSVG_ATTR_RADIUS = \"r\"\nSVG_ATTR_POINTS = \"points\"\nSVG_ATTR_PRESERVEASPECTRATIO = \"preserveAspectRatio\"\nSVG_ATTR_X = \"x\"\nSVG_ATTR_Y = \"y\"\nSVG_ATTR_X0 = \"x0\"\nSVG_ATTR_Y0 = \"y0\"\nSVG_ATTR_X1 = \"x1\"\nSVG_ATTR_Y1 = \"y1\"\nSVG_ATTR_X2 = \"x2\"\nSVG_ATTR_Y2 = \"y2\"\nSVG_ATTR_DX = \"dx\"\nSVG_ATTR_DY = \"dy\"\nSVG_ATTR_TAG = \"tag\"\nSVG_ATTR_FONT = \"font\"\nSVG_ATTR_FONT_FAMILY = \"font-family\" # Serif, sans-serif, cursive, fantasy, monospace\nSVG_ATTR_FONT_FACE = \"font-face\"\nSVG_ATTR_FONT_SIZE = 
\"font-size\"\nSVG_ATTR_FONT_WEIGHT = \"font-weight\" # normal, bold, bolder, lighter, 100-900\nSVG_ATTR_TEXT_ANCHOR = \"text-anchor\"\nSVG_ATTR_PATTERN_CONTENT_UNITS = \"patternContentUnits\"\nSVG_ATTR_PATTERN_TRANSFORM = \"patternTransform\"\nSVG_ATTR_PATTERN_UNITS = \"patternUnits\"\n\nSVG_ATTR_VECTOR_EFFECT = \"vector-effect\"\n\nSVG_UNIT_TYPE_USERSPACEONUSE = \"userSpaceOnUse\"\nSVG_UNIT_TYPE_OBJECTBOUNDINGBOX = \"objectBoundingBox\"\n\nSVG_RULE_NONZERO = \"nonzero\"\nSVG_RULE_EVENODD = \"evenodd\"\n\nSVG_TRANSFORM_MATRIX = \"matrix\"\nSVG_TRANSFORM_TRANSLATE = \"translate\"\nSVG_TRANSFORM_SCALE = \"scale\"\nSVG_TRANSFORM_ROTATE = \"rotate\"\nSVG_TRANSFORM_SKEW_X = \"skewx\"\nSVG_TRANSFORM_SKEW_Y = \"skewy\"\nSVG_TRANSFORM_SKEW = \"skew\"\nSVG_TRANSFORM_TRANSLATE_X = \"translatex\"\nSVG_TRANSFORM_TRANSLATE_Y = \"translatey\"\nSVG_TRANSFORM_SCALE_X = \"scalex\"\nSVG_TRANSFORM_SCALE_Y = \"scaley\"\n\nSVG_VALUE_NONE = \"none\"\nSVG_VALUE_CURRENT_COLOR = \"currentColor\"\n\nSVG_VALUE_NON_SCALING_STROKE = \"non-scaling-stroke\"\n\nPATTERN_WS = r\"[\\s\\t\\n]*\"\nPATTERN_COMMA = r\"(?:\\s*,\\s*|\\s+|(?=-))\"\nPATTERN_COMMAWSP = r\"[ ,\\t\\n\\x09\\x0A\\x0C\\x0D]+\"\nPATTERN_FLOAT = r\"[-+]?[0-9]*\\.?[0-9]+(?:[eE][-+]?[0-9]+)?\"\nPATTERN_LENGTH_UNITS = \"cm|mm|Q|in|pt|pc|px|em|cx|ch|rem|vw|vh|vmin|vmax\"\nPATTERN_ANGLE_UNITS = \"deg|grad|rad|turn\"\nPATTERN_TIME_UNITS = \"s|ms\"\nPATTERN_FREQUENCY_UNITS = \"Hz|kHz\"\nPATTERN_RESOLUTION_UNITS = \"dpi|dpcm|dppx\"\nPATTERN_PERCENT = \"%\"\nPATTERN_TRANSFORM = (\n SVG_TRANSFORM_MATRIX\n + \"|\"\n + SVG_TRANSFORM_TRANSLATE\n + \"|\"\n + SVG_TRANSFORM_TRANSLATE_X\n + \"|\"\n + SVG_TRANSFORM_TRANSLATE_Y\n + \"|\"\n + SVG_TRANSFORM_SCALE\n + \"|\"\n + SVG_TRANSFORM_SCALE_X\n + \"|\"\n + SVG_TRANSFORM_SCALE_Y\n + \"|\"\n + SVG_TRANSFORM_ROTATE\n + \"|\"\n + SVG_TRANSFORM_SKEW\n + \"|\"\n + SVG_TRANSFORM_SKEW_X\n + \"|\"\n + SVG_TRANSFORM_SKEW_Y\n)\nPATTERN_TRANSFORM_UNITS = (\n PATTERN_LENGTH_UNITS + \"|\" + PATTERN_ANGLE_UNITS + \"|\" + PATTERN_PERCENT\n)\n\nREGEX_IRI = re.compile(r\"url\\(#?(.*)\\)\")\nREGEX_FLOAT = re.compile(PATTERN_FLOAT)\nREGEX_COORD_PAIR = re.compile(\n \"(%s)%s(%s)\" % (PATTERN_FLOAT, PATTERN_COMMA, PATTERN_FLOAT)\n)\nREGEX_TRANSFORM_TEMPLATE = re.compile(\n r\"(?u)(%s)%s\\(([^)]+)\\)\" % (PATTERN_TRANSFORM, PATTERN_WS)\n)\nREGEX_TRANSFORM_PARAMETER = re.compile(\n \"(%s)%s(%s)?\" % (PATTERN_FLOAT, PATTERN_WS, PATTERN_TRANSFORM_UNITS)\n)\nREGEX_COLOR_HEX = re.compile(r\"^#?([0-9A-Fa-f]{3,8})$\")\nREGEX_COLOR_RGB = re.compile(\n r\"rgba?\\(\\s*(%s)\\s*,\\s*(%s)\\s*,\\s*(%s)\\s*(?:,\\s*(%s)\\s*)?\\)\"\n % (PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT)\n)\nREGEX_COLOR_RGB_PERCENT = re.compile(\n r\"rgba?\\(\\s*(%s)%%\\s*,\\s*(%s)%%\\s*,\\s*(%s)%%\\s*(?:,\\s*(%s)\\s*)?\\)\"\n % (PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT)\n)\nREGEX_COLOR_HSL = re.compile(\n r\"hsla?\\(\\s*(%s)\\s*,\\s*(%s)%%\\s*,\\s*(%s)%%\\s*(?:,\\s*(%s)\\s*)?\\)\"\n % (PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT, PATTERN_FLOAT)\n)\nREGEX_LENGTH = re.compile(r\"(%s)([A-Za-z%%]*)\" % PATTERN_FLOAT)\nREGEX_CSS_STYLE = re.compile(r\"([^{]+)\\s*\\{\\s*([^}]+)\\s*\\}\")\nREGEX_CSS_FONT = re.compile(\n 
r\"(?:(normal|italic|oblique)\\s|(normal|small-caps)\\s|(normal|bold|bolder|lighter|\\d{3})\\s|(normal|ultra-condensed|extra-condensed|condensed|semi-condensed|semi-expanded|expanded|extra-expanded|ultra-expanded)\\s)*\\s*(xx-small|x-small|small|medium|large|x-large|xx-large|larger|smaller|\\d+(?:em|pt|pc|px|%))(?:/(xx-small|x-small|small|medium|large|x-large|xx-large|larger|smaller|\\d+(?:em|pt|pc|px|%)))?\\s*(.*),?\\s+(serif|sans-serif|cursive|fantasy|monospace);?\"\n)\n\nsvg_parse = [(\"COMMAND\", r\"[MmZzLlHhVvCcSsQqTtAa]\"), (\"SKIP\", PATTERN_COMMAWSP)]\nsvg_re = re.compile(\"|\".join(\"(?P<%s>%s)\" % pair for pair in svg_parse))\nnum_parse = [(\"FLOAT\", PATTERN_FLOAT), (\"CLOSE\", r\"[Zz]\"), (\"SKIP\", PATTERN_COMMAWSP)]\nnum_re = re.compile(\"|\".join(\"(?P<%s>%s)\" % pair for pair in num_parse))\nflag_parse = [(\"FLAG\", r\"[01]\"), (\"SKIP\", PATTERN_COMMAWSP)]\nflag_re = re.compile(\"|\".join(\"(?P<%s>%s)\" % pair for pair in flag_parse))\n\n\nclass SVGLexicalParser:\n def __init__(self):\n self.parser = None\n self.pathd = None\n self.pos = 0\n self.limit = 0\n self.inline_close = None\n\n def _command(self):\n while self.pos < self.limit:\n match = svg_re.match(self.pathd, self.pos)\n if match is None:\n return None # Did not match at command sequence.\n self.pos = match.end()\n kind = match.lastgroup\n if kind == \"SKIP\":\n continue\n return match.group()\n return None\n\n def _more(self):\n while self.pos < self.limit:\n match = num_re.match(self.pathd, self.pos)\n if match is None:\n return False\n kind = match.lastgroup\n if kind == \"CLOSE\":\n self.inline_close = match.group()\n return False\n if kind == \"SKIP\":\n # move skipped elements forward.\n self.pos = match.end()\n continue\n return True\n return None\n\n def _number(self):\n while self.pos < self.limit:\n match = num_re.match(self.pathd, self.pos)\n if match is None:\n break # No more matches.\n kind = match.lastgroup\n if kind == \"CLOSE\":\n # Inline Close\n self.inline_close = match.group()\n return None\n self.pos = match.end()\n if kind == \"SKIP\":\n continue\n return float(match.group())\n return None\n\n def _flag(self):\n while self.pos < self.limit:\n match = flag_re.match(self.pathd, self.pos)\n if match is None:\n break # No more matches.\n self.pos = match.end()\n kind = match.lastgroup\n if kind == \"SKIP\":\n continue\n return bool(int(match.group()))\n return None\n\n def _coord(self):\n x = self._number()\n if x is None:\n return None\n y = self._number()\n if y is None:\n raise ValueError\n return x, y\n\n def _rcoord(self):\n position = self._coord()\n if position is None:\n return None\n current_pos = self.parser.current_point\n if current_pos is None:\n return position\n return position[0] + current_pos.x, position[1] + current_pos.y\n\n def parse(self, parser, pathd):\n self.parser = parser\n self.parser.start()\n self.pathd = pathd\n self.pos = 0\n self.limit = len(pathd)\n while True:\n cmd = self._command()\n if cmd is None:\n return\n elif cmd == \"z\" or cmd == \"Z\":\n if self._more():\n raise ValueError\n self.parser.closed(relative=cmd.islower())\n self.inline_close = None\n continue\n elif cmd == \"m\":\n if not self._more():\n raise ValueError\n coord = self._rcoord()\n self.parser.move(coord, relative=True)\n while self._more():\n coord = self._rcoord()\n self.parser.line(coord, relative=True)\n elif cmd == \"M\":\n if not self._more():\n raise ValueError\n coord = self._coord()\n self.parser.move(coord, relative=False)\n while self._more():\n coord = self._coord()\n 
self.parser.line(coord, relative=False)\n elif cmd == \"l\":\n while True:\n coord = self._rcoord()\n if coord is None:\n coord = self.inline_close\n if coord is None:\n raise ValueError\n self.parser.line(coord, relative=True)\n if not self._more():\n break\n elif cmd == \"L\":\n while True:\n coord = self._coord()\n if coord is None:\n coord = self.inline_close\n if coord is None:\n raise ValueError\n self.parser.line(coord, relative=False)\n if not self._more():\n break\n elif cmd == \"t\":\n while True:\n coord = self._rcoord()\n if coord is None:\n coord = self.inline_close\n if coord is None:\n raise ValueError\n self.parser.smooth_quad(coord, relative=True)\n if not self._more():\n break\n elif cmd == \"T\":\n while True:\n coord = self._coord()\n if coord is None:\n coord = self.inline_close\n if coord is None:\n raise ValueError\n self.parser.smooth_quad(coord, relative=False)\n if not self._more():\n break\n elif cmd == \"h\":\n while True:\n value = self._number()\n self.parser.horizontal(value, relative=True)\n if not self._more():\n break\n elif cmd == \"H\":\n while True:\n value = self._number()\n self.parser.horizontal(value, relative=False)\n if not self._more():\n break\n elif cmd == \"v\":\n while True:\n value = self._number()\n self.parser.vertical(value, relative=True)\n if not self._more():\n break\n elif cmd == \"V\":\n while self._more():\n value = self._number()\n self.parser.vertical(value, relative=False)\n elif cmd == \"c\":\n while True:\n coord1, coord2, coord3 = (\n self._rcoord(),\n self._rcoord(),\n self._rcoord(),\n )\n if coord1 is None:\n coord1 = self.inline_close\n if coord1 is None:\n raise ValueError\n if coord2 is None:\n coord2 = self.inline_close\n if coord2 is None:\n raise ValueError\n if coord3 is None:\n coord3 = self.inline_close\n if coord3 is None:\n raise ValueError\n self.parser.cubic(coord1, coord2, coord3, relative=True)\n if not self._more():\n break\n elif cmd == \"C\":\n while True:\n coord1, coord2, coord3 = self._coord(), self._coord(), self._coord()\n if coord1 is None:\n coord1 = self.inline_close\n if coord1 is None:\n raise ValueError\n if coord2 is None:\n coord2 = self.inline_close\n if coord2 is None:\n raise ValueError\n if coord3 is None:\n coord3 = self.inline_close\n if coord3 is None:\n raise ValueError\n self.parser.cubic(coord1, coord2, coord3, relative=False)\n if not self._more():\n break\n elif cmd == \"q\":\n while True:\n coord1, coord2 = self._rcoord(), self._rcoord()\n if coord1 is None:\n coord1 = self.inline_close\n if coord1 is None:\n raise ValueError\n if coord2 is None:\n coord2 = self.inline_close\n if coord2 is None:\n raise ValueError\n self.parser.quad(coord1, coord2, relative=True)\n if not self._more():\n break\n elif cmd == \"Q\":\n while True:\n coord1, coord2 = self._coord(), self._coord()\n if coord1 is None:\n coord1 = self.inline_close\n if coord1 is None:\n raise ValueError\n if coord2 is None:\n coord2 = self.inline_close\n if coord2 is None:\n raise ValueError\n self.parser.quad(coord1, coord2, relative=False)\n if not self._more():\n break\n elif cmd == \"s\":\n while True:\n coord1, coord2 = self._rcoord(), self._rcoord()\n if coord1 is None:\n coord1 = self.inline_close\n if coord1 is None:\n raise ValueError\n if coord2 is None:\n coord2 = self.inline_close\n if coord2 is None:\n raise ValueError\n self.parser.smooth_cubic(coord1, coord2, relative=True)\n if not self._more():\n break\n elif cmd == \"S\":\n while True:\n coord1, coord2 = self._coord(), self._coord()\n if coord1 is 
None:\n coord1 = self.inline_close\n if coord1 is None:\n raise ValueError\n if coord2 is None:\n coord2 = self.inline_close\n if coord2 is None:\n raise ValueError\n self.parser.smooth_cubic(coord1, coord2, relative=False)\n if not self._more():\n break\n elif cmd == \"a\":\n while self._more():\n rx, ry, rotation, arc, sweep, coord = (\n self._number(),\n self._number(),\n self._number(),\n self._flag(),\n self._flag(),\n self._rcoord(),\n )\n if sweep is None:\n raise ValueError\n if coord is None:\n coord = self.inline_close\n if coord is None:\n raise ValueError\n self.parser.arc(rx, ry, rotation, arc, sweep, coord, relative=True)\n elif cmd == \"A\":\n while self._more():\n rx, ry, rotation, arc, sweep, coord = (\n self._number(),\n self._number(),\n self._number(),\n self._flag(),\n self._flag(),\n self._coord(),\n )\n if coord is None:\n coord = self.inline_close\n if coord is None:\n raise ValueError\n self.parser.arc(rx, ry, rotation, arc, sweep, coord, relative=False)\n self.parser.end()\n\n\nclass Length(object):\n \"\"\"\n SVGLength as used in SVG\n\n Lengths are lazily solved values. Several conversion values are typically unknown by default and a Length\n simply stores that ambiguity. So we can have a length of 50% and, without calling .value(relative_length=3000),\n it will simply be stored as 50%. Likewise you can have discrete values like 30cm or 20in which are knowable in\n their own units but are not knowable in pixels unless a PPI value is supplied. Calling\n .value(relative_length=\"30cm\", ppi=96) resolves a relative value like 12% against that length. We can also\n convert values between knowable lengths: 30cm is 300mm regardless of whether we know how to convert either to\n pixels. 0% is 0 in any units or relative values. We can convert pixels to pc and pt without issue. We can\n convert vh, vw, vmax, vmin values if we know the viewbox values, and em values if we know the font_size. We can\n add values together if their units are convertible: Length(\"20in\") + \"3cm\".\n\n If .value() cannot solve for the value with the given information then it will return a Length value. 
If it can\n be solved it will return a float.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if len(args) == 1:\n value = args[0]\n if value is None:\n self.amount = None\n self.units = None\n return\n s = str(value)\n for m in REGEX_LENGTH.findall(s):\n self.amount = float(m[0])\n self.units = m[1]\n return\n elif len(args) == 2:\n self.amount = args[0]\n self.units = args[1]\n return\n self.amount = 0.0\n self.units = \"\"\n\n def __float__(self):\n if self.amount is None:\n return None\n if self.units == \"pt\":\n return self.amount * 1.3333\n elif self.units == \"pc\":\n return self.amount * 16.0\n return self.amount\n\n def __imul__(self, other):\n if isinstance(other, (int, float)):\n self.amount *= other\n return self\n if self.amount == 0.0:\n return 0.0\n if isinstance(other, str):\n other = Length(other)\n if isinstance(other, Length):\n if other.amount == 0.0:\n self.amount = 0.0\n return self\n if self.units == other.units:\n self.amount *= other.amount\n return self\n if self.units == \"%\":\n self.units = other.units\n self.amount = self.amount * other.amount / 100.0\n return self\n elif other.units == \"%\":\n self.amount = self.amount * other.amount / 100.0\n return self\n raise ValueError\n\n def __iadd__(self, other):\n if not isinstance(other, Length):\n other = Length(other)\n if self.units == other.units:\n self.amount += other.amount\n return self\n if self.amount == 0:\n self.amount = other.amount\n self.units = other.units\n return self\n if other.amount == 0:\n return self\n if self.units == \"px\" or self.units == \"\":\n if other.units == \"px\" or other.units == \"\":\n self.amount += other.amount\n elif other.units == \"pt\":\n self.amount += other.amount * 1.3333\n elif other.units == \"pc\":\n self.amount += other.amount * 16.0\n else:\n raise ValueError\n return self\n if self.units == \"pt\":\n if other.units == \"px\" or other.units == \"\":\n self.amount += other.amount / 1.3333\n elif other.units == \"pc\":\n self.amount += other.amount * 12.0\n else:\n raise ValueError\n return self\n elif self.units == \"pc\":\n if other.units == \"px\" or other.units == \"\":\n self.amount += other.amount / 16.0\n elif other.units == \"pt\":\n self.amount += other.amount / 12.0\n else:\n raise ValueError\n return self\n elif self.units == \"cm\":\n if other.units == \"mm\":\n self.amount += other.amount / 10.0\n elif other.units == \"in\":\n self.amount += other.amount / 0.393701\n else:\n raise ValueError\n return self\n elif self.units == \"mm\":\n if other.units == \"cm\":\n self.amount += other.amount * 10.0\n elif other.units == \"in\":\n self.amount += other.amount / 0.0393701\n else:\n raise ValueError\n return self\n elif self.units == \"in\":\n if other.units == \"cm\":\n self.amount += other.amount * 0.393701\n elif other.units == \"mm\":\n self.amount += other.amount * 0.0393701\n else:\n raise ValueError\n return self\n raise ValueError(\"%s units were not determined.\" % self.units)\n\n def __abs__(self):\n c = self.__copy__()\n c.amount = abs(c.amount)\n return c\n\n def __truediv__(self, other):\n if isinstance(other, (int, float)):\n c = self.__copy__()\n c.amount /= other\n return c\n if self.amount == 0.0:\n return 0.0\n if isinstance(other, str):\n other = Length(other)\n if isinstance(other, Length):\n if self.units == other.units:\n q = self.amount / other.amount\n return q # no units\n if self.units == \"px\" or self.units == \"\":\n if other.units == \"px\" or other.units == \"\":\n return self.amount / other.amount\n elif other.units == 
\"pt\":\n return self.amount / (other.amount * 1.3333)\n elif other.units == \"pc\":\n return self.amount / (other.amount * 16.0)\n else:\n raise ValueError\n if self.units == \"pt\":\n if other.units == \"px\" or other.units == \"\":\n return self.amount / (other.amount / 1.3333)\n elif other.units == \"pc\":\n return self.amount / (other.amount * 12.0)\n else:\n raise ValueError\n if self.units == \"pc\":\n if other.units == \"px\" or other.units == \"\":\n return self.amount / (other.amount / 16.0)\n elif other.units == \"pt\":\n return self.amount / (other.amount / 12.0)\n else:\n raise ValueError\n if self.units == \"cm\":\n if other.units == \"mm\":\n return self.amount / (other.amount / 10.0)\n elif other.units == \"in\":\n return self.amount / (other.amount / 0.393701)\n else:\n raise ValueError\n if self.units == \"mm\":\n if other.units == \"cm\":\n return self.amount / (other.amount * 10.0)\n elif other.units == \"in\":\n return self.amount / (other.amount / 0.0393701)\n else:\n raise ValueError\n if self.units == \"in\":\n if other.units == \"cm\":\n return self.amount / (other.amount * 0.393701)\n elif other.units == \"mm\":\n return self.amount / (other.amount * 0.0393701)\n else:\n raise ValueError\n raise ValueError\n\n __floordiv__ = __truediv__\n __div__ = __truediv__\n\n def __lt__(self, other):\n return (self - other).amount < 0.0\n\n def __le__(self, other):\n return (self - other).amount <= 0.0\n\n def __gt__(self, other):\n return (self - other).amount > 0.0\n\n def __ge__(self, other):\n return (self - other).amount >= 0.0\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __add__(self, other):\n if isinstance(other, (str, float, int)):\n other = Length(other)\n c = self.__copy__()\n c += other\n return c\n\n __radd__ = __add__\n\n def __mul__(self, other):\n c = copy(self)\n c *= other\n return c\n\n def __rdiv__(self, other):\n c = copy(self)\n c *= 1.0 / other.amount\n return c\n\n def __neg__(self):\n s = self.__copy__()\n s.amount = -s.amount\n return s\n\n def __isub__(self, other):\n if isinstance(other, (str, float, int)):\n other = Length(other)\n self += -other\n return self\n\n def __sub__(self, other):\n s = self.__copy__()\n s -= other\n return s\n\n def __rsub__(self, other):\n if isinstance(other, (str, float, int)):\n other = Length(other)\n return (-self) + other\n\n def __copy__(self):\n return Length(self.amount, self.units)\n\n __rmul__ = __mul__\n\n def __repr__(self):\n return \"Length('%s')\" % (str(self))\n\n def __str__(self):\n if self.amount is None:\n return SVG_VALUE_NONE\n return \"%s%s\" % (Length.str(self.amount), self.units)\n\n def __eq__(self, other):\n if other is None:\n return False\n s = self.in_pixels()\n if isinstance(other, (float, int)):\n if s is not None:\n return abs(s - other) <= ERROR\n else:\n return other == 0 and self.amount == 0\n if isinstance(other, str):\n other = Length(other)\n if self.amount == other.amount and self.units == other.units:\n return True\n if s is not None:\n o = other.in_pixels()\n if o is not None and abs(s - o) <= ERROR:\n return True\n s = self.in_inches()\n if s is not None:\n o = other.in_inches()\n if o is not None and abs(s - o) <= ERROR:\n return True\n return False\n\n @property\n def value_in_units(self):\n return self.amount\n\n def in_pixels(self):\n if self.units == \"px\" or self.units == \"\":\n return self.amount\n if self.units == \"pt\":\n return self.amount * 1.3333\n if self.units == \"pc\":\n return self.amount * 16.0\n return None\n\n def in_inches(self):\n if self.units == \"mm\":\n return 
self.amount * 0.0393701\n if self.units == \"cm\":\n return self.amount * 0.393701\n if self.units == \"in\":\n return self.amount\n return None\n\n def to_mm(\n self,\n ppi=DEFAULT_PPI,\n relative_length=None,\n font_size=None,\n font_height=None,\n viewbox=None,\n ):\n value = self.value(\n ppi=ppi,\n relative_length=relative_length,\n font_size=font_size,\n font_height=font_height,\n viewbox=viewbox,\n )\n v = value / (ppi * 0.0393701)\n return Length(\"%smm\" % (Length.str(v)))\n\n def to_cm(\n self,\n ppi=DEFAULT_PPI,\n relative_length=None,\n font_size=None,\n font_height=None,\n viewbox=None,\n ):\n value = self.value(\n ppi=ppi,\n relative_length=relative_length,\n font_size=font_size,\n font_height=font_height,\n viewbox=viewbox,\n )\n v = value / (ppi * 0.393701)\n return Length(\"%scm\" % (Length.str(v)))\n\n def to_inch(\n self,\n ppi=DEFAULT_PPI,\n relative_length=None,\n font_size=None,\n font_height=None,\n viewbox=None,\n ):\n value = self.value(\n ppi=ppi,\n relative_length=relative_length,\n font_size=font_size,\n font_height=font_height,\n viewbox=viewbox,\n )\n v = value / ppi\n return Length(\"%sin\" % (Length.str(v)))\n\n def value(\n self,\n ppi=None,\n relative_length=None,\n font_size=None,\n font_height=None,\n viewbox=None,\n **kwargs\n ):\n if self.amount is None:\n return None\n if self.units == \"%\":\n if relative_length is None:\n return self\n fraction = self.amount / 100.0\n if isinstance(relative_length, (float, int)):\n return fraction * relative_length\n elif isinstance(relative_length, (str, Length)):\n length = relative_length * self\n if isinstance(length, Length):\n return length.value(\n ppi=ppi,\n font_size=font_size,\n font_height=font_height,\n viewbox=viewbox,\n )\n return length\n return self\n if self.units == \"mm\":\n if ppi is None:\n return self\n return self.amount * ppi * 0.0393701\n if self.units == \"cm\":\n if ppi is None:\n return self\n return self.amount * ppi * 0.393701\n if self.units == \"in\":\n if ppi is None:\n return self\n return self.amount * ppi\n if self.units == \"px\" or self.units == \"\":\n return self.amount\n if self.units == \"pt\":\n return self.amount * 1.3333\n if self.units == \"pc\":\n return self.amount * 16.0\n if self.units == \"em\":\n if font_size is None:\n return self\n return self.amount * float(font_size)\n if self.units == \"ex\":\n if font_height is None:\n return self\n return self.amount * float(font_height)\n if self.units == \"vw\":\n if viewbox is None:\n return self\n v = Viewbox(viewbox)\n return self.amount * v.width / 100.0\n if self.units == \"vh\":\n if viewbox is None:\n return self\n v = Viewbox(viewbox)\n return self.amount * v.height / 100.0\n if self.units == \"vmin\":\n if viewbox is None:\n return self\n v = Viewbox(viewbox)\n m = min(v.width, v.height)\n return self.amount * m / 100.0\n if self.units == \"vmax\":\n if viewbox is None:\n return self\n v = Viewbox(viewbox)\n m = max(v.width, v.height)\n return self.amount * m / 100.0\n try:\n return float(self)\n except ValueError:\n return self\n\n @staticmethod\n def str(s):\n if s is None:\n return \"n/a\"\n if isinstance(s, Length):\n if s.units == \"\":\n s = s.amount\n else:\n a = \"%.12f\" % (s.amount)\n if \".\" in a:\n a = a.rstrip(\"0\").rstrip(\".\")\n return \"'%s%s'\" % (a, s.units)\n try:\n s = \"%.12f\" % (s)\n except TypeError:\n return str(s)\n if \".\" in s:\n s = s.rstrip(\"0\").rstrip(\".\")\n return s\n\n\nclass Color(object):\n \"\"\"\n SVG Color Parsing\n Parses different forms of defining colors.\n\n 
Including keyword: https://www.w3.org/TR/SVG11/types.html#ColorKeywords\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.value = 0\n if len(args) == 0:\n r = 0\n g = 0\n b = 0\n if \"red\" in kwargs:\n r = kwargs[\"red\"]\n if \"green\" in kwargs:\n g = kwargs[\"green\"]\n if \"blue\" in kwargs:\n b = kwargs[\"blue\"]\n if \"r\" in kwargs:\n r = kwargs[\"r\"]\n if \"g\" in kwargs:\n g = kwargs[\"g\"]\n if \"b\" in kwargs:\n b = kwargs[\"b\"]\n self.value = Color.rgb_to_int(r, g, b)\n if 1 <= len(args) <= 2:\n v = args[0]\n if isinstance(v, Color):\n self.value = v.value\n elif isinstance(v, int):\n self.value = v\n else:\n self.value = Color.parse(v)\n if len(args) == 2:\n self.opacity = float(args[1])\n elif len(args) == 3:\n r = args[0]\n g = args[1]\n b = args[2]\n self.value = Color.rgb_to_int(r, g, b)\n elif len(args) == 4:\n r = args[0]\n g = args[1]\n b = args[2]\n opacity = args[3] / 255.0\n self.value = Color.rgb_to_int(r, g, b, opacity)\n\n def __int__(self):\n return self.value\n\n def __str__(self):\n if self.value is None:\n return str(self.value)\n return self.hex\n\n def __repr__(self):\n if self.value is None:\n return \"Color('%s')\" % (self.value)\n return \"Color('%s')\" % (self.hex)\n\n def __eq__(self, other):\n if self is other:\n return True\n first = self.value\n second = other\n if isinstance(second, str):\n second = Color(second)\n if isinstance(second, Color):\n second = second.value\n return first == second\n\n def __ne__(self, other):\n return not self == other\n\n def __abs__(self):\n # Return opaque color.\n if self.value is None:\n return Color(self.value)\n return Color(self.red, self.green, self.blue)\n\n @staticmethod\n def rgb_to_int(r, g, b, opacity=1.0):\n if opacity > 1:\n opacity = 1.0\n if opacity < 0:\n opacity = 0\n r = Color.crimp(r)\n g = Color.crimp(g)\n b = Color.crimp(b)\n a = Color.crimp(opacity * 255.0)\n if a & 0x80 != 0:\n a ^= 0x80\n a <<= 24\n a = ~a\n a ^= 0x7FFFFFFF\n else:\n a <<= 24\n r <<= 16\n g <<= 8\n c = r | g | b | a\n return c\n\n @staticmethod\n def hsl_to_int(h, s, l, opacity=1.0):\n c = Color()\n c.hsl = h, s, l\n # Set opacity after hsl: the hsl setter rebuilds the value with full alpha.\n c.opacity = opacity\n return c.value\n\n @staticmethod\n def parse(color_string):\n \"\"\"Parse SVG color, will return a set value.\"\"\"\n if color_string is None or color_string == SVG_VALUE_NONE:\n return None\n match = REGEX_COLOR_HEX.match(color_string)\n if match:\n return Color.parse_color_hex(color_string)\n match = REGEX_COLOR_RGB.match(color_string)\n if match:\n return Color.parse_color_rgb(match.groups())\n match = REGEX_COLOR_RGB_PERCENT.match(color_string)\n if match:\n return Color.parse_color_rgbp(match.groups())\n match = REGEX_COLOR_HSL.match(color_string)\n if match:\n return Color.parse_color_hsl(match.groups())\n return Color.parse_color_lookup(color_string)\n\n @staticmethod\n def parse_color_lookup(v):\n \"\"\"Parse SVG Color by Keyword on dictionary lookup\"\"\"\n if not isinstance(v, str):\n return Color.rgb_to_int(0, 0, 0)\n else:\n v = v.replace(\" \", \"\").lower()\n if v == \"transparent\":\n return Color.rgb_to_int(0, 0, 0, 0.0)\n if v == \"aliceblue\":\n return Color.rgb_to_int(240, 248, 255)\n if v == \"antiquewhite\":\n return Color.rgb_to_int(250, 235, 215)\n if v == \"aqua\":\n return Color.rgb_to_int(0, 255, 255)\n if v == \"aquamarine\":\n return Color.rgb_to_int(127, 255, 212)\n if v == \"azure\":\n return Color.rgb_to_int(240, 255, 255)\n if v == \"beige\":\n return Color.rgb_to_int(245, 245, 220)\n if 
v == \"bisque\":\n return Color.rgb_to_int(255, 228, 196)\n if v == \"black\":\n return Color.rgb_to_int(0, 0, 0)\n if v == \"blanchedalmond\":\n return Color.rgb_to_int(255, 235, 205)\n if v == \"blue\":\n return Color.rgb_to_int(0, 0, 255)\n if v == \"blueviolet\":\n return Color.rgb_to_int(138, 43, 226)\n if v == \"brown\":\n return Color.rgb_to_int(165, 42, 42)\n if v == \"burlywood\":\n return Color.rgb_to_int(222, 184, 135)\n if v == \"cadetblue\":\n return Color.rgb_to_int(95, 158, 160)\n if v == \"chartreuse\":\n return Color.rgb_to_int(127, 255, 0)\n if v == \"chocolate\":\n return Color.rgb_to_int(210, 105, 30)\n if v == \"coral\":\n return Color.rgb_to_int(255, 127, 80)\n if v == \"cornflowerblue\":\n return Color.rgb_to_int(100, 149, 237)\n if v == \"cornsilk\":\n return Color.rgb_to_int(255, 248, 220)\n if v == \"crimson\":\n return Color.rgb_to_int(220, 20, 60)\n if v == \"cyan\":\n return Color.rgb_to_int(0, 255, 255)\n if v == \"darkblue\":\n return Color.rgb_to_int(0, 0, 139)\n if v == \"darkcyan\":\n return Color.rgb_to_int(0, 139, 139)\n if v == \"darkgoldenrod\":\n return Color.rgb_to_int(184, 134, 11)\n if v == \"darkgray\":\n return Color.rgb_to_int(169, 169, 169)\n if v == \"darkgreen\":\n return Color.rgb_to_int(0, 100, 0)\n if v == \"darkgrey\":\n return Color.rgb_to_int(169, 169, 169)\n if v == \"darkkhaki\":\n return Color.rgb_to_int(189, 183, 107)\n if v == \"darkmagenta\":\n return Color.rgb_to_int(139, 0, 139)\n if v == \"darkolivegreen\":\n return Color.rgb_to_int(85, 107, 47)\n if v == \"darkorange\":\n return Color.rgb_to_int(255, 140, 0)\n if v == \"darkorchid\":\n return Color.rgb_to_int(153, 50, 204)\n if v == \"darkred\":\n return Color.rgb_to_int(139, 0, 0)\n if v == \"darksalmon\":\n return Color.rgb_to_int(233, 150, 122)\n if v == \"darkseagreen\":\n return Color.rgb_to_int(143, 188, 143)\n if v == \"darkslateblue\":\n return Color.rgb_to_int(72, 61, 139)\n if v == \"darkslategray\":\n return Color.rgb_to_int(47, 79, 79)\n if v == \"darkslategrey\":\n return Color.rgb_to_int(47, 79, 79)\n if v == \"darkturquoise\":\n return Color.rgb_to_int(0, 206, 209)\n if v == \"darkviolet\":\n return Color.rgb_to_int(148, 0, 211)\n if v == \"deeppink\":\n return Color.rgb_to_int(255, 20, 147)\n if v == \"deepskyblue\":\n return Color.rgb_to_int(0, 191, 255)\n if v == \"dimgray\":\n return Color.rgb_to_int(105, 105, 105)\n if v == \"dimgrey\":\n return Color.rgb_to_int(105, 105, 105)\n if v == \"dodgerblue\":\n return Color.rgb_to_int(30, 144, 255)\n if v == \"firebrick\":\n return Color.rgb_to_int(178, 34, 34)\n if v == \"floralwhite\":\n return Color.rgb_to_int(255, 250, 240)\n if v == \"forestgreen\":\n return Color.rgb_to_int(34, 139, 34)\n if v == \"fuchsia\":\n return Color.rgb_to_int(255, 0, 255)\n if v == \"gainsboro\":\n return Color.rgb_to_int(220, 220, 220)\n if v == \"ghostwhite\":\n return Color.rgb_to_int(248, 248, 255)\n if v == \"gold\":\n return Color.rgb_to_int(255, 215, 0)\n if v == \"goldenrod\":\n return Color.rgb_to_int(218, 165, 32)\n if v == \"gray\":\n return Color.rgb_to_int(128, 128, 128)\n if v == \"grey\":\n return Color.rgb_to_int(128, 128, 128)\n if v == \"green\":\n return Color.rgb_to_int(0, 128, 0)\n if v == \"greenyellow\":\n return Color.rgb_to_int(173, 255, 47)\n if v == \"honeydew\":\n return Color.rgb_to_int(240, 255, 240)\n if v == \"hotpink\":\n return Color.rgb_to_int(255, 105, 180)\n if v == \"indianred\":\n return Color.rgb_to_int(205, 92, 92)\n if v == \"indigo\":\n return Color.rgb_to_int(75, 0, 130)\n if v == 
\"ivory\":\n return Color.rgb_to_int(255, 255, 240)\n if v == \"khaki\":\n return Color.rgb_to_int(240, 230, 140)\n if v == \"lavender\":\n return Color.rgb_to_int(230, 230, 250)\n if v == \"lavenderblush\":\n return Color.rgb_to_int(255, 240, 245)\n if v == \"lawngreen\":\n return Color.rgb_to_int(124, 252, 0)\n if v == \"lemonchiffon\":\n return Color.rgb_to_int(255, 250, 205)\n if v == \"lightblue\":\n return Color.rgb_to_int(173, 216, 230)\n if v == \"lightcoral\":\n return Color.rgb_to_int(240, 128, 128)\n if v == \"lightcyan\":\n return Color.rgb_to_int(224, 255, 255)\n if v == \"lightgoldenrodyellow\":\n return Color.rgb_to_int(250, 250, 210)\n if v == \"lightgray\":\n return Color.rgb_to_int(211, 211, 211)\n if v == \"lightgreen\":\n return Color.rgb_to_int(144, 238, 144)\n if v == \"lightgrey\":\n return Color.rgb_to_int(211, 211, 211)\n if v == \"lightpink\":\n return Color.rgb_to_int(255, 182, 193)\n if v == \"lightsalmon\":\n return Color.rgb_to_int(255, 160, 122)\n if v == \"lightseagreen\":\n return Color.rgb_to_int(32, 178, 170)\n if v == \"lightskyblue\":\n return Color.rgb_to_int(135, 206, 250)\n if v == \"lightslategray\":\n return Color.rgb_to_int(119, 136, 153)\n if v == \"lightslategrey\":\n return Color.rgb_to_int(119, 136, 153)\n if v == \"lightsteelblue\":\n return Color.rgb_to_int(176, 196, 222)\n if v == \"lightyellow\":\n return Color.rgb_to_int(255, 255, 224)\n if v == \"lime\":\n return Color.rgb_to_int(0, 255, 0)\n if v == \"limegreen\":\n return Color.rgb_to_int(50, 205, 50)\n if v == \"linen\":\n return Color.rgb_to_int(250, 240, 230)\n if v == \"magenta\":\n return Color.rgb_to_int(255, 0, 255)\n if v == \"maroon\":\n return Color.rgb_to_int(128, 0, 0)\n if v == \"mediumaquamarine\":\n return Color.rgb_to_int(102, 205, 170)\n if v == \"mediumblue\":\n return Color.rgb_to_int(0, 0, 205)\n if v == \"mediumorchid\":\n return Color.rgb_to_int(186, 85, 211)\n if v == \"mediumpurple\":\n return Color.rgb_to_int(147, 112, 219)\n if v == \"mediumseagreen\":\n return Color.rgb_to_int(60, 179, 113)\n if v == \"mediumslateblue\":\n return Color.rgb_to_int(123, 104, 238)\n if v == \"mediumspringgreen\":\n return Color.rgb_to_int(0, 250, 154)\n if v == \"mediumturquoise\":\n return Color.rgb_to_int(72, 209, 204)\n if v == \"mediumvioletred\":\n return Color.rgb_to_int(199, 21, 133)\n if v == \"midnightblue\":\n return Color.rgb_to_int(25, 25, 112)\n if v == \"mintcream\":\n return Color.rgb_to_int(245, 255, 250)\n if v == \"mistyrose\":\n return Color.rgb_to_int(255, 228, 225)\n if v == \"moccasin\":\n return Color.rgb_to_int(255, 228, 181)\n if v == \"navajowhite\":\n return Color.rgb_to_int(255, 222, 173)\n if v == \"navy\":\n return Color.rgb_to_int(0, 0, 128)\n if v == \"oldlace\":\n return Color.rgb_to_int(253, 245, 230)\n if v == \"olive\":\n return Color.rgb_to_int(128, 128, 0)\n if v == \"olivedrab\":\n return Color.rgb_to_int(107, 142, 35)\n if v == \"orange\":\n return Color.rgb_to_int(255, 165, 0)\n if v == \"orangered\":\n return Color.rgb_to_int(255, 69, 0)\n if v == \"orchid\":\n return Color.rgb_to_int(218, 112, 214)\n if v == \"palegoldenrod\":\n return Color.rgb_to_int(238, 232, 170)\n if v == \"palegreen\":\n return Color.rgb_to_int(152, 251, 152)\n if v == \"paleturquoise\":\n return Color.rgb_to_int(175, 238, 238)\n if v == \"palevioletred\":\n return Color.rgb_to_int(219, 112, 147)\n if v == \"papayawhip\":\n return Color.rgb_to_int(255, 239, 213)\n if v == \"peachpuff\":\n return Color.rgb_to_int(255, 218, 185)\n if v == \"peru\":\n return 
Color.rgb_to_int(205, 133, 63)\n if v == \"pink\":\n return Color.rgb_to_int(255, 192, 203)\n if v == \"plum\":\n return Color.rgb_to_int(221, 160, 221)\n if v == \"powderblue\":\n return Color.rgb_to_int(176, 224, 230)\n if v == \"purple\":\n return Color.rgb_to_int(128, 0, 128)\n if v == \"red\":\n return Color.rgb_to_int(255, 0, 0)\n if v == \"rosybrown\":\n return Color.rgb_to_int(188, 143, 143)\n if v == \"royalblue\":\n return Color.rgb_to_int(65, 105, 225)\n if v == \"saddlebrown\":\n return Color.rgb_to_int(139, 69, 19)\n if v == \"salmon\":\n return Color.rgb_to_int(250, 128, 114)\n if v == \"sandybrown\":\n return Color.rgb_to_int(244, 164, 96)\n if v == \"seagreen\":\n return Color.rgb_to_int(46, 139, 87)\n if v == \"seashell\":\n return Color.rgb_to_int(255, 245, 238)\n if v == \"sienna\":\n return Color.rgb_to_int(160, 82, 45)\n if v == \"silver\":\n return Color.rgb_to_int(192, 192, 192)\n if v == \"skyblue\":\n return Color.rgb_to_int(135, 206, 235)\n if v == \"slateblue\":\n return Color.rgb_to_int(106, 90, 205)\n if v == \"slategray\":\n return Color.rgb_to_int(112, 128, 144)\n if v == \"slategrey\":\n return Color.rgb_to_int(112, 128, 144)\n if v == \"snow\":\n return Color.rgb_to_int(255, 250, 250)\n if v == \"springgreen\":\n return Color.rgb_to_int(0, 255, 127)\n if v == \"steelblue\":\n return Color.rgb_to_int(70, 130, 180)\n if v == \"tan\":\n return Color.rgb_to_int(210, 180, 140)\n if v == \"teal\":\n return Color.rgb_to_int(0, 128, 128)\n if v == \"thistle\":\n return Color.rgb_to_int(216, 191, 216)\n if v == \"tomato\":\n return Color.rgb_to_int(255, 99, 71)\n if v == \"turquoise\":\n return Color.rgb_to_int(64, 224, 208)\n if v == \"violet\":\n return Color.rgb_to_int(238, 130, 238)\n if v == \"wheat\":\n return Color.rgb_to_int(245, 222, 179)\n if v == \"white\":\n return Color.rgb_to_int(255, 255, 255)\n if v == \"whitesmoke\":\n return Color.rgb_to_int(245, 245, 245)\n if v == \"yellow\":\n return Color.rgb_to_int(255, 255, 0)\n if v == \"yellowgreen\":\n return Color.rgb_to_int(154, 205, 50)\n return Color.rgb_to_int(0, 0, 0)\n\n @staticmethod\n def parse_color_hex(hex_string):\n \"\"\"Parse SVG Color by Hex String\"\"\"\n h = hex_string.lstrip(\"#\")\n size = len(h)\n if size == 8:\n return int(h[:8], 16)\n elif size == 6:\n s = \"{0}\".format(h[:6])\n q = ~int(s, 16) & 0xFFFFFF\n v = -1 ^ q\n return v\n elif size == 4:\n s = h[0] + h[0] + h[1] + h[1] + h[2] + h[2] + h[3] + h[3]\n return int(s, 16)\n elif size == 3:\n s = \"{0}{0}{1}{1}{2}{2}\".format(h[0], h[1], h[2])\n q = ~int(s, 16) & 0xFFFFFF\n v = -1 ^ q\n return v\n return Color.rgb_to_int(0, 0, 0)\n\n @staticmethod\n def parse_color_rgb(values):\n \"\"\"Parse SVG Color, RGB value declarations \"\"\"\n r = int(values[0])\n g = int(values[1])\n b = int(values[2])\n if values[3] is not None:\n opacity = float(values[3])\n else:\n opacity = 1\n return Color.rgb_to_int(r, g, b, opacity)\n\n @staticmethod\n def parse_color_rgbp(values):\n \"\"\"Parse SVG color, RGB percent value declarations\"\"\"\n ratio = 255.0 / 100.0\n r = round(float(values[0]) * ratio)\n g = round(float(values[1]) * ratio)\n b = round(float(values[2]) * ratio)\n if values[3] is not None:\n opacity = float(values[3])\n else:\n opacity = 1\n return Color.rgb_to_int(r, g, b, opacity)\n\n @staticmethod\n def parse_color_hsl(values):\n \"\"\"Parse SVG color, HSL value declarations\"\"\"\n h = Angle.parse(values[0])\n h = h.as_turns\n s = float(values[1]) / 100.0\n if s > 1:\n s = 1.0\n if s < 0:\n s = 0.0\n l = float(values[2]) / 100.0\n 
if l > 1:\n l = 1.0\n if l < 0:\n l = 0.0\n if values[3] is not None:\n opacity = float(values[3])\n else:\n opacity = 1\n return Color.hsl_to_int(h, s, l, opacity)\n\n @property\n def opacity(self):\n return self.alpha / 255.0 if self.value is not None else None\n\n @opacity.setter\n def opacity(self, opacity):\n if self.value is None:\n raise ValueError\n a = int(round(opacity * 255.0))\n a = Color.crimp(a)\n self.alpha = a\n\n @property\n def alpha(self):\n return (self.value >> 24) & 0xFF if self.value is not None else None\n\n @alpha.setter\n def alpha(self, a):\n if self.value is None:\n raise ValueError\n a = Color.crimp(a)\n self.value &= 0xFFFFFF\n self.value = int(self.value)\n if a & 0x80 != 0:\n a ^= 0x80\n a <<= 24\n a = ~a\n a ^= 0x7FFFFFFF\n else:\n a <<= 24\n self.value |= a\n\n @property\n def red(self):\n return (self.value >> 16) & 0xFF if self.value is not None else None\n\n @red.setter\n def red(self, r):\n if self.value is None:\n raise ValueError\n r = int(r & 0xFF)\n self.value &= ~0xFF0000\n r <<= 16\n self.value |= r\n\n @property\n def green(self):\n return (self.value >> 8) & 0xFF if self.value is not None else None\n\n @green.setter\n def green(self, g):\n if self.value is None:\n raise ValueError\n g = int(g & 0xFF)\n self.value &= ~0xFF00\n g <<= 8\n self.value |= g\n\n @property\n def blue(self):\n return self.value & 0xFF if self.value is not None else None\n\n @blue.setter\n def blue(self, b):\n if self.value is None:\n raise ValueError\n b = int(b & 0xFF)\n self.value &= ~0xFF\n self.value |= b\n\n @property\n def hexa(self):\n return (\n \"#%02x%02x%02x%02x\" % (self.alpha, self.red, self.green, self.blue)\n if self.value is not None\n else None\n )\n\n @property\n def hex(self):\n if self.alpha == 0xFF:\n return (\n \"#%02x%02x%02x\" % (self.red, self.green, self.blue)\n if self.value is not None\n else None\n )\n else:\n return self.hexa\n\n @property\n def hue(self):\n if self.value is None:\n return None\n r = self.red / 255.0\n g = self.green / 255.0\n b = self.blue / 255.0\n var_min = min(r, g, b)\n var_max = max(r, g, b)\n delta_max = var_max - var_min\n if delta_max == 0:\n return 0\n dr = (((var_max - r) / 6.0) + delta_max / 2.0) / delta_max\n dg = (((var_max - g) / 6.0) + delta_max / 2.0) / delta_max\n db = (((var_max - b) / 6.0) + delta_max / 2.0) / delta_max\n if r == var_max:\n h = db - dg\n elif g == var_max:\n h = (1.0 / 3.0) + dr - db\n else: # db == max_v\n h = (2.0 / 3.0) + dg - dr\n if h < 0:\n h += 1\n if h > 1:\n h -= 1\n return h\n\n @hue.setter\n def hue(self, v):\n if self.value is None:\n raise ValueError\n h, s, l = self.hsl\n self.hsl = v, s, l\n\n @property\n def saturation(self):\n if self.value is None:\n return None\n r = self.red / 255.0\n g = self.green / 255.0\n b = self.blue / 255.0\n min_v = min(r, g, b)\n max_v = max(r, g, b)\n delta = max_v - min_v\n if max_v == min_v:\n return 0.0\n if (max_v + min_v) < 1:\n return delta / (max_v + min_v)\n else:\n return delta / (2.0 - max_v - min_v)\n\n @saturation.setter\n def saturation(self, v):\n if self.value is None:\n raise ValueError\n h, s, l = self.hsl\n self.hsl = h, v, l\n\n @property\n def lightness(self):\n if self.value is None:\n return None\n r = self.red / 255.0\n g = self.green / 255.0\n b = self.blue / 255.0\n min_v = min(r, g, b)\n max_v = max(r, g, b)\n return (max_v + min_v) / 2.0\n\n @lightness.setter\n def lightness(self, v):\n if self.value is None:\n raise ValueError\n h, s, l = self.hsl\n self.hsl = h, s, v\n\n @property\n def intensity(self):\n if 
self.value is None:\n return None\n r = self.red\n g = self.green\n b = self.blue\n return (r + b + g) / 768.0\n\n @property\n def brightness(self):\n if self.value is None:\n return None\n r = self.red\n g = self.green\n b = self.blue\n cmax = max(r, g, b)\n return cmax / 255.0\n\n @property\n def blackness(self):\n if self.value is None:\n return None\n return 1.0 - self.brightness\n\n @property\n def luminance(self):\n if self.value is None:\n return None\n r = self.red / 255.0\n g = self.green / 255.0\n b = self.blue / 255.0\n return r * 0.3 + g * 0.59 + b * 0.11\n\n @property\n def luma(self):\n if self.value is None:\n return None\n r = self.red / 255.0\n g = self.green / 255.0\n b = self.blue / 255.0\n return r * 0.2126 + g * 0.7152 + b * 0.0722\n\n @staticmethod\n def over(c1, c2):\n \"\"\"\n Porter Duff Alpha compositing operation over.\n Returns c1 over c2. This is the standard painter algorithm.\n \"\"\"\n if isinstance(c1, (str, int)):\n c1 = Color(c1)\n if isinstance(c2, (str, int)):\n c2 = Color(c2)\n r1 = c1.red\n g1 = c1.green\n b1 = c1.blue\n a1 = c1.alpha\n if a1 == 255:\n return c1.value\n if a1 == 0:\n return c2.value\n r2 = c2.red\n g2 = c2.green\n b2 = c2.blue\n a2 = c2.alpha\n\n q = 255.0 - a1\n\n sr = r1 * a1 * 255.0 + r2 * a2 * q\n sg = g1 * a1 * 255.0 + g2 * a2 * q\n sb = b1 * a1 * 255.0 + b2 * a2 * q\n sa = a1 * 255.0 + a2 * q\n sr /= sa\n sg /= sa\n sb /= sa\n sa /= 255.0 * 255.0\n return Color.rgb_to_int(sr, sg, sb, sa)\n\n @staticmethod\n def distance(c1, c2):\n return sqrt(Color.distance_sq(c1, c2))\n\n @staticmethod\n def distance_sq(c1, c2):\n \"\"\"\n Function returns the square of the color distance. Squaring is monotonic, so ordering colors by squared\n distance gives the same result as ordering by distance, and the square root can be skipped.\n\n Rather than naive Euclidean distance we use Compuphase's Redmean color distance.\n https://www.compuphase.com/cmetric.htm\n\n It's computationally simple, and empirical tests find it to be on par with LabDE2000.\n\n :param c1: first color\n :param c2: second color\n :return: square of color distance\n \"\"\"\n if isinstance(c1, str):\n c1 = Color(c1)\n elif isinstance(c1, int):\n c1 = Color(c1)\n if isinstance(c2, str):\n c2 = Color(c2)\n elif isinstance(c2, int):\n c2 = Color(c2)\n red_mean = int((c1.red + c2.red) / 2.0)\n r = c1.red - c2.red\n g = c1.green - c2.green\n b = c1.blue - c2.blue\n return (((512 + red_mean) * r * r) >> 8) + 4 * g * g + ((\n (767 - red_mean) * b * b\n ) >> 8)\n\n @staticmethod\n def crimp(v):\n if v > 255:\n return 255\n if v < 0:\n return 0\n return int(v)\n\n @property\n def hsl(self):\n if self.value is None:\n return None\n return self.hue, self.saturation, self.lightness\n\n @hsl.setter\n def hsl(self, value):\n if not isinstance(value, tuple):\n return\n h, s, l = value\n\n def hue_2_rgb(v1, v2, vh):\n if vh < 0:\n vh += 1\n if vh > 1:\n vh -= 1\n if 6.0 * vh < 1.0:\n return v1 + (v2 - v1) * 6.0 * vh\n if 2.0 * vh < 1:\n return v2\n if 3 * vh < 2.0:\n return v1 + (v2 - v1) * ((2.0 / 3.0) - vh) * 6.0\n return v1\n\n if s == 0.0:\n r = 255.0 * l\n g = 255.0 * l\n b = 255.0 * l\n else:\n if l < 0.5:\n v2 = l * (1.0 + s)\n else:\n v2 = (l + s) - (s * l)\n v1 = 2 * l - v2\n r = 255.0 * hue_2_rgb(v1, v2, h + (1.0 / 3.0))\n g = 255.0 * hue_2_rgb(v1, v2, h)\n b = 255.0 * hue_2_rgb(v1, v2, h - (1.0 / 3.0))\n self.value = self.rgb_to_int(r, g, b)\n\n def distance_to(self, other):\n return Color.distance(self, other)\n\n def blend(self, other, 
opacity=None):\n \"\"\"\n Blends the given color with the current color.\n \"\"\"\n if opacity is None:\n self.value = Color.over(other, self)\n else:\n color = Color(other)\n color.opacity = opacity\n self.value = Color.over(color, self)\n\n\nclass Point:\n \"\"\"Point is a general subscriptable point class with .x and .y as well as [0] and [1]\n\n For compatibility with regebro svg.path we accept complex numbers as points x + yj,\n and provide .real and .imag as properties. As well as float and integer values as (v,0) elements.\n\n With regard to SVG 7.15.1 defining SVGPoint this class provides for matrix transformations.\n\n Points are only positions in real Euclidean space. This class is not intended to interact with\n the Length class.\n \"\"\"\n\n def __init__(self, x, y=None):\n if x is not None and y is None:\n if isinstance(x, str):\n string_x, string_y = REGEX_COORD_PAIR.findall(x)[0]\n self.x = float(string_x)\n self.y = float(string_y)\n return\n try: # Try .x .y\n self.y = x.y\n self.x = x.x\n return\n except AttributeError:\n pass\n try: # try subscription.\n self.y = x[1]\n self.x = x[0]\n return\n except TypeError:\n pass\n try: # try .imag .real complex values.\n self.y = x.imag\n self.x = x.real\n return\n except AttributeError:\n # Unknown.\n raise TypeError\n self.x = x\n self.y = y\n\n def __key(self):\n return (self.x, self.y)\n\n def __hash__(self):\n return hash(self.__key())\n\n def __eq__(self, other):\n if other is None:\n return False\n try:\n if not isinstance(other, Point):\n other = Point(other)\n except Exception:\n return NotImplemented\n\n return abs(self.x - other.x) <= ERROR and abs(self.y - other.y) <= ERROR\n\n def __ne__(self, other):\n return not self == other\n\n def __len__(self):\n return 2\n\n def __getitem__(self, item):\n if item == 0:\n return self.x\n elif item == 1:\n return self.y\n else:\n raise IndexError\n\n def __setitem__(self, key, value):\n if key == 0:\n self.x = value\n elif key == 1:\n self.y = value\n else:\n raise IndexError\n\n def __repr__(self):\n x_str = Length.str(self.x)\n y_str = Length.str(self.y)\n return \"Point(%s,%s)\" % (x_str, y_str)\n\n def __copy__(self):\n return Point(self.x, self.y)\n\n def __str__(self):\n try:\n x_str = \"%.12G\" % (self.x)\n except TypeError:\n return self.__repr__()\n if \".\" in x_str:\n x_str = x_str.rstrip(\"0\").rstrip(\".\")\n y_str = \"%.12G\" % (self.y)\n if \".\" in y_str:\n y_str = y_str.rstrip(\"0\").rstrip(\".\")\n return \"%s,%s\" % (x_str, y_str)\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n v = other.point_in_matrix_space(self)\n self.x = v.x\n self.y = v.y\n return self\n try:\n c = complex(self) * complex(other.x, other.y)\n self.x = c.real\n self.y = c.imag\n return self\n except AttributeError:\n pass\n try:\n c = complex(self) * complex(other[0], other[1])\n self.x = c.real\n self.y = c.imag\n return self\n except (TypeError, IndexError):\n pass\n try:\n c = complex(self) * complex(other.real, other.imag)\n self.x = c.real\n self.y = c.imag\n return self\n except AttributeError:\n pass\n try:\n self.x *= other\n self.y *= other\n return self\n except Exception:\n return NotImplemented\n\n def __mul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n return other.point_in_matrix_space(self)\n try:\n return Point(complex(self) * complex(other.x, other.y))\n except AttributeError:\n pass\n try:\n return Point(complex(self) * complex(other[0], 
other[1]))\n except (TypeError, IndexError):\n pass\n try:\n return Point(complex(self) * complex(other.real, other.imag))\n except AttributeError:\n pass\n try:\n return Point(self.x * other, self.y * other)\n except Exception:\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __iadd__(self, other):\n try:\n self.x += other.x\n self.y += other.y\n return self\n except AttributeError:\n pass\n try:\n self.y += other[1]\n self.x += other[0]\n return self\n except (TypeError, IndexError):\n pass\n try:\n self.x += other.real\n self.y += other.imag\n return self\n except AttributeError:\n pass\n try:\n self.x += other\n return self\n except Exception:\n return NotImplemented\n\n def __add__(self, other):\n try:\n x = self.x + other.x\n y = self.y + other.y\n return Point(x, y)\n except AttributeError:\n pass\n try:\n y = self.y + other[1]\n x = self.x + other[0]\n return Point(x, y)\n except (TypeError, IndexError):\n pass\n try:\n x = self.x + other.real\n y = self.y + other.imag\n return Point(x, y)\n except AttributeError:\n pass\n if isinstance(other, (float, int)):\n x = self.x + other\n return Point(x, self.y)\n return NotImplemented\n\n __radd__ = __add__\n\n def __isub__(self, other):\n try:\n self.x -= other.x\n self.y -= other.y\n return self\n except AttributeError:\n pass\n try:\n self.y -= other[1]\n self.x -= other[0]\n return self\n except (TypeError, IndexError):\n pass\n try:\n self.x -= other.real\n self.y -= other.imag\n return self\n except AttributeError:\n pass\n try:\n self.x -= other\n return self\n except Exception:\n return NotImplemented\n\n def __sub__(self, other):\n try:\n x = self.x - other.x\n y = self.y - other.y\n return Point(x, y)\n except AttributeError:\n pass\n try:\n y = self.y - other[1]\n x = self.x - other[0]\n return Point(x, y)\n except (TypeError, IndexError):\n pass\n try:\n x = self.x - other.real\n y = self.y - other.imag\n return Point(x, y)\n except AttributeError:\n pass\n if isinstance(other, (float, int)):\n x = self.x - other\n return Point(x, self.y)\n return NotImplemented\n\n def __rsub__(self, other):\n try:\n x = other.x - self.x\n y = other.y - self.y\n return Point(x, y)\n except AttributeError:\n pass\n try:\n y = other[1] - self.y\n x = other[0] - self.x\n return Point(x, y)\n except (TypeError, IndexError):\n pass\n try:\n x = other.real - self.x\n y = other.imag - self.y\n return Point(x, y)\n except AttributeError:\n pass\n if isinstance(other, (float, int)):\n x = other - self.x\n return Point(x, self.y)\n return NotImplemented\n\n def __complex__(self):\n return self.x + self.y * 1j\n\n def __abs__(self):\n return hypot(self.x, self.y)\n\n def __pow__(self, other):\n r_raised = abs(self) ** other\n argz_multiplied = self.argz() * other\n\n real_part = round(r_raised * cos(argz_multiplied))\n imag_part = round(r_raised * sin(argz_multiplied))\n return self.__class__(real_part, imag_part)\n\n def conjugate(self):\n return self.__class__(self.real, -self.imag)\n\n def argz(self):\n return atan(self.imag / self.real)\n\n @property\n def real(self):\n \"\"\"Emulate svg.path use of complex numbers\"\"\"\n return self.x\n\n @property\n def imag(self):\n \"\"\"Emulate svg.path use of complex numbers\"\"\"\n return self.y\n\n def matrix_transform(self, matrix):\n self *= matrix\n return self\n\n def move_towards(self, p2, amount=1):\n if not isinstance(p2, Point):\n p2 = Point(p2)\n self += amount * (p2 - self)\n\n def distance_to(self, p2):\n return abs(self - p2)\n\n def angle_to(self, p2):\n p = p2 - self\n return 
Angle.radians(atan2(p.y, p.x))\n\n def polar_to(self, angle, distance):\n q = Point.polar(self, angle, distance)\n self.x = q.x\n self.y = q.y\n return self\n\n def reflected_across(self, p):\n return p + (p - self)\n\n @staticmethod\n def orientation(p, q, r):\n """Determine the clockwise, linear, or counterclockwise orientation of the given points"""\n val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2\n\n @staticmethod\n def convex_hull(pts):\n if len(pts) == 0:\n return\n points = sorted(set(pts), key=lambda p: p[0])\n first_point_on_hull = points[0]\n point_on_hull = first_point_on_hull\n while True:\n yield point_on_hull\n endpoint = point_on_hull\n for t in points:\n if (\n point_on_hull is endpoint\n or Point.orientation(point_on_hull, t, endpoint) == 2\n ):\n endpoint = t\n point_on_hull = endpoint\n if first_point_on_hull is point_on_hull:\n break\n\n @staticmethod\n def distance(p1, p2):\n dx = p1[0] - p2[0]\n dy = p1[1] - p2[1]\n dx *= dx\n dy *= dy\n return sqrt(dx + dy)\n\n @staticmethod\n def polar(p1, angle, r):\n dx = cos(angle) * r\n dy = sin(angle) * r\n return Point(p1[0] + dx, p1[1] + dy)\n\n @staticmethod\n def angle(p1, p2):\n return Angle.radians(atan2(p2[1] - p1[1], p2[0] - p1[0]))\n\n @staticmethod\n def towards(p1, p2, amount):\n tx = amount * (p2[0] - p1[0]) + p1[0]\n ty = amount * (p2[1] - p1[1]) + p1[1]\n return Point(tx, ty)\n\n\nclass Angle(float):\n """CSS Angle as defined and used in SVG/CSS."""\n\n def __repr__(self):\n return "Angle(%.12f)" % self\n\n def __copy__(self):\n return Angle(self)\n\n def __eq__(self, other):\n # Angles are equal if they normalize to the same turn within a small tolerance.\n c1 = abs((self % tau) - (other % tau)) <= 1e-11\n return c1\n\n def normalized(self):\n return Angle(self % tau)\n\n @classmethod\n def parse(cls, angle_string):\n if not isinstance(angle_string, str):\n return\n angle_string = angle_string.lower()\n if angle_string.endswith("deg"):\n return Angle.degrees(float(angle_string[:-3]))\n if angle_string.endswith("grad"):\n return Angle.gradians(float(angle_string[:-4]))\n if angle_string.endswith(\n "rad"\n ): # Must be after 'grad' since 'grad' ends with 'rad' too.\n return Angle.radians(float(angle_string[:-3]))\n if angle_string.endswith("turn"):\n return Angle.turns(float(angle_string[:-4]))\n if angle_string.endswith("%"):\n return Angle.turns(float(angle_string[:-1]) / 100.0)\n return Angle.degrees(float(angle_string))\n\n @classmethod\n def radians(cls, radians):\n return cls(radians)\n\n @classmethod\n def degrees(cls, degrees):\n return cls(tau * degrees / 360.0)\n\n @classmethod\n def gradians(cls, gradians):\n return cls(tau * gradians / 400.0)\n\n @classmethod\n def turns(cls, turns):\n return cls(tau * turns)\n\n @property\n def as_radians(self):\n return self\n\n @property\n def as_degrees(self):\n return self * 360.0 / tau\n\n @property\n def as_positive_degrees(self):\n v = self.as_degrees\n while v < 0:\n v += 360.0\n return v\n\n @property\n def as_gradians(self):\n return self * 400.0 / tau\n\n @property\n def as_turns(self):\n return self / tau\n\n def is_orthogonal(self):\n return (self % (tau / 4.0)) == 0\n\n\nclass Matrix:\n """\n Provides svg matrix interfacing.\n\n SVG 7.15.3 defines the matrix form as:\n [a c e]\n [b d f]\n\n While e and f are defined as floats, they can for limited periods be defined as a Length.\n With regard to CSS, it's reasonable to perform operations like 'transform(20cm, 20cm)' and\n expect these to be treated 
consistently. Performing other matrix operations in a consistent\n way. However, render must be called to change these parameters into float locations prior to\n any operation which might be used to transform a point or polyline or path object.\n \"\"\"\n\n def __init__(self, *components, **kwargs):\n self.a = 1.0\n self.b = 0.0\n self.c = 0.0\n self.d = 1.0\n self.e = 0.0\n self.f = 0.0\n len_args = len(components)\n if len_args == 0:\n pass\n elif len_args == 1:\n m = components[0]\n if isinstance(m, str):\n self.parse(m)\n self.render(**kwargs)\n else:\n self.a = m[0]\n self.b = m[1]\n self.c = m[2]\n self.d = m[3]\n self.e = m[4]\n self.f = m[5]\n else:\n self.a = components[0]\n self.b = components[1]\n self.c = components[2]\n self.d = components[3]\n self.e = components[4]\n self.f = components[5]\n self.render(**kwargs)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __eq__(self, other):\n if other is None:\n return False\n if isinstance(other, str):\n other = Matrix(other)\n if not isinstance(other, Matrix):\n return False\n if abs(self.a - other.a) > 1e-12:\n return False\n if abs(self.b - other.b) > 1e-12:\n return False\n if abs(self.c - other.c) > 1e-12:\n return False\n if abs(self.d - other.d) > 1e-12:\n return False\n if self.e != other.e and abs(self.e - other.e) > 1e-12:\n return False\n if self.f != other.f and abs(self.f - other.f) > 1e-12:\n return False\n return True\n\n def __len__(self):\n return 6\n\n def __invert__(self):\n m = self.__copy__()\n return m.inverse()\n\n def __matmul__(self, other):\n m = copy(self)\n m.__imatmul__(other)\n return m\n\n def __rmatmul__(self, other):\n m = copy(other)\n m.__imatmul__(self)\n return m\n\n def __imatmul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n self.a, self.b, self.c, self.d, self.e, self.f = Matrix.matrix_multiply(\n self, other\n )\n return self\n\n __mul__ = __matmul__\n __rmul__ = __rmatmul__\n __imul__ = __imatmul__\n\n def __getitem__(self, item):\n if item == 0:\n return float(self.a)\n elif item == 1:\n return float(self.b)\n elif item == 2:\n return float(self.c)\n elif item == 3:\n return float(self.d)\n elif item == 4:\n return self.e\n elif item == 5:\n return self.f\n\n def __setitem__(self, key, value):\n if key == 0:\n self.a = value\n elif key == 1:\n self.b = value\n elif key == 2:\n self.c = value\n elif key == 3:\n self.d = value\n elif key == 4:\n self.e = value\n elif key == 5:\n self.f = value\n\n def __repr__(self):\n return \"Matrix(%s, %s, %s, %s, %s, %s)\" % (\n Length.str(self.a),\n Length.str(self.b),\n Length.str(self.c),\n Length.str(self.d),\n Length.str(self.e),\n Length.str(self.f),\n )\n\n def __copy__(self):\n return Matrix(self.a, self.b, self.c, self.d, self.e, self.f)\n\n def __str__(self):\n \"\"\"\n Many of SVG's graphics operations utilize 2x3:\n\n :returns string representation of matrix.\n \"\"\"\n return \"[%3f, %3f,\\n %3f, %3f, %s, %s]\" % (\n self.a,\n self.c,\n self.b,\n self.d,\n self.e,\n self.f,\n )\n\n def parse(self, transform_str):\n \"\"\"Parses the svg transform string.\n\n Transforms from SVG 1.1 have a smaller complete set of operations. Whereas in SVG 2.0 they gain\n the CSS transforms and the additional functions and parsing that go with that. 
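A minimal\n sketch of the intended parse behaviour (illustrative values, not drawn from this module's tests):\n\n m = Matrix("translate(20, 20) scale(2)")\n m.point_in_matrix_space((1, 1)) # scale applies first, then translate -> (22, 22)\n\n 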
This parse is\n compatible with SVG 1.1 and the SVG 2.0 which includes the CSS 2d superset.\n\n CSS transforms have scalex() scaley() translatex(), translatey(), and skew() (deprecated).\n 2D CSS angles haves units: \"deg\" tau / 360, \"rad\" tau/tau, \"grad\" tau/400, \"turn\" tau.\n 2D CSS distances have length/percentages: \"px\", \"cm\", \"mm\", \"in\", \"pt\", etc. (+|-)?d+%\n\n In the case of percentages there must be a known height and width to properly create a matrix out of that.\n\n \"\"\"\n if not transform_str:\n return\n if not isinstance(transform_str, str):\n raise TypeError(\"Must provide a string to parse\")\n\n for sub_element in REGEX_TRANSFORM_TEMPLATE.findall(transform_str.lower()):\n name = sub_element[0]\n params = tuple(REGEX_TRANSFORM_PARAMETER.findall(sub_element[1]))\n params = [mag + units for mag, units in params]\n if SVG_TRANSFORM_MATRIX == name:\n params = map(float, params)\n self.pre_cat(*params)\n elif SVG_TRANSFORM_TRANSLATE == name:\n try:\n x_param = Length(params[0]).value()\n except IndexError:\n continue\n try:\n y_param = Length(params[1]).value()\n self.pre_translate(x_param, y_param)\n except IndexError:\n self.pre_translate(x_param)\n elif SVG_TRANSFORM_TRANSLATE_X == name:\n self.pre_translate(Length(params[0]).value(), 0)\n elif SVG_TRANSFORM_TRANSLATE_Y == name:\n self.pre_translate(0, Length(params[0]).value())\n elif SVG_TRANSFORM_SCALE == name:\n params = map(float, params)\n self.pre_scale(*params)\n elif SVG_TRANSFORM_SCALE_X == name:\n self.pre_scale(float(params[0]), 1)\n elif SVG_TRANSFORM_SCALE_Y == name:\n self.pre_scale(1, float(params[0]))\n elif SVG_TRANSFORM_ROTATE == name:\n angle = Angle.parse(params[0])\n try:\n x_param = Length(params[1]).value()\n except IndexError:\n self.pre_rotate(angle)\n continue\n try:\n y_param = Length(params[2]).value()\n self.pre_rotate(angle, x_param, y_param)\n except IndexError:\n self.pre_rotate(angle, x_param)\n elif SVG_TRANSFORM_SKEW == name:\n angle_a = Angle.parse(params[0])\n try:\n angle_b = Angle.parse(params[1])\n except IndexError: # this isn't valid.\n continue\n try:\n x_param = Length(params[2]).value()\n except IndexError:\n self.pre_skew(angle_a, angle_b)\n continue\n try:\n y_param = Length(params[3]).value()\n self.pre_skew(angle_a, angle_b, x_param, y_param)\n except IndexError:\n self.pre_skew(angle_a, angle_b, x_param)\n elif SVG_TRANSFORM_SKEW_X == name:\n angle_a = Angle.parse(params[0])\n try:\n x_param = Length(params[1]).value()\n except IndexError:\n self.pre_skew_x(angle_a)\n continue\n try:\n y_param = Length(params[2]).value()\n self.pre_skew_x(angle_a, x_param, y_param)\n except IndexError:\n self.pre_skew_x(angle_a, x_param)\n elif SVG_TRANSFORM_SKEW_Y == name:\n angle_b = Angle.parse(params[0])\n try:\n x_param = Length(params[1]).value()\n except IndexError:\n self.pre_skew_y(angle_b)\n continue\n try:\n y_param = Length(params[2]).value()\n self.pre_skew_y(angle_b, x_param, y_param)\n except IndexError:\n self.pre_skew_y(angle_b, x_param)\n return self\n\n def render(\n self,\n ppi=None,\n relative_length=None,\n width=None,\n height=None,\n font_size=None,\n font_height=None,\n viewbox=None,\n **kwargs\n ):\n \"\"\"\n Provides values to turn trans_x and trans_y values into user units floats rather\n than Lengths by giving the required information to perform the conversions.\n \"\"\"\n if isinstance(self.e, Length):\n if width is None and relative_length is not None:\n width = relative_length\n self.e = self.e.value(\n ppi=ppi,\n relative_length=width,\n 
font_size=font_size,\n font_height=font_height,\n viewbox=viewbox,\n )\n\n if isinstance(self.f, Length):\n if height is None and relative_length is not None:\n height = relative_length\n self.f = self.f.value(\n ppi=ppi,\n relative_length=height,\n font_size=font_size,\n font_height=font_height,\n viewbox=viewbox,\n )\n return self\n\n @property\n def determinant(self):\n return self.a * self.d - self.c * self.b\n\n def value_trans_x(self):\n return self.e\n\n def value_trans_y(self):\n return self.f\n\n def value_scale_x(self):\n return float(self.a)\n\n def value_scale_y(self):\n return float(self.d)\n\n def value_skew_x(self):\n return float(self.b)\n\n def value_skew_y(self):\n return float(self.c)\n\n def reset(self):\n \"\"\"Resets matrix to identity.\"\"\"\n self.a = 1.0\n self.b = 0.0\n self.c = 0.0\n self.d = 1.0\n\n self.e = 0.0\n self.f = 0.0\n\n def inverse(self):\n \"\"\"\n SVG Matrix:\n [a c e]\n [b d f]\n \"\"\"\n m00 = self.a\n m01 = self.c\n m02 = self.e\n m10 = self.b\n m11 = self.d\n m12 = self.f\n determinant = m00 * m11 - m01 * m10\n inverse_determinant = 1.0 / determinant\n self.a = m11 * inverse_determinant\n self.c = -m01 * inverse_determinant\n self.b = -m10 * inverse_determinant\n self.d = m00 * inverse_determinant\n\n self.e = (m01 * m12 - m02 * m11) * inverse_determinant\n self.f = (m10 * m02 - m00 * m12) * inverse_determinant\n return self\n\n def vector(self):\n \"\"\"\n provide the matrix suitable for multiplying vectors. This will be the matrix with the same rotation and scale\n aspects but with no translation. This matrix is for multiplying vector elements where the position doesn't\n matter but the scaling and rotation do.\n :return:\n \"\"\"\n return Matrix(self.a, self.b, self.c, self.d, 0.0, 0.0)\n\n def is_identity(self):\n return (\n self.a == 1\n and self.b == 0\n and self.c == 0\n and self.d == 1\n and self.e == 0\n and self.f == 0\n )\n\n def post_cat(self, *components):\n mx = Matrix(*components)\n self.__imatmul__(mx)\n\n def post_scale(self, sx=1.0, sy=None, x=0.0, y=0.0):\n if sy is None:\n sy = sx\n if x is None:\n x = 0.0\n if y is None:\n y = 0.0\n if x == 0 and y == 0:\n self.post_cat(Matrix.scale(sx, sy))\n else:\n self.post_translate(-x, -y)\n self.post_scale(sx, sy)\n self.post_translate(x, y)\n\n def post_scale_x(self, sx=1.0, x=0.0, y=0.0):\n self.post_scale(sx, 1, x, y)\n\n def post_scale_y(self, sy=1.0, x=0.0, y=0.0):\n self.post_scale(1, sy, x, y)\n\n def post_translate(self, tx=0.0, ty=0.0):\n self.post_cat(Matrix.translate(tx, ty))\n\n def post_translate_x(self, tx=0.0):\n self.post_translate(tx, 0.0)\n\n def post_translate_y(self, ty=0.0):\n self.post_translate(0.0, ty)\n\n def post_rotate(self, angle, x=0.0, y=0.0):\n if x is None:\n x = 0.0\n if y is None:\n y = 0.0\n if x == 0 and y == 0:\n self.post_cat(Matrix.rotate(angle)) # self %= self.get_rotate(theta)\n else:\n matrix = Matrix()\n matrix.post_translate(-x, -y)\n matrix.post_cat(Matrix.rotate(angle))\n matrix.post_translate(x, y)\n self.post_cat(matrix)\n\n def post_skew(self, angle_a=0.0, angle_b=0.0, x=0.0, y=0.0):\n if x is None:\n x = 0\n if y is None:\n y = 0\n if x == 0 and y == 0:\n self.post_cat(Matrix.skew(angle_a, angle_b))\n else:\n self.post_translate(-x, -y)\n self.post_skew(angle_a, angle_b)\n self.post_translate(x, y)\n\n def post_skew_x(self, angle_a=0.0, x=0.0, y=0.0):\n self.post_skew(angle_a, 0.0, x, y)\n\n def post_skew_y(self, angle_b=0.0, x=0.0, y=0.0):\n self.post_skew(0.0, angle_b, x, y)\n\n def pre_cat(self, *components):\n mx = 
Matrix(*components)\n self.a, self.b, self.c, self.d, self.e, self.f = Matrix.matrix_multiply(\n mx, self\n )\n\n def pre_scale(self, sx=1.0, sy=None, x=0.0, y=0.0):\n if sy is None:\n sy = sx\n if x is None:\n x = 0.0\n if y is None:\n y = 0.0\n if x == 0 and y == 0:\n self.pre_cat(Matrix.scale(sx, sy))\n else:\n self.pre_translate(x, y)\n self.pre_scale(sx, sy)\n self.pre_translate(-x, -y)\n\n def pre_scale_x(self, sx=1.0, x=0.0, y=0.0):\n self.pre_scale(sx, 1, x, y)\n\n def pre_scale_y(self, sy=1.0, x=0.0, y=0.0):\n self.pre_scale(1, sy, x, y)\n\n def pre_translate(self, tx=0.0, ty=0.0):\n self.pre_cat(Matrix.translate(tx, ty))\n\n def pre_translate_x(self, tx=0.0):\n self.pre_translate(tx, 0.0)\n\n def pre_translate_y(self, ty=0.0):\n self.pre_translate(0.0, ty)\n\n def pre_rotate(self, angle, x=0.0, y=0.0):\n if x is None:\n x = 0\n if y is None:\n y = 0\n if x == 0 and y == 0:\n self.pre_cat(Matrix.rotate(angle))\n else:\n self.pre_translate(x, y)\n self.pre_rotate(angle)\n self.pre_translate(-x, -y)\n\n def pre_skew(self, angle_a=0.0, angle_b=0.0, x=0.0, y=0.0):\n if x is None:\n x = 0\n if y is None:\n y = 0\n if x == 0 and y == 0:\n self.pre_cat(Matrix.skew(angle_a, angle_b))\n else:\n self.pre_translate(x, y)\n self.pre_skew(angle_a, angle_b)\n self.pre_translate(-x, -y)\n\n def pre_skew_x(self, angle_a=0.0, x=0.0, y=0.0):\n self.pre_skew(angle_a, 0, x, y)\n\n def pre_skew_y(self, angle_b=0.0, x=0.0, y=0.0):\n self.pre_skew(0.0, angle_b, x, y)\n\n def point_in_inverse_space(self, v0):\n inverse = Matrix(self)\n inverse.inverse()\n return inverse.point_in_matrix_space(v0)\n\n def point_in_matrix_space(self, v0):\n return Point(\n v0[0] * self.a + v0[1] * self.c + 1 * self.e,\n v0[0] * self.b + v0[1] * self.d + 1 * self.f,\n )\n\n def transform_point(self, v):\n nx = v[0] * self.a + v[1] * self.c + 1 * self.e\n ny = v[0] * self.b + v[1] * self.d + 1 * self.f\n v[0] = nx\n v[1] = ny\n return v\n\n def transform_vector(self, v):\n \"\"\"\n Applies the transformation without the translation.\n \"\"\"\n nx = v[0] * self.a + v[1] * self.c\n ny = v[0] * self.b + v[1] * self.d\n v[0] = nx\n v[1] = ny\n return v\n\n @classmethod\n def scale(cls, sx=1.0, sy=None):\n if sy is None:\n sy = sx\n return cls(sx, 0, 0, sy, 0, 0)\n\n @classmethod\n def scale_x(cls, sx=1.0):\n return cls.scale(sx, 1.0)\n\n @classmethod\n def scale_y(cls, sy=1.0):\n return cls.scale(1.0, sy)\n\n @classmethod\n def translate(cls, tx=0.0, ty=0.0):\n \"\"\"SVG Matrix:\n [a c e]\n [b d f]\n \"\"\"\n return cls(1.0, 0.0, 0.0, 1.0, tx, ty)\n\n @classmethod\n def translate_x(cls, tx=0.0):\n return cls.translate(tx, 0)\n\n @classmethod\n def translate_y(cls, ty=0.0):\n return cls.translate(0.0, ty)\n\n @classmethod\n def rotate(cls, angle=0.0):\n ct = cos(angle)\n st = sin(angle)\n return cls(ct, st, -st, ct, 0.0, 0.0)\n\n @classmethod\n def skew(cls, angle_a=0.0, angle_b=0.0):\n aa = tan(angle_a)\n bb = tan(angle_b)\n return cls(1.0, bb, aa, 1.0, 0.0, 0.0)\n\n @classmethod\n def skew_x(cls, angle=0.0):\n return cls.skew(angle, 0.0)\n\n @classmethod\n def skew_y(cls, angle=0.0):\n return cls.skew(0.0, angle)\n\n @classmethod\n def identity(cls):\n \"\"\"\n 1, 0, 0,\n 0, 1, 0,\n \"\"\"\n return cls()\n\n @staticmethod\n def matrix_multiply(m, s):\n \"\"\"\n [a c e] [a c e] [a b 0]\n [b d f] % [b d f] = [c d 0]\n [0 0 1] [0 0 1] [e f 1]\n\n :param m0: matrix operand\n :param m1: matrix operand\n :return: muliplied matrix.\n \"\"\"\n r0 = (\n s.a * m.a + s.c * m.b + s.e * 0,\n s.a * m.c + s.c * m.d + s.e * 0,\n s.a * m.e + s.c 
* m.f + s.e * 1,\n )\n\n r1 = (\n s.b * m.a + s.d * m.b + s.f * 0,\n s.b * m.c + s.d * m.d + s.f * 0,\n s.b * m.e + s.d * m.f + s.f * 1,\n )\n return float(r0[0]), float(r1[0]), float(r0[1]), float(r1[1]), r0[2], r1[2]\n\n\nclass Viewbox:\n def __init__(self, viewbox, preserve_aspect_ratio=None):\n \"\"\"\n Viewbox controls the scaling between the drawing size view that is observing that drawing.\n\n :param viewbox: either values or viewbox attribute or a Viewbox object\n :param preserve_aspect_ratio: preserveAspectRatio\n \"\"\"\n self.x = None\n self.y = None\n self.width = None\n self.height = None\n self.preserve_aspect_ratio = preserve_aspect_ratio\n if isinstance(viewbox, dict):\n self.property_by_values(viewbox)\n elif isinstance(viewbox, Viewbox):\n self.property_by_object(viewbox)\n else:\n self.set_viewbox(viewbox)\n\n def __str__(self):\n return \"%s %s %s %s\" % (\n Length.str(self.x),\n Length.str(self.y),\n Length.str(self.width),\n Length.str(self.height),\n )\n\n def property_by_object(self, obj):\n self.x = obj.x\n self.y = obj.y\n self.width = obj.width\n self.height = obj.height\n self.preserve_aspect_ratio = obj.preserve_aspect_ratio\n\n def property_by_values(self, values):\n viewbox = values.get(SVG_ATTR_VIEWBOX)\n if viewbox is not None:\n self.set_viewbox(viewbox)\n if SVG_ATTR_PRESERVEASPECTRATIO in values:\n self.preserve_aspect_ratio = values[SVG_ATTR_PRESERVEASPECTRATIO]\n\n def set_viewbox(self, viewbox):\n if viewbox is not None:\n dims = list(REGEX_FLOAT.findall(viewbox))\n try:\n self.x = float(dims[0])\n self.y = float(dims[1])\n self.width = float(dims[2])\n self.height = float(dims[3])\n except IndexError:\n pass\n\n def transform(self, element):\n return Viewbox.viewbox_transform(\n element.x,\n element.y,\n element.width,\n element.height,\n self.x,\n self.y,\n self.width,\n self.height,\n self.preserve_aspect_ratio,\n )\n\n @staticmethod\n def viewbox_transform(\n e_x, e_y, e_width, e_height, vb_x, vb_y, vb_width, vb_height, aspect\n ):\n \"\"\"\n SVG 1.1 7.2, SVG 2.0 8.2 equivalent transform of an SVG viewport.\n With regards to https://github.com/w3c/svgwg/issues/215 use 8.2 version.\n\n It creates transform commands equal to that viewport expected.\n\n :param svg_node: dict containing the relevant svg entries.\n :return: string of the SVG transform commands to account for the viewbox.\n \"\"\"\n\n # Let e-x, e-y, e-width, e-height be the position and size of the element respectively.\n\n # Let vb-x, vb-y, vb-width, vb-height be the min-x, min-y,\n # width and height values of the viewBox attribute respectively.\n\n # Let align be the align value of preserveAspectRatio, or 'xMidYMid' if preserveAspectRatio is not defined.\n # Let meetOrSlice be the meetOrSlice value of preserveAspectRatio, or 'meet' if preserveAspectRatio is not defined\n # or if meetOrSlice is missing from this value.\n if (\n e_x is None\n or e_y is None\n or e_width is None\n or e_height is None\n or vb_x is None\n or vb_y is None\n or vb_width is None\n or vb_height is None\n ):\n return \"\"\n if aspect is not None:\n aspect_slice = aspect.split(\" \")\n try:\n align = aspect_slice[0]\n except IndexError:\n align = \"xMidyMid\"\n try:\n meet_or_slice = aspect_slice[1]\n except IndexError:\n meet_or_slice = \"meet\"\n else:\n align = \"xMidyMid\"\n meet_or_slice = \"meet\"\n # Initialize scale-x to e-width/vb-width.\n scale_x = e_width / vb_width\n # Initialize scale-y to e-height/vb-height.\n scale_y = e_height / vb_height\n\n # If align is not 'none' and meetOrSlice is 'meet', 
set the larger of scale-x and scale-y to the smaller.\n if align != SVG_VALUE_NONE and meet_or_slice == \"meet\":\n scale_x = scale_y = min(scale_x, scale_y)\n # Otherwise, if align is not 'none' and meetOrSlice is 'slice', set the smaller of scale-x and scale-y to the larger\n elif align != SVG_VALUE_NONE and meet_or_slice == \"slice\":\n scale_x = scale_y = max(scale_x, scale_y)\n # Initialize translate-x to e-x - (vb-x * scale-x).\n translate_x = e_x - (vb_x * scale_x)\n # Initialize translate-y to e-y - (vb-y * scale-y)\n translate_y = e_y - (vb_y * scale_y)\n # If align contains 'xMid', add (e-width - vb-width * scale-x) / 2 to translate-x.\n align = align.lower()\n if \"xmid\" in align:\n translate_x += (e_width - vb_width * scale_x) / 2.0\n # If align contains 'xMax', add (e-width - vb-width * scale-x) to translate-x.\n if \"xmax\" in align:\n translate_x += e_width - vb_width * scale_x\n # If align contains 'yMid', add (e-height - vb-height * scale-y) / 2 to translate-y.\n if \"ymid\" in align:\n translate_y += (e_height - vb_height * scale_y) / 2.0\n # If align contains 'yMax', add (e-height - vb-height * scale-y) to translate-y.\n if \"ymax\" in align:\n translate_y += e_height - vb_height * scale_y\n # The transform applied to content contained by the element is given by:\n # translate(translate-x, translate-y) scale(scale-x, scale-y)\n if isinstance(scale_x, Length) or isinstance(scale_y, Length):\n raise ValueError\n if translate_x == 0 and translate_y == 0:\n if scale_x == 1 and scale_y == 1:\n return \"\" # Nothing happens.\n else:\n return \"scale(%s, %s)\" % (Length.str(scale_x), Length.str(scale_y))\n else:\n if scale_x == 1 and scale_y == 1:\n return \"translate(%s, %s)\" % (\n Length.str(translate_x),\n Length.str(translate_y),\n )\n else:\n return \"translate(%s, %s) scale(%s, %s)\" % (\n Length.str(translate_x),\n Length.str(translate_y),\n Length.str(scale_x),\n Length.str(scale_y),\n )\n\n\nclass SVGElement(object):\n \"\"\"\n Any element within the SVG namespace.\n\n if args[0] is a dict or SVGElement class the value is used to seed the values.\n Else, the values consist of the kwargs used. 
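A minimal\n sketch of the seeding rules (illustrative):\n\n e = SVGElement({"id": "base"}, id="override") # e.id == "override"\n\n 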
The priority is such that kwargs\n will overwrite any previously set value.\n\n If additional args exist these will be passed to property_by_args\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.id = None\n self.values = None\n if len(args) >= 1:\n s = args[0]\n if isinstance(s, dict):\n args = args[1:]\n self.values = dict(s)\n self.values.update(kwargs)\n elif isinstance(s, SVGElement):\n args = args[1:]\n self.property_by_object(s)\n self.property_by_args(*args)\n return\n if self.values is None:\n self.values = dict(kwargs)\n self.property_by_values(self.values)\n if len(args) != 0:\n self.property_by_args(*args)\n\n def property_by_args(self, *args):\n pass\n\n def property_by_object(self, obj):\n self.id = obj.id\n self.values = dict(obj.values)\n\n def property_by_values(self, values):\n self.id = values.get(SVG_ATTR_ID)\n\n def render(self, **kwargs):\n \"\"\"\n Render changes any length/percent values or attributes into real usable limits if\n given the information required to change such parameters.\n\n :param kwargs: various other properties to be rendered with.\n :return:\n \"\"\"\n pass\n\n def set(self, key, value):\n self.values[key] = value\n return self\n\n\nclass Transformable:\n \"\"\"Any element that is transformable and has a transform property.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._length = None\n self._lengths = None\n self.transform = None\n self.apply = None\n\n def property_by_object(self, s):\n self.transform = Matrix(s.transform)\n self.apply = s.apply\n\n def property_by_values(self, values):\n self.transform = Matrix(values.get(SVG_ATTR_TRANSFORM, \"\"))\n self.apply = bool(values.get(\"apply\", True))\n\n def __mul__(self, other):\n if isinstance(other, (Matrix, str)):\n n = copy(self)\n n *= other\n return n\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n self.transform *= other\n return self\n\n def __abs__(self):\n \"\"\"\n The absolute value is taken to be the actual shape transformed.\n :return: transformed version of the given shape.\n \"\"\"\n m = copy(self)\n m.reify()\n return m\n\n def reify(self):\n \"\"\"\n Realizes the transform to the attributes. Such that the attributes become actualized and the transform\n simplifies towards the identity matrix. In many cases it will become the identity matrix. In other cases the\n transformed shape cannot be represented through the properties alone. And shall keep those parts of the\n transform required preserve equivalency.\n\n The default method will be called by submethods but will only scale properties like stroke_width which should\n scale with the transform.\n \"\"\"\n self._lengths = None\n self._length = None\n\n def render(self, **kwargs):\n \"\"\"\n Renders the transformable by performing any required length conversion operations into pixels. 
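For\n example (illustrative values; "shape" here is any Transformable instance):\n\n shape.render(ppi=96.0, width=500, height=300)\n\n 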
The element\n will be the pixel-length form.\n \"\"\"\n if self.transform is not None:\n self.transform.render(**kwargs)\n return self\n\n def bbox(self, transformed=True):\n \"\"\"\n Returns the bounding box of the given object.\n\n :param transformed: whether this is the transformed bounds or default.\n :return:\n \"\"\"\n raise NotImplementedError\n\n @property\n def rotation(self):\n if not self.apply:\n return Angle.degrees(0)\n prx = Point(1, 0)\n prx *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.angle_to(prx)\n\n\nclass GraphicObject:\n \"\"\"Any drawn element.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.stroke = None\n self.fill = None\n self.stroke_width = None\n\n def property_by_object(self, s):\n self.fill = Color(s.fill) if s.fill is not None else None\n self.stroke = Color(s.stroke) if s.stroke is not None else None\n self.stroke_width = (\n Length(s.stroke_width).value() if s.stroke_width is not None else None\n )\n\n def property_by_values(self, values):\n stroke = values.get(SVG_ATTR_STROKE)\n self.stroke = Color(stroke) if stroke is not None else None\n stroke_opacity = values.get(SVG_ATTR_STROKE_OPACITY)\n if (\n stroke_opacity is not None\n and self.stroke is not None\n and self.stroke.value is not None\n ):\n try:\n self.stroke.opacity = float(stroke_opacity)\n except ValueError:\n pass\n fill = values.get(SVG_ATTR_FILL)\n self.fill = Color(fill) if fill is not None else None\n fill_opacity = values.get(SVG_ATTR_FILL_OPACITY)\n if (\n fill_opacity is not None\n and self.fill is not None\n and self.fill.value is not None\n ):\n try:\n self.fill.opacity = float(fill_opacity)\n except ValueError:\n pass\n self.stroke_width = Length(values.get(SVG_ATTR_STROKE_WIDTH, 1.0)).value()\n\n def render(self, **kwargs):\n if isinstance(self.stroke_width, Length):\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n self.stroke_width = self.stroke_width.value(\n relative_length=sqrt(width * width + height * height), **kwargs\n )\n # A percentage stroke_width is always computed as a percentage of the normalized viewBox diagonal length.\n\n def reify(self):\n \"\"\"\n Realizes the transform to the attributes. Such that the attributes become actualized and the transform\n simplifies towards the identity matrix. In many cases it will become the identity matrix. In other cases the\n transformed shape cannot be represented through the properties alone. 
And shall keep those parts of the\n transform required preserve equivalency.\n \"\"\"\n self.stroke_width = self.implicit_stroke_width\n return self\n\n @property\n def implicit_stroke_width(self):\n try:\n if not self.apply:\n return self.stroke_width\n if self.stroke_width is not None:\n if (\n hasattr(self, \"values\")\n and SVG_ATTR_VECTOR_EFFECT in self.values\n and SVG_VALUE_NON_SCALING_STROKE\n in self.values[SVG_ATTR_VECTOR_EFFECT]\n ):\n return self.stroke_width # we are not to scale the stroke.\n width = self.stroke_width\n det = self.transform.determinant\n return width * sqrt(abs(det))\n except AttributeError:\n return self.stroke_width\n\n\nclass Shape(SVGElement, GraphicObject, Transformable):\n \"\"\"\n SVG Shapes are several SVG items defined in SVG 1.1 9.1\n https://www.w3.org/TR/SVG11/shapes.html\n\n These shapes are circle, ellipse, line, polyline, polygon, and path.\n\n All shapes have methods:\n d(relative, transform): provides path_d string for the shape.\n reify(): Applies transform of the shape to modify the shape attributes.\n render(): Ensure that the shape properties have real space values.\n bbox(transformed): Provides the bounding box for the given shape.\n\n All shapes must implement:\n __repr__(), with a call to _repr_shape()\n __copy__()\n\n All shapes have attributes:\n id: SVG ID attributes. (SVGElement)\n transform: SVG Matrix to apply to this shape. (Transformable)\n apply: Determine whether transform should be applied. (Transformable)\n fill: SVG color of the shape fill. (GraphicObject)\n stroke: SVG color of the shape stroke. (GraphicObject)\n stroke_width: Stroke width of the stroke. (GraphicObject)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n Transformable.__init__(self, *args, **kwargs)\n GraphicObject.__init__(self, *args, **kwargs)\n SVGElement.__init__(\n self, *args, **kwargs\n ) # Must go last, triggers, by_object, by_value, by_arg functions.\n\n def property_by_object(self, s):\n SVGElement.property_by_object(self, s)\n Transformable.property_by_object(self, s)\n GraphicObject.property_by_object(self, s)\n\n def property_by_values(self, values):\n SVGElement.property_by_values(self, values)\n Transformable.property_by_values(self, values)\n GraphicObject.property_by_values(self, values)\n\n def render(self, **kwargs):\n SVGElement.render(self, **kwargs)\n Transformable.render(self, **kwargs)\n GraphicObject.render(self, **kwargs)\n\n def __eq__(self, other):\n if not isinstance(other, Shape):\n return NotImplemented\n if self.fill != other.fill or self.stroke != other.stroke:\n return False\n first = self\n if not isinstance(first, Path):\n first = Path(first)\n second = other\n if not isinstance(second, Path):\n second = Path(second)\n return first == second\n\n def __ne__(self, other):\n if not isinstance(other, Shape):\n return NotImplemented\n return not self == other\n\n def __iadd__(self, other):\n if isinstance(other, Shape):\n return Path(self) + Path(other)\n return NotImplemented\n\n __add__ = __iadd__\n\n def __matmul__(self, other):\n m = copy(self)\n m.__imatmul__(other)\n return m\n\n def __rmatmul__(self, other):\n m = copy(other)\n m.__imatmul__(self)\n return m\n\n def __imatmul__(self, other):\n \"\"\"\n The % operation with a matrix works much like multiplication except that it automatically reifies the shape.\n \"\"\"\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n self.transform *= other\n self.reify()\n return self\n\n def _calc_lengths(self, error=ERROR, min_depth=MIN_DEPTH, 
segments=None):\n """\n Calculate the length values for the segments of the Shape.\n\n :param error: error permitted for length calculations.\n :param min_depth: minimum depth for the length calculation.\n :param segments: optional segments to use.\n :return:\n """\n if segments is None:\n segments = self.segments(False)\n if self._length is not None:\n return\n lengths = [each.length(error=error, min_depth=min_depth) for each in segments]\n self._length = sum(lengths)\n if self._length == 0:\n self._lengths = lengths\n else:\n self._lengths = [each / self._length for each in lengths]\n\n def npoint(self, positions, error=ERROR):\n """\n Find points between 0 and 1 within the shape. Numpy acceleration allows positions to be an array of floats.\n """\n try:\n import numpy as np\n except ImportError:\n return [self.point(pos) for pos in positions]\n\n segments = self.segments(False)\n if len(segments) == 0:\n return None\n # Shortcuts\n positions = np.asarray(positions, dtype=float) # accept plain sequences as well as arrays\n if self._length is None:\n self._calc_lengths(error=error, segments=segments)\n xy = np.empty((len(positions), 2), dtype=float)\n if self._length == 0:\n # Degenerate shape: every segment has zero length, so snap each\n # position to the start point of the nearest segment.\n indexes = np.round(positions * (len(segments) - 1)).astype(int)\n for row, seg_index in enumerate(indexes):\n xy[row] = segments[seg_index].point(0.0)\n return xy\n\n # Find which segment the point we search for is located on:\n segment_start = 0\n for index, segment in enumerate(segments):\n segment_end = segment_start + self._lengths[index]\n position_subset = (segment_start <= positions) & (positions < segment_end)\n v0 = positions[position_subset]\n if not len(v0):\n continue # Nothing matched.\n d = segment_end - segment_start\n if d == 0: # This segment is 0 length.\n segment_pos = 0.0\n else:\n segment_pos = (v0 - segment_start) / d\n c = segment.npoint(segment_pos)\n xy[position_subset] = c[:]\n segment_start = segment_end\n\n # the loop above will miss position == 1\n xy[positions == 1] = np.array(list(segments[-1].end))\n return xy\n\n def point(self, position, error=ERROR):\n """\n Find a point between 0 and 1 within the Shape, going through the shape with regard to position.\n\n :param position: value between 0 and 1 within the shape.\n :param error: Length error permitted.\n :return: Point at the given location.\n """\n segments = self.segments(False)\n if len(segments) == 0:\n return None\n # Shortcuts\n try:\n if position <= 0.0:\n return segments[0].point(position)\n if position >= 1.0:\n return segments[-1].point(position)\n except ValueError:\n return self.npoint([position], error=error)[0]\n\n if self._length is None:\n self._calc_lengths(error=error, segments=segments)\n\n if self._length == 0:\n i = int(round(position * (len(segments) - 1)))\n return segments[i].point(0.0)\n # Find which segment the point we search for is located on:\n segment_start = 0\n segment_pos = 0\n segment = segments[0]\n for index, segment in enumerate(segments):\n segment_end = segment_start + self._lengths[index]\n if segment_end >= position:\n # This is the segment! 
How far in on the segment is the point?\n segment_pos = (position - segment_start) / (segment_end - segment_start)\n break\n segment_start = segment_end\n return segment.point(segment_pos)\n\n def length(self, error=ERROR, min_depth=MIN_DEPTH):\n self._calc_lengths(error, min_depth)\n return self._length\n\n def segments(self, transformed=True):\n """\n Returns PathSegments which correctly produce this shape.\n\n This should be implemented by subclasses.\n """\n raise NotImplementedError\n\n def d(self, relative=False, transformed=True):\n """\n Returns the path_d string of the shape.\n\n :param relative: Returns path_d in relative form.\n :param transformed: Return path_d, with applied transform.\n :return: path_d string\n """\n return Path(self.segments(transformed=transformed)).d(relative=relative)\n\n def bbox(self, transformed=True):\n """\n Get the bounding box for the given shape.\n """\n # Skip Move and Close segments; Moves are not drawn and a Close only\n # revisits points already visited, so neither can extend the bounds.\n bbs = [\n seg.bbox()\n for seg in self.segments(transformed=False)\n if not isinstance(seg, (Close, Move))\n ]\n try:\n xmins, ymins, xmaxs, ymaxs = list(zip(*bbs))\n except ValueError:\n return None # No bounding box items existed. So no bounding box.\n xmin = min(xmins)\n xmax = max(xmaxs)\n ymin = min(ymins)\n ymax = max(ymaxs)\n if transformed:\n p0 = self.transform.transform_point([xmin, ymin])\n p1 = self.transform.transform_point([xmin, ymax])\n p2 = self.transform.transform_point([xmax, ymin])\n p3 = self.transform.transform_point([xmax, ymax])\n xmin = min(p0[0], p1[0], p2[0], p3[0])\n ymin = min(p0[1], p1[1], p2[1], p3[1])\n xmax = max(p0[0], p1[0], p2[0], p3[0])\n ymax = max(p0[1], p1[1], p2[1], p3[1])\n return xmin, ymin, xmax, ymax\n\n def _init_shape(self, *args):\n """\n Generic SVG parsing of args. In those cases where the shape accepts finite elements we can process the last\n four elements of the shape with this code. This will happen in simpleline, roundshape, and rect. It will not\n happen in polyshape or paths since these can accept infinite arguments.\n """\n\n arg_length = len(args)\n\n if arg_length >= 1:\n if args[0] is not None:\n self.transform = Matrix(args[0])\n if arg_length >= 2:\n if args[1] is not None:\n self.stroke = Color(args[1])\n if arg_length >= 3:\n if args[2] is not None:\n self.fill = Color(args[2])\n if arg_length >= 4:\n if args[3] is not None:\n self.apply = bool(args[3])\n\n def _repr_shape(self, values):\n """\n Generic pieces of repr shape.\n """\n if not self.transform.is_identity():\n values.append("transform=%s" % repr(self.transform))\n if self.stroke is not None:\n values.append("stroke='%s'" % self.stroke)\n if self.fill is not None:\n values.append("fill='%s'" % self.fill)\n if self.stroke_width is not None and self.stroke_width != 1.0:\n values.append("stroke_width='%s'" % str(self.stroke_width))\n if self.apply is not None and not self.apply:\n values.append("apply=%s" % self.apply)\n if self.id is not None:\n values.append("id='%s'" % self.id)\n\n def _name(self):\n return self.__class__.__name__\n\n\nclass PathSegment:\n """\n PathSegment is the base class for all segments within a Path.\n These are defined in SVG 1.1 8.3 and SVG 2.0 9.3\n https://www.w3.org/TR/SVG11/paths.html#PathData\n https://www.w3.org/TR/SVG2/paths.html#PathElement\n\n These segments define a 1:1 relationship with the path_d or path data attribute, denoted in\n SVG by the 'd' attribute. These are moveto, closepath, lineto, and the curves which are cubic\n bezier curves, quadratic bezier curves, and elliptical arc. 
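In a\n path data string these appear, for example, as "M 0,0 L 100,0 Q 150,50 200,0 Z". 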
These are classed as Move, Close,\n Line, CubicBezier, QuadraticBezier, and Arc. And in path_d are denoted as M, Z, L, C, Q, A.\n\n There are lowercase versions of these commands. And for C, and Q there are S and T which are\n smooth versions. For lines there are also V and H commands which denote vertical and horizontal\n versions of the line command.\n\n The major difference between paths in 1.1 and 2.0 is the use of Z to truncate a command to close.\n \"M0,0C 0,100 100,0 z is valid in 2.0 since the last z replaces the 0,0. These are read by\n svg.elements but they are not written.\n \"\"\"\n\n def __init__(self, **kwargs):\n try:\n self.relative = bool(kwargs[\"relative\"])\n except (KeyError, ValueError):\n self.relative = False\n try:\n self.smooth = bool(kwargs[\"smooth\"])\n except (KeyError, ValueError):\n self.smooth = True\n self.start = None\n self.end = None\n\n def __mul__(self, other):\n if isinstance(other, (Matrix, str)):\n n = copy(self)\n n *= other\n return n\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __iadd__(self, other):\n if isinstance(other, PathSegment):\n path = Path(self, other)\n return path\n elif isinstance(other, str):\n path = Path(self) + other\n return path\n return NotImplemented\n\n __add__ = __iadd__\n\n def __str__(self):\n \"\"\"\n This defines an individual path segment string. Since this isn't part of a Path it appends a pseudo-Move\n command to correctly provide the starting position.\n :return: string representation of the object.\n \"\"\"\n d = self.d()\n if self.start is not None:\n if self.relative:\n return \"m %s %s\" % (self.start, d)\n else:\n return \"M %s %s\" % (self.start, d)\n return d\n\n def __iter__(self):\n self.n = -1\n return self\n\n def __next__(self):\n self.n += 1\n try:\n val = self[self.n]\n if val is None:\n self.n += 1\n val = self[self.n]\n return val\n except IndexError:\n raise StopIteration\n\n next = __next__\n\n @staticmethod\n def segment_length(\n curve,\n start=0.0,\n end=1.0,\n start_point=None,\n end_point=None,\n error=ERROR,\n min_depth=MIN_DEPTH,\n depth=0,\n ):\n \"\"\"Recursively approximates the length by straight lines\"\"\"\n if start_point is None:\n start_point = curve.point(start)\n if end_point is None:\n end_point = curve.point(end)\n mid = (start + end) / 2.0\n mid_point = curve.point(mid)\n length = abs(end_point - start_point)\n first_half = abs(mid_point - start_point)\n second_half = abs(end_point - mid_point)\n\n length2 = first_half + second_half\n if (length2 - length > error) or (depth < min_depth):\n # Calculate the length of each segment:\n depth += 1\n return PathSegment.segment_length(\n curve, start, mid, start_point, mid_point, error, min_depth, depth\n ) + PathSegment.segment_length(\n curve, mid, end, mid_point, end_point, error, min_depth, depth\n )\n # This is accurate enough.\n return length2\n\n def _line_length(self, start=0.0, end=1.0, error=ERROR, min_depth=MIN_DEPTH):\n return PathSegment.segment_length(\n self, start, end, error=error, min_depth=min_depth\n )\n\n def bbox(self):\n \"\"\"returns the bounding box for the segment.\n xmin, ymin, xmax, ymax\n \"\"\"\n xs = [p.x for p in self if p is not None]\n ys = [p.y for p in self if p is not None]\n xmin = min(xs)\n xmax = max(xs)\n ymin = min(ys)\n ymax = max(ys)\n return xmin, ymin, xmax, ymax\n\n def reverse(self):\n \"\"\"\n Reverses the current path segment.\n \"\"\"\n end = self.end\n self.end = self.start\n self.start = end\n\n def point(self, position):\n \"\"\"\n Returns the point at a given amount 
through the path segment.\n :param position: t value between 0 and 1\n :return: Point instance\n \"\"\"\n return Point(self.npoint([position])[0])\n\n def npoint(self, positions):\n \"\"\"\n Returns the points at given positions along the path segment\n :param positions: N-sized sequence of t value between 0 and 1\n :return: N-sized sequence of 2-sized sequence of float\n \"\"\"\n return [self.end] * len(positions)\n\n def length(self, error=ERROR, min_depth=MIN_DEPTH):\n \"\"\"\n Returns the length of this path segment.\n\n :param error:\n :param min_depth:\n :return:\n \"\"\"\n return 0\n\n def d(self, current_point=None, relative=None, smooth=None):\n \"\"\"Returns the fragment path_d value for the current path segment.\n\n For a relative segment the current_point must be provided. If it is omitted then only an absolute segment\n can be returned.\"\"\"\n raise NotImplementedError\n\n\nclass Move(PathSegment):\n \"\"\"Represents move commands. Moves to a new location without any path distance.\n Paths that consist of only move commands, are valid.\n\n Move serve to make discontinuous paths into continuous linked paths segments\n with non-drawn sections.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Move commands most importantly go to a place. So if one location is given, that's the end point.\n If two locations are given then first is the start location.\n\n For many Move commands it is not necessary to have an original start location. The start point provides a\n linked locations for some elements that may require it. If known it can be provided.\n\n Move(p) where p is the End point.\n Move(s,e) where s is the Start point, e is the End point.\n Move(p, start=s) where p is End point, s is the Start point.\n Move(p, end=e) where p is the Start point, e is the End point.\n Move(start=s, end=e) where s is the Start point, e is the End point.\n \"\"\"\n PathSegment.__init__(self, **kwargs)\n self.end = None\n self.start = None\n if len(args) == 0:\n if \"end\" in kwargs:\n self.end = kwargs[\"end\"]\n if \"start\" in kwargs:\n self.start = kwargs[\"start\"]\n elif len(args) == 1:\n if len(kwargs) == 0:\n self.end = args[0]\n else:\n if \"end\" in kwargs:\n self.start = args[0]\n self.end = kwargs[\"end\"]\n elif \"start\" in kwargs:\n self.start = kwargs[\"start\"]\n self.end = args[0]\n elif len(args) == 2:\n self.start = args[0]\n self.end = args[1]\n if self.start is not None:\n self.start = Point(self.start)\n if self.end is not None:\n self.end = Point(self.end)\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n if self.start is not None:\n self.start *= other\n if self.end is not None:\n self.end *= other\n return self\n\n def __repr__(self):\n if self.start is None:\n return \"Move(end=%s)\" % repr(self.end)\n else:\n return \"Move(start=%s, end=%s)\" % (repr(self.start), repr(self.end))\n\n def __copy__(self):\n return Move(self.start, self.end, relative=self.relative)\n\n def __eq__(self, other):\n if not isinstance(other, Move):\n return NotImplemented\n return self.start == other.start and self.end == other.end\n\n def __ne__(self, other):\n if not isinstance(other, Move):\n return NotImplemented\n return not self == other\n\n def __len__(self):\n return 2\n\n def __getitem__(self, item):\n if item == 0:\n return self.start\n elif item == 1:\n return self.end\n else:\n raise IndexError\n\n def d(self, current_point=None, relative=None, smooth=None):\n if (\n current_point is None\n or (relative is 
None and self.relative)\n or (relative is not None and not relative)\n ):\n return "M %s" % self.end\n return "m %s" % (self.end - current_point)\n\n\nclass Curve(PathSegment):\n """Represents curve commands"""\n\n def __init__(self, start=None, end=None, **kwargs):\n PathSegment.__init__(self, **kwargs)\n self.start = Point(start) if start is not None else None\n self.end = Point(end) if end is not None else None\n\n\nclass Linear(PathSegment):\n """Represents line commands."""\n\n def __init__(self, start=None, end=None, **kwargs):\n PathSegment.__init__(self, **kwargs)\n self.start = Point(start) if start is not None else None\n self.end = Point(end) if end is not None else None\n\n def __copy__(self):\n return self.__class__(self.start, self.end, relative=self.relative)\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return self.start == other.start and self.end == other.end\n\n def __ne__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return not self == other\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n if self.start is not None:\n self.start *= other\n if self.end is not None:\n self.end *= other\n return self\n\n def __len__(self):\n return 2\n\n def __getitem__(self, item):\n if item == 0:\n return self.start\n elif item == 1:\n return self.end\n else:\n raise IndexError\n\n def npoint(self, positions):\n try:\n import numpy as np\n\n xy = np.empty(shape=(len(positions), 2), dtype=float)\n xy[:, 0] = np.interp(positions, [0, 1], [self.start.x, self.end.x])\n xy[:, 1] = np.interp(positions, [0, 1], [self.start.y, self.end.y])\n return xy\n except ImportError:\n return [Point.towards(self.start, self.end, pos) for pos in positions]\n\n def length(self, error=None, min_depth=None):\n if self.start is not None and self.end is not None:\n return Point.distance(self.end, self.start)\n else:\n return 0\n\n def closest_segment_point(self, p, respect_bounds=True):\n """Gives the point on the line closest to the given point."""\n a = self.start\n b = self.end\n vAPx = p[0] - a.x\n vAPy = p[1] - a.y\n vABx = b.x - a.x\n vABy = b.y - a.y\n sqDistanceAB = vABx * vABx + vABy * vABy\n ABAPproduct = vABx * vAPx + vABy * vAPy\n if sqDistanceAB == 0:\n return Point(a) # Degenerate segment: the start is the closest point.\n amount = ABAPproduct / float(sqDistanceAB)\n if respect_bounds:\n if amount > 1:\n amount = 1\n if amount < 0:\n amount = 0\n return self.point(amount)\n\n def d(self, current_point=None, relative=None, smooth=None):\n raise NotImplementedError\n\n\nclass Close(Linear):\n """Represents close commands. If this exists at the end of the shape then the shape is closed.\n The methodology of a single close flag fails in a couple of ways. 
You can have multi-part shapes\n which can close or not close several times.\n \"\"\"\n\n def __repr__(self):\n if self.start is None and self.end is None:\n return \"Close()\"\n s = self.start\n if s is not None:\n s = repr(s)\n e = self.end\n if e is not None:\n e = repr(e)\n return \"Close(start=%s, end=%s)\" % (s, e)\n\n def d(self, current_point=None, relative=None, smooth=None):\n if (\n current_point is None\n or (relative is None and self.relative)\n or (relative is not None and not relative)\n ):\n return \"Z\"\n else:\n return \"z\"\n\n\nclass Line(Linear):\n \"\"\"Represents line commands.\"\"\"\n\n def __repr__(self):\n if self.start is None:\n return \"Line(end=%s)\" % (repr(self.end))\n return \"Line(start=%s, end=%s)\" % (repr(self.start), repr(self.end))\n\n def d(self, current_point=None, relative=None, smooth=None):\n if (\n current_point is None\n or (relative is None and self.relative)\n or (relative is not None and not relative)\n ):\n return \"L %s\" % self.end\n else:\n return \"l %s\" % (self.end - current_point)\n\n\nclass QuadraticBezier(Curve):\n \"\"\"Represents Quadratic Bezier commands.\"\"\"\n\n def __init__(self, start, control, end, **kwargs):\n Curve.__init__(self, start, end, **kwargs)\n self.control = Point(control) if control is not None else None\n\n def __repr__(self):\n return \"QuadraticBezier(start=%s, control=%s, end=%s)\" % (\n repr(self.start),\n repr(self.control),\n repr(self.end),\n )\n\n def __copy__(self):\n return QuadraticBezier(\n self.start,\n self.control,\n self.end,\n relative=self.relative,\n smooth=self.smooth,\n )\n\n def __eq__(self, other):\n if not isinstance(other, QuadraticBezier):\n return NotImplemented\n return (\n self.start == other.start\n and self.end == other.end\n and self.control == other.control\n )\n\n def __ne__(self, other):\n if not isinstance(other, QuadraticBezier):\n return NotImplemented\n return not self == other\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n if self.start is not None:\n self.start *= other\n if self.control is not None:\n self.control *= other\n if self.end is not None:\n self.end *= other\n return self\n\n def __len__(self):\n return 3\n\n def __getitem__(self, item):\n if item == 0:\n return self.start\n elif item == 1:\n return self.control\n elif item == 2:\n return self.end\n raise IndexError\n\n def npoint(self, positions):\n \"\"\"Calculate the x,y position at a certain position of the path. 
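The\n evaluation below implements the standard quadratic Bezier form\n B(t) = (1 - t)**2 * P0 + 2 * t * (1 - t) * P1 + t**2 * P2. 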
`pos` may be a\n float or a NumPy array."""\n x0, y0 = self.start\n x1, y1 = self.control\n x2, y2 = self.end\n\n def _compute_point(position):\n # compute factors\n n_pos = 1 - position\n pos_2 = position ** 2\n n_pos_2 = n_pos ** 2\n n_pos_pos = n_pos * position\n\n return (\n n_pos_2 * x0 + 2 * n_pos_pos * x1 + pos_2 * x2,\n n_pos_2 * y0 + 2 * n_pos_pos * y1 + pos_2 * y2,\n )\n\n try:\n import numpy as np\n\n xy = np.empty(shape=(len(positions), 2))\n xy[:, 0], xy[:, 1] = _compute_point(np.array(positions))\n return xy\n except ImportError:\n return [Point(*_compute_point(position)) for position in positions]\n\n def bbox(self):\n """\n Returns the bounding box for the quadratic bezier curve.\n """\n n = self.start.x - self.control.x\n d = self.start.x - 2 * self.control.x + self.end.x\n if d != 0:\n t = n / float(d)\n else:\n t = 0.5\n if 0 < t < 1:\n x_values = [self.start.x, self.end.x, self.point(t).x]\n else:\n x_values = [self.start.x, self.end.x]\n n = self.start.y - self.control.y\n d = self.start.y - 2 * self.control.y + self.end.y\n if d != 0:\n t = n / float(d)\n else:\n t = 0.5\n if 0 < t < 1:\n y_values = [self.start.y, self.end.y, self.point(t).y]\n else:\n y_values = [self.start.y, self.end.y]\n return min(x_values), min(y_values), max(x_values), max(y_values)\n\n def length(self, error=None, min_depth=None):\n """Calculate the total arc length of this quadratic bezier segment in closed form."""\n a = self.start - 2 * self.control + self.end\n b = 2 * (self.control - self.start)\n try:\n # For an explanation of this case, see\n # http://www.malczak.info/blog/quadratic-bezier-curve-length/\n A = 4 * (a.real ** 2 + a.imag ** 2)\n B = 4 * (a.real * b.real + a.imag * b.imag)\n C = b.real ** 2 + b.imag ** 2\n\n Sabc = 2 * sqrt(A + B + C)\n A2 = sqrt(A)\n A32 = 2 * A * A2\n C2 = 2 * sqrt(C)\n BA = B / A2\n\n s = (\n A32 * Sabc\n + A2 * B * (Sabc - C2)\n + (4 * C * A - B ** 2) * log((2 * A2 + BA + Sabc) / (BA + C2))\n ) / (4 * A32)\n except (ZeroDivisionError, ValueError):\n # Degenerate cases where the closed form divides by zero.\n if abs(a) < 1e-10:\n s = abs(b)\n else:\n k = abs(b) / abs(a)\n if k >= 2:\n s = abs(b) - abs(a)\n else:\n s = abs(a) * (k ** 2 / 2 - k + 1)\n return s\n\n def is_smooth_from(self, previous):\n """Checks if this segment would be a smooth segment following the previous"""\n if isinstance(previous, QuadraticBezier):\n return self.start == previous.end and (self.control - self.start) == (\n previous.end - previous.control\n )\n else:\n return self.control == self.start\n\n def d(self, current_point=None, relative=None, smooth=None):\n if (smooth is None and self.smooth) or (smooth is not None and smooth):\n if (\n current_point is None\n or (relative is None and self.relative)\n or (relative is not None and not relative)\n ):\n return "T %s" % self.end\n else:\n return "t %s" % (self.end - current_point)\n else:\n if (\n current_point is None\n or (relative is None and self.relative)\n or (relative is not None and not relative)\n ):\n return "Q %s %s" % (self.control, self.end)\n else:\n return "q %s %s" % (\n self.control - current_point,\n self.end - current_point,\n )\n\n\nclass CubicBezier(Curve):\n """Represents Cubic Bezier commands."""\n\n def __init__(self, start, control1, control2, end, **kwargs):\n Curve.__init__(self, start, end, **kwargs)\n self.control1 = Point(control1) if control1 is not None else None\n self.control2 = Point(control2) if control2 is not None else None\n\n def __repr__(self):\n return "CubicBezier(start=%s, 
class CubicBezier(Curve):
    """Represents Cubic Bezier commands."""

    def __init__(self, start, control1, control2, end, **kwargs):
        Curve.__init__(self, start, end, **kwargs)
        self.control1 = Point(control1) if control1 is not None else None
        self.control2 = Point(control2) if control2 is not None else None

    def __repr__(self):
        return "CubicBezier(start=%s, control1=%s, control2=%s, end=%s)" % (
            repr(self.start),
            repr(self.control1),
            repr(self.control2),
            repr(self.end),
        )

    def __copy__(self):
        return CubicBezier(
            self.start,
            self.control1,
            self.control2,
            self.end,
            relative=self.relative,
            smooth=self.smooth,
        )

    def __eq__(self, other):
        if not isinstance(other, CubicBezier):
            return NotImplemented
        return (
            self.start == other.start
            and self.end == other.end
            and self.control1 == other.control1
            and self.control2 == other.control2
        )

    def __ne__(self, other):
        if not isinstance(other, CubicBezier):
            return NotImplemented
        return not self == other

    def __imul__(self, other):
        if isinstance(other, str):
            other = Matrix(other)
        if isinstance(other, Matrix):
            if self.start is not None:
                self.start *= other
            if self.control1 is not None:
                self.control1 *= other
            if self.control2 is not None:
                self.control2 *= other
            if self.end is not None:
                self.end *= other
        return self

    def __len__(self):
        return 4

    def __getitem__(self, item):
        if item == 0:
            return self.start
        elif item == 1:
            return self.control1
        elif item == 2:
            return self.control2
        elif item == 3:
            return self.end
        else:
            raise IndexError

    def reverse(self):
        PathSegment.reverse(self)
        c2 = self.control2
        self.control2 = self.control1
        self.control1 = c2

    def npoint(self, positions):
        """Calculate the x,y position at a certain position of the path.
        `positions` may be a float or a NumPy array."""
        x0, y0 = self.start
        x1, y1 = self.control1
        x2, y2 = self.control2
        x3, y3 = self.end

        def _compute_point(position):
            # compute factors
            pos_3 = position ** 3
            n_pos = 1 - position
            n_pos_3 = n_pos ** 3
            pos_2_n_pos = position * position * n_pos
            n_pos_2_pos = n_pos * n_pos * position
            return (
                n_pos_3 * x0 + 3 * (n_pos_2_pos * x1 + pos_2_n_pos * x2) + pos_3 * x3,
                n_pos_3 * y0 + 3 * (n_pos_2_pos * y1 + pos_2_n_pos * y2) + pos_3 * y3,
            )

        try:
            import numpy as np

            xy = np.empty(shape=(len(positions), 2))
            xy[:, 0], xy[:, 1] = _compute_point(np.array(positions))
            return xy
        except ImportError:
            return [Point(*_compute_point(position)) for position in positions]

    def bbox(self):
        """returns the tight fitting bounding box of the bezier curve.
        Code by:
        https://github.com/mathandy/svgpathtools
        """
        xmin, xmax = self._real_minmax(0)
        ymin, ymax = self._real_minmax(1)
        return xmin, ymin, xmax, ymax

    def _real_minmax(self, v):
        """returns the minimum and maximum for a real cubic bezier, with a non-zero denom
        Code by:
        https://github.com/mathandy/svgpathtools
        """
        local_extremizers = [0, 1]
        a = [c[v] for c in self]
        denom = a[0] - 3 * a[1] + 3 * a[2] - a[3]
        if abs(denom) >= 1e-12:
            delta = a[1] ** 2 - (a[0] + a[1]) * a[2] + a[2] ** 2 + (a[0] - a[1]) * a[3]
            if delta >= 0:  # otherwise no local extrema
                sqdelta = sqrt(delta)
                tau = a[0] - 2 * a[1] + a[2]
                r1 = (tau + sqdelta) / denom
                r2 = (tau - sqdelta) / denom
                if 0 < r1 < 1:
                    local_extremizers.append(r1)
                if 0 < r2 < 1:
                    local_extremizers.append(r2)
        else:
            local_extremizers.append(0.5)
        local_extrema = [self.point(t)[v] for t in local_extremizers]
        return min(local_extrema), max(local_extrema)
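    # Editor's note: _real_minmax applies the quadratic formula to B'(t) = 0 per
    # axis. With a = [start, control1, control2, end] for one coordinate, the
    # candidate extrema are t = (tau_local +/- sqrt(delta)) / denom, where
    # tau_local = a[0] - 2*a[1] + a[2] and denom = a[0] - 3*a[1] + 3*a[2] - a[3]
    # (the local name tau shadows the module-level tau constant); when denom is
    # ~0 the derivative degenerates and t = 0.5 covers the possible extremum.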
    def _length_scipy(self, error=ERROR):
        from scipy.integrate import quad

        p0 = complex(*self.start)
        p1 = complex(*self.control1)
        p2 = complex(*self.control2)
        p3 = complex(*self.end)

        def _abs_derivative(t):
            return abs(
                3 * (p1 - p0) * (1 - t) ** 2
                + 6 * (p2 - p1) * (1 - t) * t
                + 3 * (p3 - p2) * t ** 2
            )

        return quad(_abs_derivative, 0.0, 1.0, epsabs=error, limit=1000)[0]

    def _length_default(self, error=ERROR, min_depth=MIN_DEPTH):
        return self._line_length(0, 1, error, min_depth)

    def length(self, error=ERROR, min_depth=MIN_DEPTH):
        """Calculate the total length of this cubic bezier segment, using scipy
        numerical integration when available and a recursive line-segment
        approximation otherwise."""
        try:
            return self._length_scipy(error)
        except ImportError:
            return self._length_default(error, min_depth)

    def is_smooth_from(self, previous):
        """Checks if this segment would be a smooth segment following the previous"""
        if isinstance(previous, CubicBezier):
            return self.start == previous.end and (self.control1 - self.start) == (
                previous.end - previous.control2
            )
        else:
            return self.control1 == self.start

    def d(self, current_point=None, relative=None, smooth=None):
        if (smooth is None and self.smooth) or (smooth is not None and smooth):
            if (
                current_point is None
                or (relative is None and self.relative)
                or (relative is not None and not relative)
            ):
                return "S %s %s" % (self.control2, self.end)
            else:
                return "s %s %s" % (
                    self.control2 - current_point,
                    self.end - current_point,
                )
        else:
            if (
                current_point is None
                or (relative is None and self.relative)
                or (relative is not None and not relative)
            ):
                return "C %s %s %s" % (self.control1, self.control2, self.end)
            else:
                return "c %s %s %s" % (
                    self.control1 - current_point,
                    self.control2 - current_point,
                    self.end - current_point,
                )


class Arc(Curve):
    def __init__(self, *args, **kwargs):
        """
        Represents Arc commands.

        Arc objects can take different parameters to create arcs.
        Since we expect to take in SVG parameters, we accept the SVG
        parameterization: start, rx, ry, rotation, arc_flag, sweep_flag, end.

        For matrix transformations, the native parameterization is
        start, end, center, prx, pry, sweep.

        'start, end, center, prx, pry' are points and sweep amount is a t value in tau radians.
        If points are modified by an affine transformation, the arc is transformed.
        There is a special case for when the scale factor inverts; it inverts the sweep.

        Note: t-values are not angles from center in elliptical arcs. These are the same thing in
        circular arcs. But, here t is a parameterization around the ellipse, as if it were a circle.
        The position on the arc is (a * cos(t), b * sin(t)). If r-major were 0, for example, the
        positions would all fall on the x-axis, and the angle from center would all be either 0 or
        tau/2. However, since t is the parameterization, we can conceptualize it as a position on a
        circle which is then scaled and rotated by a matrix.

        prx is the point at t 0 in the ellipse.
        pry is the point at t tau/4 in the ellipse.
        prx -> center -> pry should form a right triangle.

        The rotation can be defined as the angle from center to prx. Since prx is located at
Since prx is located at\n t(0) its deviation can only be the result of a rotation.\n\n Sweep is a value in t.\n The sweep angle can be a value greater than tau and less than -tau.\n However if this is the case, conversion back to Path.d() is expected to fail.\n We can denote these arc events but not as a single command.\n\n start_t + sweep = end_t\n \"\"\"\n Curve.__init__(self, **kwargs)\n self.center = None\n self.prx = None\n self.pry = None\n self.sweep = None\n if len(args) == 6 and isinstance(args[1], complex):\n self._svg_complex_parameterize(*args)\n return\n elif len(kwargs) == 6 and \"rotation\" in kwargs:\n self._svg_complex_parameterize(**kwargs)\n return\n elif len(args) == 7:\n # This is an svg parameterized call.\n # A: rx ry x-axis-rotation large-arc-flag sweep-flag x y\n self._svg_parameterize(\n args[0], args[1], args[2], args[3], args[4], args[5], args[6]\n )\n return\n if (\n \"left\" in kwargs\n and \"right\" in kwargs\n and \"top\" in kwargs\n and \"bottom\" in kwargs\n ):\n left = kwargs[\"left\"]\n right = kwargs[\"right\"]\n top = kwargs[\"top\"]\n bottom = kwargs[\"bottom\"]\n self.center = Point((left + right) / 2.0, (top + bottom) / 2.0)\n rx = (right - left) / 2.0\n ry = (bottom - top) / 2.0\n self.prx = Point(self.center.x + rx, self.center.y)\n self.pry = Point(self.center.x, self.center.y + ry)\n len_args = len(args)\n if len_args > 0:\n if args[0] is not None:\n self.start = Point(args[0])\n if len_args > 1:\n if args[1] is not None:\n self.end = Point(args[1])\n if len_args > 2:\n if args[2] is not None:\n self.center = Point(args[2])\n if len_args > 3:\n if args[3] is not None:\n self.prx = Point(args[3])\n if len_args > 4:\n if args[4] is not None:\n self.pry = Point(args[4])\n if len_args > 5:\n self.sweep = args[5]\n return # The args gave us everything.\n if \"start\" in kwargs:\n self.start = Point(kwargs[\"start\"])\n if \"end\" in kwargs:\n self.end = Point(kwargs[\"end\"])\n if \"center\" in kwargs:\n self.center = Point(kwargs[\"center\"])\n if \"prx\" in kwargs:\n self.prx = Point(kwargs[\"prx\"])\n if \"pry\" in kwargs:\n self.pry = Point(kwargs[\"pry\"])\n if \"sweep\" in kwargs:\n self.sweep = kwargs[\"sweep\"]\n cw = True # Clockwise default. 
(sometimes needed)\n if self.start is not None and self.end is not None and self.center is None:\n # Start and end, but no center.\n # Solutions require a radius, a control point, or a bulge\n control = None\n sagitta = None\n if \"bulge\" in kwargs:\n bulge = float(kwargs[\"bulge\"])\n sagitta = bulge * self.start.distance_to(self.end) / 2.0\n elif \"sagitta\" in kwargs:\n sagitta = float(kwargs[\"sagitta\"])\n if sagitta is not None:\n control = Point.towards(self.start, self.end, 0.5)\n angle = self.start.angle_to(self.end)\n control = control.polar_to(angle - tau / 4.0, sagitta)\n if \"control\" in kwargs: # Control is any additional point on the arc.\n control = Point(kwargs[\"control\"])\n if control is not None:\n delta_a = control - self.start\n delta_b = self.end - control\n try:\n slope_a = delta_a.y / delta_a.x\n except ZeroDivisionError:\n slope_a = float(\"inf\")\n try:\n slope_b = delta_b.y / delta_b.x\n except ZeroDivisionError:\n slope_b = float(\"inf\")\n ab_mid = Point.towards(self.start, control, 0.5)\n bc_mid = Point.towards(control, self.end, 0.5)\n if delta_a.y == 0: # slope_a == 0\n cx = ab_mid.x\n if delta_b.x == 0: # slope_b == inf\n cy = bc_mid.y\n else:\n cy = bc_mid.y + (bc_mid.x - cx) / slope_b\n elif delta_b.y == 0: # slope_b == 0\n cx = bc_mid.x\n if delta_a.y == 0: # slope_a == inf\n cy = ab_mid.y\n else:\n cy = ab_mid.y + (ab_mid.x - cx) / slope_a\n elif delta_a.x == 0: # slope_a == inf\n cy = ab_mid.y\n cx = slope_b * (bc_mid.y - cy) + bc_mid.x\n elif delta_b.x == 0: # slope_b == inf\n cy = bc_mid.y\n cx = slope_a * (ab_mid.y - cy) + ab_mid.x\n elif slope_a == slope_b:\n cx = ab_mid.x\n cy = ab_mid.y\n else:\n cx = (\n slope_a * slope_b * (ab_mid.y - bc_mid.y)\n - slope_a * bc_mid.x\n + slope_b * ab_mid.x\n ) / (slope_b - slope_a)\n cy = ab_mid.y - (cx - ab_mid.x) / slope_a\n self.center = Point(cx, cy)\n cw = bool(Point.orientation(self.start, control, self.end) == 2)\n elif \"r\" in kwargs:\n r = kwargs[\"r\"]\n mid = Point(\n (self.start.x + self.end.x) / 2.0, (self.start.y + self.end.y) / 2.0\n )\n q = Point.distance(self.start, self.end)\n hq = q / 2.0\n if r < hq:\n kwargs[\"r\"] = r = hq # Correct potential math domain error.\n self.center = Point(\n mid.x + sqrt(r ** 2 - hq ** 2) * (self.start.y - self.end.y) / q,\n mid.y + sqrt(r ** 2 - hq ** 2) * (self.end.x - self.start.x) / q,\n )\n cw = bool(Point.orientation(self.start, self.center, self.end) == 1)\n if \"ccw\" in kwargs and kwargs[\"ccw\"] and cw or not cw:\n # ccw arg exists, is true, and we found the cw center, or we didn't find the cw center.\n self.center = Point(\n mid.x\n - sqrt(r ** 2 - hq ** 2) * (self.start.y - self.end.y) / q,\n mid.y\n - sqrt(r ** 2 - hq ** 2) * (self.end.x - self.start.x) / q,\n )\n elif \"rx\" in kwargs and \"ry\" in kwargs:\n # This formulation will assume p1 and p2 are both axis aligned.\n rx = kwargs[\"rx\"]\n ry = kwargs[\"ry\"]\n # We will assume rx == abs(self.start.x - self.end.x)\n self.center = Point(self.start.x, self.end.y)\n cw = bool(Point.orientation(self.start, self.center, self.end) == 1)\n if \"ccw\" in kwargs and kwargs[\"ccw\"] and cw or not cw:\n self.center = Point(self.end.x, self.start.y)\n self.sweep = tau / 4.0\n\n if self.center is None:\n raise ValueError(\"Not enough values to solve for center.\")\n if \"r\" in kwargs:\n r = kwargs[\"r\"]\n if self.prx is None:\n self.prx = Point(self.center.x + r, self.center.y)\n if self.pry is None:\n self.pry = Point(self.center.x, self.center.y + r)\n if \"rx\" in kwargs:\n rx = kwargs[\"rx\"]\n 
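            # Editor's note: when only radii are given, prx/pry are synthesized
            # below as the ellipse points at t=0 and t=tau/4 (honoring a
            # "rotation" kwarg), so that rx == distance(center, prx) holds.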
            if self.prx is None:
                if "rotation" in kwargs:
                    theta = kwargs["rotation"]
                    self.prx = Point.polar(self.center, theta, rx)
                else:
                    self.prx = Point(self.center.x + rx, self.center.y)
        if "ry" in kwargs:
            ry = kwargs["ry"]
            if self.pry is None:
                if "rotation" in kwargs:
                    theta = kwargs["rotation"]
                    theta += tau / 4.0
                    self.pry = Point.polar(self.center, theta, ry)
                else:
                    self.pry = Point(self.center.x, self.center.y + ry)
        if self.start is not None and (self.prx is None or self.pry is None):
            radius_s = Point.distance(self.center, self.start)
            self.prx = Point(self.center.x + radius_s, self.center.y)
            self.pry = Point(self.center.x, self.center.y + radius_s)
        if self.end is not None and (self.prx is None or self.pry is None):
            radius_e = Point.distance(self.center, self.end)
            self.prx = Point(self.center.x + radius_e, self.center.y)
            self.pry = Point(self.center.x, self.center.y + radius_e)
        if self.sweep is None and self.start is not None and self.end is not None:
            start_t = self.get_start_t()
            end_t = self.get_end_t()
            self.sweep = end_t - start_t
            if "ccw" in kwargs:
                cw = not bool(kwargs["ccw"])
            if cw and self.sweep < 0:
                self.sweep += tau
            if not cw and self.sweep > 0:
                self.sweep -= tau
        if self.sweep is not None and self.start is not None and self.end is None:
            start_t = self.get_start_t()
            end_t = start_t + self.sweep
            self.end = self.point_at_t(end_t)
        if self.sweep is not None and self.start is None and self.end is not None:
            end_t = self.get_end_t()
            start_t = end_t - self.sweep
            self.start = self.point_at_t(start_t)

    def __repr__(self):
        return "Arc(%s, %s, %s, %s, %s, %s)" % (
            repr(self.start),
            repr(self.end),
            repr(self.center),
            repr(self.prx),
            repr(self.pry),
            self.sweep,
        )

    def __copy__(self):
        return Arc(
            self.start,
            self.end,
            self.center,
            self.prx,
            self.pry,
            self.sweep,
            relative=self.relative,
        )

    def __eq__(self, other):
        if not isinstance(other, Arc):
            return NotImplemented
        return (
            self.start == other.start
            and self.end == other.end
            and self.prx == other.prx
            and self.pry == other.pry
            and self.center == other.center
            and self.sweep == other.sweep
        )

    def __ne__(self, other):
        if not isinstance(other, Arc):
            return NotImplemented
        return not self == other

    def __imul__(self, other):
        if isinstance(other, str):
            other = Matrix(other)
        if isinstance(other, Matrix):
            if self.start is not None:
                self.start *= other
            if self.center is not None:
                self.center *= other
            if self.end is not None:
                self.end *= other
            if self.prx is not None:
                self.prx *= other
            if self.pry is not None:
                self.pry *= other
            if other.value_scale_x() < 0:
                self.sweep = -self.sweep
            if other.value_scale_y() < 0:
                self.sweep = -self.sweep
        return self

    def __len__(self):
        return 5

    def __getitem__(self, item):
        if item == 0:
            return self.start
        elif item == 1:
            return self.end
        elif item == 2:
            return self.center
        elif item == 3:
            return self.prx
        elif item == 4:
            return self.pry
        raise IndexError

    @property
    def theta(self):
        """legacy property"""
        return Angle.radians(self.get_start_t()).as_positive_degrees

    @property
    def delta(self):
        """legacy property"""
        return Angle.radians(self.sweep).as_degrees

    def reverse(self):
        PathSegment.reverse(self)
        self.sweep = -self.sweep

    def npoint(self, positions):
        try:
            import numpy as np

            return self._points_numpy(np.array(positions))
        except ImportError:
            if self.start 
== self.end and self.sweep == 0:\n # This is equivalent of omitting the segment\n return [self.start] * len(positions)\n\n start_t = self.get_start_t()\n return [\n self.start\n if pos == 0\n else self.end\n if pos == 1\n else self.point_at_t(start_t + self.sweep * pos)\n for pos in positions\n ]\n\n def _points_numpy(self, positions):\n \"\"\"Vectorized version of `point()`.\n\n :param positions: 1D numpy array of float in [0, 1]\n :return: 1D numpy array of complex\n \"\"\"\n import numpy as np\n\n xy = np.empty((len(positions), 2), dtype=float)\n\n if self.start == self.end and self.sweep == 0:\n xy[:, 0], xy[:, 1] = self.start\n else:\n t = self.get_start_t() + self.sweep * positions\n\n rotation = self.get_rotation()\n a = self.rx\n b = self.ry\n cx = self.center.x\n cy = self.center.y\n cos_rot = cos(rotation)\n sin_rot = sin(rotation)\n cos_t = np.cos(t)\n sin_t = np.sin(t)\n xy[:, 0] = cx + a * cos_t * cos_rot - b * sin_t * sin_rot\n xy[:, 1] = cy + a * cos_t * sin_rot + b * sin_t * cos_rot\n\n # ensure clean endings\n xy[positions == 0, :] = list(self.start)\n xy[positions == 1, :] = list(self.end)\n\n return xy\n\n def _integral_length(self):\n def ellipse_part_integral(t1, t2, a, b, n=100000):\n # function to integrate\n def f(t):\n return sqrt(1 - (1 - a ** 2 / b ** 2) * sin(t) ** 2)\n\n start = min(t1, t2)\n seg_len = abs(t1 - t2) / n\n return b * sum(f(start + seg_len * i) * seg_len for i in range(1, n + 1))\n\n start_angle = self.get_start_t()\n end_angle = start_angle + self.sweep\n return ellipse_part_integral(start_angle, end_angle, self.rx, self.ry)\n\n def _exact_length(self):\n \"\"\"scipy is not a dependency. However, if scipy exists this function will find the\n exact arc length. By default .length() delegates to here and on failure uses the\n fallback method.\"\"\"\n from scipy.special import ellipeinc\n\n a = self.rx\n b = self.ry\n phi = self.get_start_t()\n m = 1 - (a / b) ** 2\n d1 = ellipeinc(phi, m)\n phi = phi + self.sweep\n m = 1 - (a / b) ** 2\n d2 = ellipeinc(phi, m)\n return b * abs(d2 - d1)\n\n def length(self, error=ERROR, min_depth=MIN_DEPTH):\n \"\"\"The length of an elliptical arc segment requires numerical\n integration, and in that case it's simpler to just do a geometric\n approximation, as for cubic bezier curves.\n \"\"\"\n if self.sweep == 0:\n return 0\n if self.start == self.end and self.sweep == 0:\n # This is equivalent of omitting the segment\n return 0\n a = self.rx\n b = self.ry\n d = abs(a - b)\n\n if d < ERROR: # This is a circle.\n return abs(self.rx * self.sweep)\n try:\n return self._exact_length()\n except ImportError:\n return self._line_length(error=error, min_depth=min_depth)\n\n def _svg_complex_parameterize(\n self, start, radius, rotation, arc_flag, sweep_flag, end\n ):\n \"\"\"Parameterization with complex radius and having rotation factors.\"\"\"\n self._svg_parameterize(\n Point(start),\n radius.real,\n radius.imag,\n rotation,\n arc_flag,\n sweep_flag,\n Point(end),\n )\n\n def _svg_parameterize(\n self, start, rx, ry, rotation, large_arc_flag, sweep_flag, end\n ):\n \"\"\"Conversion from svg parameterization, our chosen native native form.\n http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes\"\"\"\n\n large_arc_flag = bool(large_arc_flag)\n sweep_flag = bool(sweep_flag)\n start = Point(start)\n self.start = start\n end = Point(end)\n self.end = end\n if start == end or rx == 0 or ry == 0:\n # If start is equal to end, there are infinite number of circles so these void out.\n # We still permit this kind of arc, 
but SVG parameterization cannot be used to achieve it.\n self.sweep = 0\n self.prx = Point(start)\n self.pry = Point(start)\n self.center = Point(start)\n return\n cosr = cos(radians(rotation))\n sinr = sin(radians(rotation))\n dx = (start.real - end.real) / 2\n dy = (start.imag - end.imag) / 2\n x1prim = cosr * dx + sinr * dy\n x1prim_sq = x1prim * x1prim\n y1prim = -sinr * dx + cosr * dy\n y1prim_sq = y1prim * y1prim\n\n rx_sq = rx * rx\n ry_sq = ry * ry\n\n # Correct out of range radii\n radius_check = (x1prim_sq / rx_sq) + (y1prim_sq / ry_sq)\n if radius_check > 1:\n rx *= sqrt(radius_check)\n ry *= sqrt(radius_check)\n rx_sq = rx * rx\n ry_sq = ry * ry\n\n t1 = rx_sq * y1prim_sq\n t2 = ry_sq * x1prim_sq\n c = sqrt(abs((rx_sq * ry_sq - t1 - t2) / (t1 + t2)))\n\n if large_arc_flag == sweep_flag:\n c = -c\n cxprim = c * rx * y1prim / ry\n cyprim = -c * ry * x1prim / rx\n\n center = Point(\n (cosr * cxprim - sinr * cyprim) + ((start.real + end.real) / 2),\n (sinr * cxprim + cosr * cyprim) + ((start.imag + end.imag) / 2),\n )\n\n ux = (x1prim - cxprim) / rx\n uy = (y1prim - cyprim) / ry\n vx = (-x1prim - cxprim) / rx\n vy = (-y1prim - cyprim) / ry\n n = sqrt(ux * ux + uy * uy)\n p = ux\n theta = degrees(acos(p / n))\n if uy < 0:\n theta = -theta\n theta = theta % 360\n\n n = sqrt((ux * ux + uy * uy) * (vx * vx + vy * vy))\n p = ux * vx + uy * vy\n d = p / n\n # In certain cases the above calculation can through inaccuracies\n # become just slightly out of range, f ex -1.0000000000000002.\n if d > 1.0:\n d = 1.0\n elif d < -1.0:\n d = -1.0\n delta = degrees(acos(d))\n if (ux * vy - uy * vx) < 0:\n delta = -delta\n delta = delta % 360\n if not sweep_flag:\n delta -= 360\n # built parameters, delta, theta, center\n\n rotate_matrix = Matrix()\n rotate_matrix.post_rotate(\n Angle.degrees(rotation).as_radians, center.x, center.y\n )\n\n self.center = center\n self.prx = Point(center.x + rx, center.y)\n self.pry = Point(center.x, center.y + ry)\n\n self.prx.matrix_transform(rotate_matrix)\n self.pry.matrix_transform(rotate_matrix)\n self.sweep = Angle.degrees(delta).as_radians\n\n def as_quad_curves(self, arc_required):\n if arc_required is None:\n sweep_limit = tau / 12.0\n arc_required = int(ceil(abs(self.sweep) / sweep_limit))\n if arc_required == 0:\n return\n t_slice = self.sweep / float(arc_required)\n\n current_t = self.get_start_t()\n p_start = self.start\n\n theta = self.get_rotation()\n cos_theta = cos(theta)\n sin_theta = sin(theta)\n\n a = self.rx\n b = self.ry\n cx = self.center.x\n cy = self.center.y\n\n for i in range(0, arc_required):\n next_t = current_t + t_slice\n mid_t = (next_t + current_t) / 2\n p_end = self.point_at_t(next_t)\n if i == arc_required - 1:\n p_end = self.end\n cos_mid_t = cos(mid_t)\n sin_mid_t = sin(mid_t)\n alpha = (4.0 - cos(t_slice)) / 3.0\n px = cx + alpha * (a * cos_mid_t * cos_theta - b * sin_mid_t * sin_theta)\n py = cy + alpha * (a * cos_mid_t * sin_theta + b * sin_mid_t * cos_theta)\n yield QuadraticBezier(p_start, (px, py), p_end)\n p_start = p_end\n current_t = next_t\n\n def as_cubic_curves(self, arc_required=None):\n if arc_required is None:\n sweep_limit = tau / 12.0\n arc_required = int(ceil(abs(self.sweep) / sweep_limit))\n if arc_required == 0:\n return\n t_slice = self.sweep / float(arc_required)\n\n theta = self.get_rotation()\n rx = self.rx\n ry = self.ry\n p_start = self.start\n current_t = self.get_start_t()\n x0 = self.center.x\n y0 = self.center.y\n cos_theta = cos(theta)\n sin_theta = sin(theta)\n\n for i in range(0, arc_required):\n 
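            # Editor's note: each slice of the sweep is fitted with one cubic; the
            # control-point distance uses the standard arc-to-cubic scaling
            #     alpha = sin(d) * (sqrt(4 + 3 * tan(d / 2) ** 2) - 1) / 3
            # for slice sweep d, applied along the ellipse derivative at the
            # slice's two endpoints (the ePrimen1/ePrimen2 vectors below).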
next_t = current_t + t_slice\n\n alpha = (\n sin(t_slice) * (sqrt(4 + 3 * pow(tan((t_slice) / 2.0), 2)) - 1) / 3.0\n )\n\n cos_start_t = cos(current_t)\n sin_start_t = sin(current_t)\n\n ePrimen1x = -rx * cos_theta * sin_start_t - ry * sin_theta * cos_start_t\n ePrimen1y = -rx * sin_theta * sin_start_t + ry * cos_theta * cos_start_t\n\n cos_end_t = cos(next_t)\n sin_end_t = sin(next_t)\n\n p2En2x = x0 + rx * cos_end_t * cos_theta - ry * sin_end_t * sin_theta\n p2En2y = y0 + rx * cos_end_t * sin_theta + ry * sin_end_t * cos_theta\n p_end = (p2En2x, p2En2y)\n if i == arc_required - 1:\n p_end = self.end\n\n ePrimen2x = -rx * cos_theta * sin_end_t - ry * sin_theta * cos_end_t\n ePrimen2y = -rx * sin_theta * sin_end_t + ry * cos_theta * cos_end_t\n\n p_c1 = (p_start[0] + alpha * ePrimen1x, p_start[1] + alpha * ePrimen1y)\n p_c2 = (p_end[0] - alpha * ePrimen2x, p_end[1] - alpha * ePrimen2y)\n\n yield CubicBezier(p_start, p_c1, p_c2, p_end)\n p_start = Point(p_end)\n current_t = next_t\n\n def is_circular(self):\n a = self.rx\n b = self.ry\n return a == b\n\n @property\n def radius(self):\n \"\"\"Legacy complex radius property\n\n Point will work like a complex for legacy reasons.\n \"\"\"\n return Point(self.rx, self.ry)\n\n @property\n def rx(self):\n return Point.distance(self.center, self.prx)\n\n @property\n def ry(self):\n return Point.distance(self.center, self.pry)\n\n def get_rotation(self):\n return Point.angle(self.center, self.prx)\n\n def get_start_angle(self):\n \"\"\"\n :return: Angle from the center point to start point.\n \"\"\"\n return self.angle_at_point(self.start)\n\n def get_end_angle(self):\n \"\"\"\n :return: Angle from the center point to end point.\n \"\"\"\n return self.angle_at_point(self.end)\n\n def get_start_t(self):\n \"\"\"\n start t value in the ellipse.\n\n :return: t parameter of start point.\n \"\"\"\n return self.t_at_point(self.point_at_angle(self.get_start_angle()))\n\n def get_end_t(self):\n \"\"\"\n end t value in the ellipse.\n\n :return: t parameter of start point.\n \"\"\"\n return self.t_at_point(self.point_at_angle(self.get_end_angle()))\n\n def point_at_angle(self, angle):\n \"\"\"\n find the point on the ellipse from the center at the given angle.\n Note: For non-circular arcs this is different than point(t).\n\n :param angle: angle from center to find point\n :return: point found\n \"\"\"\n angle -= self.get_rotation()\n a = self.rx\n b = self.ry\n if a == b:\n return self.point_at_t(angle)\n t = atan2(a * tan(angle), b)\n tau_1_4 = tau / 4.0\n tau_3_4 = 3 * tau_1_4\n if tau_3_4 >= abs(angle) % tau > tau_1_4:\n t += tau / 2.0\n return self.point_at_t(t)\n\n def angle_at_point(self, p):\n \"\"\"\n find the angle to the point.\n\n :param p: point\n :return: angle to given point.\n \"\"\"\n return self.center.angle_to(p)\n\n def t_at_point(self, p):\n \"\"\"\n find the t parameter to at the point.\n\n :param p: point\n :return: t parameter to the given point.\n \"\"\"\n angle = self.angle_at_point(p)\n angle -= self.get_rotation()\n a = self.rx\n b = self.ry\n t = atan2(a * tan(angle), b)\n tau_1_4 = tau / 4.0\n tau_3_4 = 3 * tau_1_4\n if tau_3_4 >= abs(angle) % tau > tau_1_4:\n t += tau / 2.0\n return t\n\n def point_at_t(self, t):\n \"\"\"\n find the point that corresponds to given value t.\n Where t=0 is the first point and t=tau is the final point.\n\n In the case of a circle: t = angle.\n\n :param t:\n :return:\n \"\"\"\n rotation = self.get_rotation()\n a = self.rx\n b = self.ry\n cx = self.center.x\n cy = self.center.y\n cos_rot = 
cos(rotation)\n sin_rot = sin(rotation)\n cos_t = cos(t)\n sin_t = sin(t)\n px = cx + a * cos_t * cos_rot - b * sin_t * sin_rot\n py = cy + a * cos_t * sin_rot + b * sin_t * cos_rot\n return Point(px, py)\n\n def get_ellipse(self):\n return Ellipse(self.center, self.rx, self.ry, self.get_rotation())\n\n def bbox(self):\n \"\"\"Find the bounding box of a arc.\n Code from: https://github.com/mathandy/svgpathtools\n \"\"\"\n phi = self.get_rotation().as_radians\n if cos(phi) == 0:\n atan_x = tau / 4.0\n atan_y = 0\n elif sin(phi) == 0:\n atan_x = 0\n atan_y = tau / 4.0\n else:\n rx, ry = self.rx, self.ry\n atan_x = atan(-(ry / rx) * tan(phi))\n atan_y = atan((ry / rx) / tan(phi))\n\n def angle_inv(ang, k): # inverse of angle from Arc.derivative()\n return ((ang + (tau / 2.0) * k) * (360 / tau) - self.theta) / self.delta\n\n xtrema = [self.start.x, self.end.x]\n ytrema = [self.start.y, self.end.y]\n\n for k in range(-4, 5):\n tx = angle_inv(atan_x, k)\n ty = angle_inv(atan_y, k)\n if 0 <= tx <= 1:\n xtrema.append(self.point(tx).x)\n if 0 <= ty <= 1:\n ytrema.append(self.point(ty).y)\n\n return min(xtrema), min(ytrema), max(xtrema), max(ytrema)\n\n def d(self, current_point=None, relative=None, smooth=None):\n if (\n current_point is None\n or (relative is None and self.relative)\n or (relative is not None and not relative)\n ):\n return \"A %G,%G %G %d,%d %s\" % (\n self.rx,\n self.ry,\n self.get_rotation().as_degrees,\n int(abs(self.sweep) > (tau / 2.0)),\n int(self.sweep >= 0),\n self.end,\n )\n else:\n return \"a %G,%G %G %d,%d %s\" % (\n self.rx,\n self.ry,\n self.get_rotation().as_degrees,\n int(abs(self.sweep) > (tau / 2.0)),\n int(self.sweep >= 0),\n self.end - current_point,\n )\n\n\nclass Path(Shape, MutableSequence):\n \"\"\"\n A Path is a Mutable sequence of path segments\n\n It is a generalized shape which can map out all the other shapes.\n\n Each PathSegment object maps a particular command. Each one exists only once in each path and every point contained\n within the object is also unique. We attempt to internally maintain some validity. Each end point should link\n to the following segments start point. And each close point should connect from the preceding segments endpoint to\n the last Move command.\n\n These are soft checks made only at the time of addition and some manipulations. Modifying the points of the segments\n can and will cause path invalidity. Some SVG invalid operations are permitted such as arcs longer than tau radians\n or beginning sequences without a move. The expectation is that these will eventually be used as part of a valid path\n so these fragment paths are permitted. 
In some cases these invalid paths will still have consistent path_d values,\n in other cases, there will be no valid methods to reproduce these.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n Shape.__init__(self, *args, **kwargs)\n self._length = None\n self._lengths = None\n self._segments = list()\n if len(args) != 1:\n self._segments.extend(args)\n else:\n s = args[0]\n if isinstance(s, Subpath):\n self._segments.extend(s.segments(transformed=False))\n Shape.__init__(self, s._path)\n elif isinstance(s, Shape):\n self._segments.extend(s.segments(transformed=False))\n elif isinstance(s, str):\n self._segments = list()\n self.parse(s)\n elif isinstance(s, tuple):\n # We have no guarantee of the validity of the source data\n self._segments.extend(s)\n self.validate_connections()\n elif isinstance(s, list):\n # We have no guarantee of the validity of the source data\n self._segments.extend(s)\n self.validate_connections()\n elif isinstance(s, PathSegment):\n self._segments.append(s)\n if SVG_ATTR_DATA in self.values:\n if not self.values.get(\"pathd_loaded\", False):\n self.parse(self.values[SVG_ATTR_DATA])\n self.values[\"pathd_loaded\"] = True\n\n def __copy__(self):\n path = Path(self)\n segs = path._segments\n for i in range(0, len(segs)):\n segs[i] = copy(segs[i])\n return path\n\n def __getitem__(self, index):\n return self._segments[index]\n\n def _validate_subpath(self, index):\n \"\"\"ensure the subpath containing this index is valid.\"\"\"\n if index < 0 or index + 1 >= len(self._segments):\n return # This connection doesn't exist.\n for j in range(index, len(self._segments)):\n close_search = self._segments[j]\n if isinstance(close_search, Move):\n return # Not a closed path, subpath is valid.\n if isinstance(close_search, Close):\n for k in range(index, -1, -1):\n move_search = self._segments[k]\n if isinstance(move_search, Move):\n self._segments[j].end = Point(move_search.end)\n return\n self._segments[j].end = Point(self._segments[0].end)\n return\n\n def _validate_move(self, index):\n \"\"\"ensure the next closed point from this index points to a valid location.\"\"\"\n for i in range(index + 1, len(self._segments)):\n segment = self._segments[i]\n if isinstance(segment, Move):\n return # Not a closed path, the move is valid.\n if isinstance(segment, Close):\n segment.end = Point(self._segments[index].end)\n return\n\n def _validate_close(self, index):\n \"\"\"ensure the close element at this position correctly links to the previous move\"\"\"\n for i in range(index, -1, -1):\n segment = self._segments[i]\n if isinstance(segment, Move):\n self._segments[index].end = Point(segment.end)\n return\n self._segments[index].end = (\n Point(self._segments[0].end) if self._segments[0].end is not None else None\n )\n # If move is never found, just the end point of the first element. Unless that's not a thing.\n\n def _validate_connection(self, index, prefer_second=False):\n \"\"\"\n Validates the connection at the index.\n Connection 0 is the connection between getitem(0) and getitem(1)\n\n prefer_second is for those cases where failing the connection requires replacing\n a existing value. 
It will prefer the authority of right side, second value.\n \"\"\"\n if index < 0 or index + 1 >= len(self._segments):\n return # This connection doesn't exist.\n first = self._segments[index]\n second = self._segments[index + 1]\n if first.end is not None and second.start is None:\n second.start = Point(first.end)\n elif first.end is None and second.start is not None:\n first.end = Point(second.start)\n elif first.end != second.start:\n # The two values exist but are not equal. One must replace the other.\n if prefer_second:\n first.end = Point(second.start)\n else:\n second.start = Point(first.end)\n\n def __setitem__(self, index, new_element):\n if isinstance(new_element, str):\n new_element = Path(new_element)\n if len(new_element) == 0:\n return\n new_element = new_element.segments()\n if isinstance(index, int):\n if len(new_element) > 1:\n raise ValueError # Cannot insert multiple items into a single space. Requires slice.\n new_element = new_element[0]\n self._segments[index] = new_element\n self._length = None\n self._lengths = None\n if isinstance(index, slice):\n self.validate_connections()\n else:\n self._validate_connection(index - 1)\n self._validate_connection(index)\n if isinstance(new_element, Move):\n self._validate_move(index)\n if isinstance(new_element, Close):\n self._validate_close(index)\n\n def __delitem__(self, index):\n original_element = self._segments[index]\n del self._segments[index]\n self._length = None\n if isinstance(index, slice):\n self.validate_connections()\n else:\n self._validate_connection(index - 1)\n if isinstance(original_element, (Close, Move)):\n self._validate_subpath(index)\n\n def __iadd__(self, other):\n if isinstance(other, str):\n self.parse(other)\n elif isinstance(other, (Path, Subpath)):\n self.extend(map(copy, list(other)))\n elif isinstance(other, Shape):\n self.parse(other.d())\n elif isinstance(other, PathSegment):\n self.append(other)\n else:\n return NotImplemented\n return self\n\n def __add__(self, other):\n if isinstance(other, (str, Path, Subpath, Shape, PathSegment)):\n n = copy(self)\n n += other\n return n\n return NotImplemented\n\n def __radd__(self, other):\n if isinstance(other, str):\n path = Path(other)\n path.extend(map(copy, self._segments))\n return path\n elif isinstance(other, PathSegment):\n path = copy(self)\n path.insert(0, other)\n return path\n else:\n return NotImplemented\n\n def __len__(self):\n return len(self._segments)\n\n def __str__(self):\n return self.d()\n\n def __repr__(self):\n values = []\n if len(self) > 0:\n values.append(\", \".join(repr(x) for x in self._segments))\n self._repr_shape(values)\n params = \", \".join(values)\n name = self._name()\n return \"%s(%s)\" % (name, params)\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self.__eq__(Path(other))\n if not isinstance(other, Path):\n return NotImplemented\n if len(self) != len(other):\n return False\n p = abs(self)\n q = abs(other)\n for s, o in zip(q._segments, p._segments):\n if not s == o:\n return False\n if p.stroke_width != q.stroke_width:\n return False\n return True\n\n def __ne__(self, other):\n if not isinstance(other, (Path, str)):\n return NotImplemented\n return not self == other\n\n def parse(self, pathdef):\n \"\"\"Parses the SVG path.\"\"\"\n tokens = SVGLexicalParser()\n tokens.parse(self, pathdef)\n\n def validate_connections(self):\n \"\"\"\n Force validate all connections.\n\n This will scan path connections and link any adjacent elements together by replacing any None points or causing\n the start 
position of the next element to equal the end position of the previous. This should only be needed\n when combining paths and elements together. Close elements are always connected to the last Move element or to\n the end position of the first element in the list. The start element of the first segment may or may not be\n None.\n \"\"\"\n zpoint = None\n last_segment = None\n for segment in self._segments:\n if zpoint is None or isinstance(segment, Move):\n zpoint = segment.end\n if last_segment is not None:\n if segment.start is None and last_segment.end is not None:\n segment.start = Point(last_segment.end)\n elif last_segment.end is None and segment.start is not None:\n last_segment.end = Point(segment.start)\n elif last_segment.end != segment.start:\n segment.start = Point(last_segment.end)\n if (\n isinstance(segment, Close)\n and zpoint is not None\n and segment.end != zpoint\n ):\n segment.end = Point(zpoint)\n last_segment = segment\n\n def _is_valid(self):\n \"\"\"\n Checks validation of all connections.\n\n Paths are valid if all end points match the start of the next point and all close\n commands return to the last valid move command.\n\n This does not check for incongruent path validity. Path fragments without initial moves\n double closed paths, may all pass this check.\n \"\"\"\n zpoint = None\n last_segment = None\n for segment in self._segments:\n if zpoint is None or isinstance(segment, Move):\n zpoint = segment.end\n if last_segment is not None:\n if segment.start is None:\n return False\n elif last_segment.end is None:\n return False\n elif last_segment.end != segment.start:\n return False\n if (\n isinstance(segment, Close)\n and zpoint is not None\n and segment.end != zpoint\n ):\n return False\n last_segment = segment\n return True\n\n @property\n def first_point(self):\n \"\"\"First point along the Path. This is the start point of the first segment unless it starts\n with a Move command with a None start in which case first point is that Move's destination.\"\"\"\n if len(self._segments) == 0:\n return None\n if self._segments[0].start is not None:\n return Point(self._segments[0].start)\n return (\n Point(self._segments[0].end) if self._segments[0].end is not None else None\n )\n\n @property\n def current_point(self):\n if len(self._segments) == 0:\n return None\n return (\n Point(self._segments[-1].end)\n if self._segments[-1].end is not None\n else None\n )\n\n @property\n def z_point(self):\n \"\"\"\n Z is the destination of the last Move. 
        It can mean, but doesn't necessarily mean, the first_point in the path.
        This behavior of Z is defined in svg spec:
        http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
        """
        end_pos = None
        for segment in reversed(self._segments):
            if isinstance(segment, Move):
                end_pos = segment.end
                break
        if end_pos is None:
            try:
                end_pos = self._segments[0].end
            except IndexError:
                pass  # entire path is "z".
        return end_pos

    @property
    def smooth_point(self):
        """Returns the smoothing control point for the smooth commands.
        With regard to the SVG standard, if the last command was a curve, the
        smooth control point is the reflection of the previous control point.

        If the last command was not a curve, the smooth_point is coincident with the current.
        https://www.w3.org/TR/SVG/paths.html#PathDataCubicBezierCommands
        """

        if len(self._segments) == 0:
            return None
        start_pos = self.current_point
        last_segment = self._segments[-1]
        if isinstance(last_segment, QuadraticBezier):
            previous_control = last_segment.control
            return previous_control.reflected_across(start_pos)
        elif isinstance(last_segment, CubicBezier):
            previous_control = last_segment.control2
            return previous_control.reflected_across(start_pos)
        return start_pos

    def start(self):
        pass

    def end(self):
        pass

    def move(self, *points, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        end_pos = points[0]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = Move(start_pos, end_pos)
        segment.relative = relative
        self.append(segment)
        if len(points) > 1:
            self.line(*points[1:], relative=relative)
        return self

    def line(self, *points, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        end_pos = points[0]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = Line(start_pos, end_pos)
        segment.relative = relative
        self.append(segment)
        if len(points) > 1:
            self.line(*points[1:], relative=relative)
        return self

    def vertical(self, *y_points, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        if relative:
            segment = Line(start_pos, Point(start_pos.x, start_pos.y + y_points[0]))
        else:
            segment = Line(start_pos, Point(start_pos.x, y_points[0]))
        segment.relative = relative
        self.append(segment)
        if len(y_points) > 1:
            self.vertical(*y_points[1:], relative=relative)
        return self

    def horizontal(self, *x_points, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        if relative:
            segment = Line(start_pos, Point(start_pos.x + x_points[0], start_pos.y))
            segment.relative = relative
        else:
            segment = Line(start_pos, Point(x_points[0], start_pos.y))
            segment.relative = relative
        self.append(segment)
        if len(x_points) > 1:
            self.horizontal(*x_points[1:], relative=relative)
        return self
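    # Editor's note: the builder methods above chain, so a unit square can be
    # written fluently (a sketch; "z" endpoints resolve through z_point and the
    # exact d() spacing is an assumption based on the segment format strings):
    #
    #     >>> p = Path()
    #     >>> p.move((0, 0)).line((100, 0), (100, 100), (0, 100)).closed()
    #     >>> p.d()   # expected: 'M 0,0 L 100,0 L 100,100 L 0,100 Z'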
    def smooth_quad(self, *points, **kwargs):
        """Smooth quadratic curve. The control point is the "reflection" of
        the control point of the previous segment (see smooth_point)."""
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        control1 = self.smooth_point
        end_pos = points[0]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = QuadraticBezier(start_pos, control1, end_pos)
        segment.relative = relative
        segment.smooth = True
        self.append(segment)
        if len(points) > 1:
            self.smooth_quad(*points[1:], relative=relative)
        return self

    def quad(self, *points, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        control = points[0]
        if control in ("z", "Z"):
            control = self.z_point
        end_pos = points[1]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = QuadraticBezier(start_pos, control, end_pos)
        segment.relative = relative
        segment.smooth = False
        self.append(segment)
        if len(points) > 2:
            self.quad(*points[2:], relative=relative)
        return self

    def smooth_cubic(self, *points, **kwargs):
        """Smooth cubic curve. The first control point is the "reflection" of
        the second control point of the previous segment (see smooth_point)."""
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        control1 = self.smooth_point
        control2 = points[0]

        if control2 in ("z", "Z"):
            control2 = self.z_point
        end_pos = points[1]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = CubicBezier(start_pos, control1, control2, end_pos)
        segment.relative = relative
        segment.smooth = True
        self.append(segment)
        if len(points) > 2:
            self.smooth_cubic(*points[2:], relative=relative)
        return self

    def cubic(self, *points, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        control1 = points[0]
        if control1 in ("z", "Z"):
            control1 = self.z_point
        control2 = points[1]
        if control2 in ("z", "Z"):
            control2 = self.z_point
        end_pos = points[2]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = CubicBezier(start_pos, control1, control2, end_pos)
        segment.relative = relative
        segment.smooth = False
        self.append(segment)
        if len(points) > 3:
            self.cubic(*points[3:], relative=relative)
        return self

    def arc(self, *arc_args, **kwargs):
        relative = kwargs["relative"] if "relative" in kwargs else False
        start_pos = self.current_point
        rx = arc_args[0]
        ry = arc_args[1]
        rotation = arc_args[2]
        arc = arc_args[3]
        sweep = arc_args[4]
        end_pos = arc_args[5]
        if end_pos in ("z", "Z"):
            end_pos = self.z_point
        segment = Arc(start_pos, rx, ry, rotation, arc, sweep, end_pos)
        segment.relative = relative
        self.append(segment)
        if len(arc_args) > 6:
            self.arc(*arc_args[6:], relative=relative)
        return self

    def closed(self, relative=False):
        start_pos = self.current_point
        end_pos = self.z_point
        segment = Close(start_pos, end_pos)
        segment.relative = relative
        self.append(segment)
        return self

    def append(self, value):
        if isinstance(value, str):
            value = Path(value)
            if len(value) == 0:
                return
            if len(value) > 1:
                self.extend(value)
                return
            value = value[0]
        self._length = None
        index = len(self._segments) - 1
        self._segments.append(value)
        self._validate_connection(index)
        if isinstance(value, Close):
            self._validate_close(index + 1)

    def insert(self, index, value):
        if isinstance(value, str):
            value = Path(value)
            if len(value) == 0:
                return
            value = value[0]
        self._length = None
        self._segments.insert(index, value)
        self._validate_connection(index - 1)
        self._validate_connection(index)
        if isinstance(value, Move):
            self._validate_move(index)
        if isinstance(value, Close):
            self._validate_close(index)

    def extend(self, iterable):
        if isinstance(iterable, str):
            iterable = Path(iterable)
        self._length = None
        index = len(self._segments) - 1
        self._segments.extend(iterable)
        self._validate_connection(index)
        self._validate_subpath(index)

    def direct_close(self):
        """Forces close operations to be zero length by introducing a direct
        line-to operation just before any non-zero-length close.

        This is helpful for operations like reverse(), because the close must be
        located at the very end of the path sequence; if a close is in effect a
        line-to plus a close, that line-to would otherwise need to start the
        reversed sequence.

        For some operations this won't matter, since the result is still a
        closed shape with reversed ordering. But if the final point in the
        sequence must exactly switch with the first point in the sequence, the
        close segments must be direct and zero length.
        """
        if len(self._segments) == 0:
            return
        for i in range(len(self._segments) - 1, -1, -1):
            segment = self._segments[i]
            if isinstance(segment, Close):
                if segment.length() != 0:
                    line = Line(segment.start, segment.end)
                    segment.start = Point(segment.end)
                    self.insert(i, line)
        return self

    def reverse(self):
        if len(self._segments) == 0:
            return
        prepoint = self._segments[0].start
        self._segments[0].start = None
        p = Path()
        subpaths = list(self.as_subpaths())
        for subpath in subpaths:
            subpath.reverse()
        for subpath in reversed(subpaths):
            p += subpath
        self._segments = p._segments
        self._segments[0].start = prepoint
        return self

    def subpath(self, index):
        subpaths = list(self.as_subpaths())
        return subpaths[index]

    def count_subpaths(self):
        subpaths = list(self.as_subpaths())
        return len(subpaths)

    def as_subpaths(self):
        last = 0
        for current, seg in enumerate(self):
            if current != last and isinstance(seg, Move):
                yield Subpath(self, last, current - 1)
                last = current
        yield Subpath(self, last, len(self) - 1)

    def as_points(self):
        """Returns the list of defining points within path"""
        for seg in self:
            for p in seg:
                if not isinstance(p, Point):
                    yield Point(p)
                else:
                    yield p

    def reify(self):
        """
        Realizes the transform to the shape properties.

        Path objects reify perfectly.
        """
        GraphicObject.reify(self)
        Transformable.reify(self)
        if isinstance(self.transform, Matrix):
            for e in self._segments:
                e *= self.transform
        self.transform.reset()
        return self

    @staticmethod
    def svg_d(segments, relative=None, smooth=None):
        if len(segments) == 0:
            return ""
        parts = []
        previous_segment = None
        p = Point(0)
        if smooth is None:
            override_smooth = False
            smooth_set_value = True
        else:
            override_smooth = True
            smooth_set_value = bool(smooth)
        if relative is not None:
            for segment in segments:
                if isinstance(segment, (Move, Line, Arc, Close)):
                    parts.append(segment.d(p, relative=relative))
                elif isinstance(segment, (CubicBezier, QuadraticBezier)):
                    if (override_smooth and smooth_set_value) or (
                        not override_smooth and segment.smooth
                    ):
                        parts.append(
                            segment.d(
                                p,
                                relative=relative,
                                smooth=segment.is_smooth_from(previous_segment),
                            )
                        )
                    else:
                        parts.append(segment.d(p, relative=relative, smooth=False))
                previous_segment = segment
                p = previous_segment.end
        else:
            for segment in segments:
                if isinstance(segment, (Move, Line, Arc, Close)):
                    parts.append(segment.d(p, relative=segment.relative))
                elif isinstance(segment, (CubicBezier, QuadraticBezier)):
                    if (override_smooth and smooth_set_value) or (
                        not override_smooth and segment.smooth
                    ):
                        parts.append(
                            segment.d(
                                p,
                                relative=segment.relative,
                                smooth=segment.is_smooth_from(previous_segment),
                            )
                        )
                    else:
                        parts.append(
                            segment.d(p, relative=segment.relative, smooth=False)
                        )
                previous_segment = segment
                p = previous_segment.end
        return " ".join(parts)
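    # Editor's note: a small round-trip sketch for the parser and svg_d()/d()
    # (doctest-style, kept in comments; the exact output spacing is an
    # assumption based on the formatting strings used by the segments):
    #
    #     >>> p = Path("M 0,0 L 100,0 L 100,100 Z")
    #     >>> len(p)                # Move, Line, Line, Close
    #     4
    #     >>> p.d(relative=False)   # expected: 'M 0,0 L 100,0 L 100,100 Z'
    #
    # Each parsed command becomes one PathSegment, and validate_connections()
    # guarantees each segment's start equals the previous segment's end.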
    def d(self, relative=None, transformed=True, smooth=None):
        path = self
        if transformed:
            path = abs(path)
        return Path.svg_d(path._segments, relative=relative, smooth=smooth)

    def segments(self, transformed=True):
        if transformed and not self.transform.is_identity():
            return [s * self.transform for s in self._segments]
        return self._segments

    def approximate_arcs_with_cubics(self, error=0.1):
        """
        Iterates through this path and replaces any Arcs with cubic bezier curves.
        """
        sweep_limit = tau * error
        for s in range(len(self) - 1, -1, -1):
            segment = self[s]
            if isinstance(segment, Arc):
                arc_required = int(ceil(abs(segment.sweep) / sweep_limit))
                self[s : s + 1] = list(segment.as_cubic_curves(arc_required))

    def approximate_arcs_with_quads(self, error=0.1):
        """
        Iterates through this path and replaces any Arcs with quadratic bezier curves.
        """
        sweep_limit = tau * error
        for s in range(len(self) - 1, -1, -1):
            segment = self[s]
            if isinstance(segment, Arc):
                arc_required = int(ceil(abs(segment.sweep) / sweep_limit))
                self[s : s + 1] = list(segment.as_quad_curves(arc_required))


class Rect(Shape):
    """
    SVG Rect shapes are defined in SVG2 10.2
    https://www.w3.org/TR/SVG2/shapes.html#RectElement

    These have geometric properties x, y, width, height, rx, ry
    Geometric properties can be Length values.

    Rect(x, y, width, height)
    Rect(x, y, width, height, rx, ry)
    Rect(x, y, width, height, rx, ry, matrix)
    Rect(x, y, width, height, rx, ry, matrix, stroke, fill)

    Rect(dict): dictionary values read from svg.

    """

    def __init__(self, *args, **kwargs):
        self.x = None
        self.y = None
        self.width = None
        self.height = None
        self.rx = None
        self.ry = None
        Shape.__init__(self, *args, **kwargs)
        self._validate_rect()

    def property_by_object(self, s):
        Shape.property_by_object(self, s)
        self.x = s.x
        self.y = s.y
        self.width = s.width
        self.height = s.height
        self.rx = s.rx
        self.ry = s.ry
        self._validate_rect()

    def property_by_values(self, values):
        Shape.property_by_values(self, values)
        self.x = Length(values.get(SVG_ATTR_X, 0)).value()
        self.y = Length(values.get(SVG_ATTR_Y, 0)).value()
        self.width = Length(values.get(SVG_ATTR_WIDTH, 1)).value()
        self.height = Length(values.get(SVG_ATTR_HEIGHT, 1)).value()
        self.rx = Length(values.get(SVG_ATTR_RADIUS_X, None)).value()
        self.ry = Length(values.get(SVG_ATTR_RADIUS_Y, None)).value()

    def property_by_args(self, *args):
        arg_length = len(args)
        if arg_length >= 1:
            self.x = Length(args[0]).value()
        if arg_length >= 2:
            self.y = Length(args[1]).value()
        if arg_length >= 3:
            self.width = Length(args[2]).value()
        if arg_length >= 4:
            self.height = Length(args[3]).value()
        if arg_length >= 5:
            self.rx = Length(args[4]).value()
        if arg_length >= 6:
            self.ry = Length(args[5]).value()
        if arg_length >= 7:
            self._init_shape(*args[6:])

    def _validate_rect(self):
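        # Editor's note: the rules below mirror SVG's auto-radius behavior: a
        # missing rx copies ry (and vice versa), both missing yields sharp
        # corners, and each radius is clamped to half the corresponding side.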
\"\"\"None is 'auto' for values.\"\"\"\n rx = self.rx\n ry = self.ry\n if rx is None and ry is None:\n rx = ry = 0\n if rx is not None and ry is None:\n rx = Length(rx).value(relative_length=self.width)\n ry = rx\n elif ry is not None and rx is None:\n ry = Length(ry).value(relative_length=self.height)\n rx = ry\n elif rx is not None and ry is not None:\n rx = Length(rx).value(relative_length=self.width)\n ry = Length(ry).value(relative_length=self.height)\n if rx == 0 or ry == 0:\n rx = ry = 0\n else:\n rx = min(rx, self.width / 2.0)\n ry = min(ry, self.height / 2.0)\n self.rx = rx\n self.ry = ry\n\n def __repr__(self):\n values = []\n if self.x != 0:\n values.append(\"x=%s\" % Length.str(self.x))\n if self.y != 0:\n values.append(\"y=%s\" % Length.str(self.y))\n if self.width != 0:\n values.append(\"width=%s\" % Length.str(self.width))\n if self.height != 0:\n values.append(\"height=%s\" % Length.str(self.height))\n if self.rx != 0:\n values.append(\"rx=%s\" % Length.str(self.rx))\n if self.ry != 0:\n values.append(\"ry=%s\" % Length.str(self.ry))\n self._repr_shape(values)\n params = \", \".join(values)\n return \"Rect(%s)\" % params\n\n def __copy__(self):\n return Rect(self)\n\n @property\n def implicit_position(self):\n if not self.apply:\n return Point(self.x, self.y)\n point = Point(self.x, self.y)\n point *= self.transform\n return point\n\n @property\n def implicit_x(self):\n if not self.apply:\n return self.x\n return self.implicit_position[0]\n\n @property\n def implicit_y(self):\n if not self.apply:\n return self.y\n return self.implicit_position[1]\n\n @property\n def implicit_width(self):\n if not self.apply:\n return self.width\n p = Point(self.width, 0)\n p *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(p)\n\n @property\n def implicit_height(self):\n if not self.apply:\n return self.height\n p = Point(0, self.height)\n p *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(p)\n\n @property\n def implicit_rx(self):\n if not self.apply:\n return self.rx\n p = Point(self.rx, 0)\n p *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(p)\n\n @property\n def implicit_ry(self):\n if not self.apply:\n return self.ry\n p = Point(0, self.ry)\n p *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(p)\n\n def segments(self, transformed=True):\n \"\"\"\n Rect decomposition is given in SVG 2.0 10.2\n\n Rect:\n * perform an absolute moveto operation to location (x,y);\n * perform an absolute horizontal lineto with parameter x+width;\n * perform an absolute vertical lineto parameter y+height;\n * perform an absolute horizontal lineto parameter x;\n * ( close the path)\n\n Rounded Rect:\n rx and ry are used as the equivalent parameters to the elliptical arc command,\n the x-axis-rotation and large-arc-flag are set to zero, the sweep-flag is set to one\n\n * perform an absolute moveto operation to location (x+rx,y);\n * perform an absolute horizontal lineto with parameter x+width-rx;\n * perform an absolute elliptical arc operation to coordinate (x+width,y+ry)\n * perform an absolute vertical lineto parameter y+height-ry;\n * perform an absolute elliptical arc operation to coordinate (x+width-rx,y+height)\n * perform an absolute horizontal lineto parameter x+rx;\n * perform an absolute elliptical arc operation to coordinate (x,y+height-ry)\n * perform an absolute vertical lineto parameter y+ry\n * perform an 
absolute elliptical arc operation with a segment-completing close path operation\n\n :param transformed: provide the reified version.\n :return: path_d of shape.\n \"\"\"\n x = self.x\n y = self.y\n width = self.width\n height = self.height\n if width == 0 or height == 0:\n return () # a computed value of zero for either dimension disables rendering.\n rx = self.rx\n ry = self.ry\n if rx == ry == 0:\n segments = (\n Move(None, (x, y)),\n Line((x, y), (x + width, y)),\n Line((x + width, y), (x + width, y + height)),\n Line((x + width, y + height), (x, y + height)),\n Close((x, y + height), (x, y)),\n )\n else:\n segments = (\n Move(None, (x + rx, y)),\n Line((x + rx, y), (x + width - rx, y)),\n Arc((x + width - rx, y), (x + width, y + ry), rx=rx, ry=ry),\n Line((x + width, y + ry), (x + width, y + height - ry)),\n Arc(\n (x + width, y + height - ry),\n (x + width - rx, y + height),\n rx=rx,\n ry=ry,\n ),\n Line((x + width - rx, y + height), (x + rx, y + height)),\n Arc((x + rx, y + height), (x, y + height - ry), rx=rx, ry=ry),\n Line((x, y + height - ry), (x, y + ry)),\n Arc((x, y + ry), (x + rx, y), rx=rx, ry=ry),\n Close((x + rx, y), (x + rx, y)),\n )\n if not transformed or self.transform.is_identity():\n return segments\n else:\n return [s * self.transform for s in segments]\n\n def reify(self):\n \"\"\"\n Realizes the transform to the shape properties.\n\n If the realized shape can be properly represented as a rectangle with an identity matrix\n it will be, otherwise the properties will approximate the implied values.\n\n Skewed and Rotated rectangles cannot be reified.\n \"\"\"\n GraphicObject.reify(self)\n Transformable.reify(self)\n scale_x = self.transform.value_scale_x()\n scale_y = self.transform.value_scale_y()\n translate_x = self.transform.value_trans_x()\n translate_y = self.transform.value_trans_y()\n if (\n self.transform.value_skew_x() == 0\n and self.transform.value_skew_y() == 0\n and scale_x != 0\n and scale_y != 0\n ):\n self.x *= scale_x\n self.y *= scale_y\n self.x += translate_x\n self.y += translate_y\n self.transform *= Matrix.translate(-translate_x, -translate_y)\n self.rx = scale_x * self.rx\n self.ry = scale_y * self.ry\n self.width = scale_x * self.width\n self.height = scale_y * self.height\n self.transform *= Matrix.scale(1.0 / scale_x, 1.0 / scale_y)\n return self\n\n def render(self, **kwargs):\n Shape.render(self, **kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n if isinstance(self.x, Length):\n self.x = self.x.value(relative_length=width, **kwargs)\n if isinstance(self.y, Length):\n self.y = self.y.value(relative_length=height, **kwargs)\n if isinstance(self.width, Length):\n self.width = self.width.value(relative_length=width, **kwargs)\n if isinstance(self.height, Length):\n self.height = self.height.value(relative_length=height, **kwargs)\n if isinstance(self.rx, Length):\n self.rx = self.rx.value(relative_length=width, **kwargs)\n if isinstance(self.ry, Length):\n self.ry = self.ry.value(relative_length=height, **kwargs)\n return self\n\n\nclass _RoundShape(Shape):\n def __init__(self, *args, **kwargs):\n self.cx = None\n self.cy = None\n self.rx = None\n self.ry = None\n Shape.__init__(self, *args, **kwargs)\n\n def property_by_object(self, s):\n Shape.property_by_object(self, s)\n self.cx = s.cx\n self.cy = s.cy\n self.rx = s.rx\n self.ry = s.ry\n\n def property_by_values(self, values):\n 
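        # Editor's note: a plain "r" attribute (as on circles) overrides any
        # rx/ry below; note the fallback defaults chosen here: missing radii
        # become 1 and a missing center becomes (0, 0).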
Shape.property_by_values(self, values)\n self.cx = Length(values.get(SVG_ATTR_CENTER_X)).value()\n self.cy = Length(values.get(SVG_ATTR_CENTER_Y)).value()\n self.rx = Length(values.get(SVG_ATTR_RADIUS_X)).value()\n self.ry = Length(values.get(SVG_ATTR_RADIUS_Y)).value()\n r = Length(values.get(SVG_ATTR_RADIUS, None)).value()\n if r is not None:\n self.rx = r\n self.ry = r\n else:\n if self.rx is None:\n self.rx = 1\n if self.ry is None:\n self.ry = 1\n center = values.get(\"center\", None)\n if center is not None:\n self.cx, self.cy = Point(center)\n\n if self.cx is None:\n self.cx = 0\n if self.cy is None:\n self.cy = 0\n\n def property_by_args(self, *args):\n arg_length = len(args)\n if arg_length >= 1:\n self.cx = Length(args[0]).value()\n if arg_length >= 2:\n self.cy = Length(args[1]).value()\n if arg_length >= 3:\n self.rx = Length(args[2]).value()\n if arg_length >= 4:\n self.ry = Length(args[3]).value()\n else:\n self.ry = self.rx\n if arg_length >= 5:\n self._init_shape(*args[4:])\n\n def __repr__(self):\n values = []\n if self.cx is not None:\n values.append(\"cx=%s\" % Length.str(self.cx))\n if self.cy is not None:\n values.append(\"cy=%s\" % Length.str(self.cy))\n if self.rx == self.ry or self.ry is None:\n values.append(\"r=%s\" % Length.str(self.rx))\n else:\n values.append(\"rx=%s\" % Length.str(self.rx))\n values.append(\"ry=%s\" % Length.str(self.ry))\n self._repr_shape(values)\n params = \", \".join(values)\n name = self._name()\n return \"%s(%s)\" % (name, params)\n\n @property\n def implicit_rx(self):\n if not self.apply:\n return self.rx\n prx = Point(self.rx, 0)\n prx *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(prx)\n\n @property\n def implicit_ry(self):\n if not self.apply:\n return self.ry\n pry = Point(0, self.ry)\n pry *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(pry)\n\n implicit_r = implicit_rx\n\n @property\n def implicit_center(self):\n center = Point(self.cx, self.cy)\n if not self.apply:\n return center\n center *= self.transform\n return center\n\n def segments(self, transformed=True):\n \"\"\"\n SVG path decomposition is given in SVG 2.0 10.3, 10.4.\n\n A move-to command to the point cx+rx,cy;\n arc to cx,cy+ry;\n arc to cx-rx,cy;\n arc to cx,cy-ry;\n arc with a segment-completing close path operation.\n\n Converts the parameters from an ellipse or a circle to a string for a\n Path object d-attribute\"\"\"\n original = self.apply\n self.apply = transformed\n path = Path()\n steps = 4\n step_size = tau / steps\n if (\n transformed\n and self.transform.value_scale_x() * self.transform.value_scale_y() < 0\n ):\n step_size = -step_size\n t_start = 0\n t_end = step_size\n # zero for either dimension, or a computed value of auto for both dimensions, disables rendering of the element.\n rx = self.implicit_rx\n ry = self.implicit_ry\n if rx == 0 or ry == 0:\n return ()\n center = self.implicit_center\n path.move((self.point_at_t(0)))\n for i in range(steps):\n path += Arc(\n self.point_at_t(t_start),\n self.point_at_t(t_end),\n center,\n rx=rx,\n ry=ry,\n rotation=self.rotation,\n sweep=step_size,\n )\n t_start = t_end\n t_end += step_size\n path.closed()\n self.apply = original\n return path.segments(transformed)\n\n def reify(self):\n \"\"\"\n Realizes the transform to the shape properties.\n\n Skewed and Rotated roundshapes cannot be reified.\n \"\"\"\n GraphicObject.reify(self)\n Transformable.reify(self)\n scale_x = abs(self.transform.value_scale_x())\n 
scale_y = abs(self.transform.value_scale_y())\n translate_x = self.transform.value_trans_x()\n translate_y = self.transform.value_trans_y()\n if (\n self.transform.value_skew_x() == 0\n and self.transform.value_skew_y() == 0\n and scale_x != 0\n and scale_y != 0\n ):\n self.cx *= scale_x\n self.cy *= scale_y\n self.cx += translate_x\n self.cy += translate_y\n self.transform *= Matrix.translate(-translate_x, -translate_y)\n self.rx = scale_x * self.rx\n self.ry = scale_y * self.ry\n self.transform *= Matrix.scale(1.0 / scale_x, 1.0 / scale_y)\n return self\n\n def render(self, **kwargs):\n Shape.render(self, **kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n if isinstance(self.cx, Length):\n self.cx = self.cx.value(relative_length=width, **kwargs)\n if isinstance(self.cy, Length):\n self.cy = self.cy.value(relative_length=height, **kwargs)\n if isinstance(self.rx, Length):\n self.rx = self.rx.value(relative_length=width, **kwargs)\n if isinstance(self.ry, Length):\n self.ry = self.ry.value(relative_length=height, **kwargs)\n return self\n\n def unit_matrix(self):\n \"\"\"\n return the unit matrix which would transform the unit circle into this ellipse.\n\n One of the valid parameterizations for ellipses is that they are all affine transforms of the unit circle.\n This provides exactly such a matrix.\n\n :return: matrix\n \"\"\"\n m = Matrix()\n m.post_scale(self.implicit_rx, self.implicit_ry)\n m.post_rotate(self.rotation)\n center = self.implicit_center\n m.post_translate(center.x, center.y)\n return m\n\n def arc_t(self, t0, t1):\n \"\"\"\n return the arc found between the given values of t on the ellipse.\n\n :param t0: t start\n :param t1: t end\n :return: arc\n \"\"\"\n return Arc(\n self.point_at_t(t0),\n self.point_at_t(t1),\n self.implicit_center,\n rx=self.implicit_rx,\n ry=self.implicit_ry,\n rotation=self.rotation,\n sweep=t1 - t0,\n )\n\n def arc_angle(self, a0, a1, ccw=None):\n \"\"\"\n return the arc found between the given angles on the ellipse.\n\n :param a0: start angle\n :param a1: end angle\n :param ccw: counter-clockwise flag; defaults to a0 > a1\n :return: arc\n \"\"\"\n if ccw is None:\n ccw = a0 > a1\n return Arc(\n self.point_at_angle(a0),\n self.point_at_angle(a1),\n self.implicit_center,\n rx=self.implicit_rx,\n ry=self.implicit_ry,\n rotation=self.rotation,\n ccw=ccw,\n )\n\n def point_at_angle(self, angle):\n \"\"\"\n find the point on the ellipse from the center at the given angle.\n Note: For non-circular arcs this is different than point(t).\n\n :param angle: angle from center to find point\n :return: point found\n \"\"\"\n a = self.implicit_rx\n b = self.implicit_ry\n if a == b:\n return self.point_at_t(angle)\n angle -= self.rotation\n t = atan2(a * tan(angle), b)\n tau_1_4 = tau / 4.0\n tau_3_4 = 3 * tau_1_4\n if tau_3_4 >= abs(angle) % tau > tau_1_4:\n t += tau / 2.0\n return self.point_at_t(t)\n\n def angle_at_point(self, p):\n \"\"\"\n find the angle to the point.\n\n :param p: point\n :return: angle to given point.\n \"\"\"\n if self.apply and not self.transform.is_identity():\n return self.implicit_center.angle_to(p)\n else:\n center = Point(self.cx, self.cy)\n return center.angle_to(p)\n\n def t_at_point(self, p):\n \"\"\"\n find the t parameter at the point.\n\n :param p: point\n :return: t parameter to the given point.\n \"\"\"\n angle = self.angle_at_point(p)\n angle -= self.rotation\n a = self.implicit_rx\n b = self.implicit_ry\n t = 
atan2(a * tan(angle), b)\n tau_1_4 = tau / 4.0\n tau_3_4 = 3 * tau_1_4\n if tau_3_4 >= abs(angle) % tau > tau_1_4:\n t += tau / 2.0\n return t\n\n def point_at_t(self, t):\n \"\"\"\n find the point that corresponds to given value t.\n Where t=0 is the first point and t=tau is the final point.\n\n In the case of a circle: t = angle.\n\n :param t:\n :return:\n \"\"\"\n rotation = self.rotation\n a = self.implicit_rx\n b = self.implicit_ry\n center = self.implicit_center\n cx = center.x\n cy = center.y\n cosTheta = cos(rotation)\n sinTheta = sin(rotation)\n cosT = cos(t)\n sinT = sin(t)\n px = cx + a * cosT * cosTheta - b * sinT * sinTheta\n py = cy + a * cosT * sinTheta + b * sinT * cosTheta\n return Point(px, py)\n\n def point(self, position, error=ERROR):\n \"\"\"\n find the point that corresponds to given value [0,1].\n Where t=0 is the first point and t=1 is the final point.\n\n :param position:\n :return: point at t\n \"\"\"\n return self.point_at_t(tau * position)\n\n def _ramanujan_length(self):\n a = self.implicit_rx\n b = self.implicit_ry\n if b > a:\n a, b = b, a\n h = (a - b) ** 2 / (a + b) ** 2\n return pi * (a + b) * (1 + (3 * h / (10 + sqrt(4 - 3 * h))))\n\n\nclass Ellipse(_RoundShape):\n \"\"\"\n SVG Ellipse shapes are defined in SVG2 10.4\n https://www.w3.org/TR/SVG2/shapes.html#EllipseElement\n\n These have geometric properties cx, cy, rx, ry\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n _RoundShape.__init__(self, *args, **kwargs)\n\n def __copy__(self):\n return Ellipse(self)\n\n def _name(self):\n return self.__class__.__name__\n\n\nclass Circle(_RoundShape):\n \"\"\"\n SVG Circle shapes are defined in SVG2 10.3\n https://www.w3.org/TR/SVG2/shapes.html#CircleElement\n\n These have geometric properties cx, cy, r\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n _RoundShape.__init__(self, *args, **kwargs)\n\n def __copy__(self):\n return Circle(self)\n\n def _name(self):\n return self.__class__.__name__\n\n\nclass SimpleLine(Shape):\n \"\"\"\n SVG Line shapes are defined in SVG2 10.5\n https://www.w3.org/TR/SVG2/shapes.html#LineElement\n\n These have geometric properties x1, y1, x2, y2\n\n These are called Line in SVG but that name is already used for Line(PathSegment)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.x1 = None\n self.y1 = None\n self.x2 = None\n self.y2 = None\n Shape.__init__(self, *args, **kwargs)\n\n def property_by_object(self, s):\n Shape.property_by_object(self, s)\n self.x1 = s.x1\n self.y1 = s.y1\n self.x2 = s.x2\n self.y2 = s.y2\n\n def property_by_values(self, values):\n Shape.property_by_values(self, values)\n self.x1 = Length(values.get(SVG_ATTR_X1, 0)).value()\n self.y1 = Length(values.get(SVG_ATTR_Y1, 0)).value()\n self.x2 = Length(values.get(SVG_ATTR_X2, 0)).value()\n self.y2 = Length(values.get(SVG_ATTR_Y2, 0)).value()\n\n def property_by_args(self, *args):\n arg_length = len(args)\n if arg_length >= 1:\n self.x1 = Length(args[0]).value()\n if arg_length >= 2:\n self.y1 = Length(args[1]).value()\n if arg_length >= 3:\n self.x2 = Length(args[2]).value()\n if arg_length >= 4:\n self.y2 = Length(args[3]).value()\n self._init_shape(*args[4:])\n\n def __repr__(self):\n values = []\n if self.x1 is not None:\n values.append(\"x1=%s\" % repr(self.x1))\n if self.y1 is not None:\n values.append(\"y1=%s\" % repr(self.y1))\n if self.x2 is not None:\n values.append(\"x2=%s\" % repr(self.x2))\n if self.y2 is not None:\n values.append(\"y2=%s\" % repr(self.y2))\n self._repr_shape(values)\n params = \", \".join(values)\n return 
\"SimpleLine(%s)\" % params\n\n def __copy__(self):\n return SimpleLine(self)\n\n @property\n def implicit_x1(self):\n point = Point(self.x1, self.y1)\n point *= self.transform\n return point.x\n\n @property\n def implicit_y1(self):\n point = Point(self.x1, self.y1)\n point *= self.transform\n return point.y\n\n @property\n def implicit_x2(self):\n point = Point(self.x2, self.y2)\n point *= self.transform\n return point.x\n\n @property\n def implicit_y2(self):\n point = Point(self.x2, self.y2)\n point *= self.transform\n return point.y\n\n def segments(self, transformed=True):\n \"\"\"\n SVG path decomposition is given in SVG 2.0 10.5.\n\n perform an absolute moveto operation to absolute location (x1,y1)\n perform an absolute lineto operation to absolute location (x2,y2)\n\n :returns Path_d path for line.\n \"\"\"\n\n start = Point(self.x1, self.y1)\n end = Point(self.x2, self.y2)\n if transformed:\n start *= self.transform\n end *= self.transform\n return (Move(None, start), Line(start, end))\n\n def reify(self):\n \"\"\"\n Realizes the transform to the shape properties.\n\n SimpleLines are perfectly reified.\n \"\"\"\n GraphicObject.reify(self)\n Transformable.reify(self)\n matrix = self.transform\n p = Point(self.x1, self.y1)\n p *= matrix\n self.x1 = p.x\n self.y1 = p.y\n\n p = Point(self.x2, self.y2)\n p *= matrix\n self.x2 = p.x\n self.y2 = p.y\n\n matrix.reset()\n return self\n\n def render(self, **kwargs):\n Shape.render(self, **kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n if isinstance(self.x1, Length):\n self.x1 = self.x1.value(relative_length=width, **kwargs)\n if isinstance(self.y1, Length):\n self.y1 = self.y1.value(relative_length=height, **kwargs)\n if isinstance(self.x2, Length):\n self.x2 = self.x2.value(relative_length=width, **kwargs)\n if isinstance(self.y2, Length):\n self.y2 = self.y2.value(relative_length=height, **kwargs)\n return self\n\n\nclass _Polyshape(Shape):\n \"\"\"Base form of Polygon and Polyline since the objects are nearly the same.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.points = list()\n Shape.__init__(self, *args, **kwargs)\n\n def property_by_object(self, s):\n Shape.property_by_object(self, s)\n self._init_points(s.points)\n\n def property_by_values(self, values):\n Shape.property_by_values(self, values)\n self._init_points(values)\n\n def property_by_args(self, *args):\n self._init_points(args)\n\n def _init_points(self, points):\n if len(self.points) != 0:\n return\n if points is None:\n self.points = list()\n return\n if isinstance(points, dict):\n if SVG_ATTR_POINTS in points:\n points = points[SVG_ATTR_POINTS]\n else:\n self.points = list()\n return\n try:\n if len(points) == 1:\n points = points[0]\n except TypeError:\n pass\n if isinstance(points, str):\n findall = REGEX_COORD_PAIR.findall(points)\n self.points = [Point(float(j), float(k)) for j, k in findall]\n elif isinstance(points, (list, tuple)):\n if len(points) == 0:\n self.points = list()\n else:\n first_point = points[0]\n if isinstance(first_point, (float, int)):\n self.points = list(map(Point, zip(*[iter(points)] * 2)))\n elif isinstance(first_point, (list, tuple, complex, str, Point)):\n self.points = list(map(Point, points))\n else:\n self.points = list()\n\n def __repr__(self):\n values = []\n if self.points is not None:\n s = \", \".join(map(str, self.points))\n values.append(\"points=(%s)\" % 
repr(s))\n self._repr_shape(values)\n params = \", \".join(values)\n name = self._name()\n return \"%s(%s)\" % (name, params)\n\n def __len__(self):\n return len(self.points)\n\n def __getitem__(self, item):\n return self.points[item]\n\n def segments(self, transformed=True):\n \"\"\"\n Polyline and Polygon decomposition is given in SVG2. 10.6 and 10.7\n\n * perform an absolute moveto operation to the first coordinate pair in the list of points\n * for each subsequent coordinate pair, perform an absolute lineto operation to that coordinate pair.\n * (Polygon-only) perform a closepath command\n\n Note: For a polygon/polyline made from n points, the resulting path will\n be composed of n lines (even if some of these lines have length zero).\n \"\"\"\n if self.transform.is_identity() or not transformed:\n points = self.points\n else:\n points = list(map(self.transform.point_in_matrix_space, self.points))\n if len(points) == 0:\n return []\n segments = [Move(None, points[0])]\n last = points[0]\n for i in range(1, len(points)):\n current = points[i]\n segments.append(Line(last, current))\n last = current\n if isinstance(self, Polygon):\n segments.append(Close(last, points[0]))\n return segments\n\n def reify(self):\n \"\"\"\n Realizes the transform to the shape properties.\n\n Polyshapes are perfectly reified.\n \"\"\"\n GraphicObject.reify(self)\n Transformable.reify(self)\n matrix = self.transform\n for p in self:\n p *= matrix\n matrix.reset()\n return self\n\n\nclass Polyline(_Polyshape):\n \"\"\"\n SVG Polyline shapes are defined in SVG2 10.6\n https://www.w3.org/TR/SVG2/shapes.html#PolylineElement\n\n These have geometric properties points\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n _Polyshape.__init__(self, *args, **kwargs)\n\n def __copy__(self):\n return Polyline(self)\n\n def _name(self):\n return self.__class__.__name__\n\n\nclass Polygon(_Polyshape):\n \"\"\"\n SVG Polygon shapes are defined in SVG2 10.7\n https://www.w3.org/TR/SVG2/shapes.html#PolygonElement\n\n These have geometric properties points\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n _Polyshape.__init__(self, *args, **kwargs)\n\n def __copy__(self):\n return Polygon(self)\n\n def _name(self):\n return self.__class__.__name__\n\n\nclass Subpath:\n \"\"\"\n Subpath is a Path-backed window implementation. It does not store a list of segments but rather\n stores a Path, start position, end position. When a function is called on a subpath, the result of\n those events is performed on the backing Path. 
When the backing Path is modified the behavior is\n undefined.\"\"\"\n\n def __init__(self, path, start, end):\n self._path = path\n self._start = start\n self._end = end\n\n def __copy__(self):\n return Subpath(Path(self._path), self._start, self._end)\n\n def __getitem__(self, index):\n return self._path[self.index_to_path_index(index)]\n\n def __setitem__(self, index, value):\n self._path[self.index_to_path_index(index)] = value\n\n def __delitem__(self, index):\n del self._path[self.index_to_path_index(index)]\n self._end -= 1\n\n def __iadd__(self, other):\n if isinstance(other, str):\n p = Path(other)\n self._path[self._end : self._end] = p\n elif isinstance(other, Path):\n p = copy(other)\n self._path[self._end : self._end] = p\n elif isinstance(other, PathSegment):\n self._path.insert(self._end, other)\n else:\n return NotImplemented\n return self\n\n def __add__(self, other):\n if isinstance(other, (str, Path, PathSegment)):\n n = copy(self)\n n += other\n return n\n return NotImplemented\n\n def __radd__(self, other):\n if isinstance(other, str):\n path = Path(other)\n path.extend(map(copy, self._path))\n return path\n elif isinstance(other, PathSegment):\n path = Path(self)\n path.insert(0, other)\n return path\n else:\n return NotImplemented\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n for e in self:\n e *= other\n return self\n\n def __mul__(self, other):\n if isinstance(other, (Matrix, str)):\n n = copy(self)\n n *= other\n return n\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __iter__(self):\n class Iterator:\n def __init__(self, subpath):\n self.n = subpath._start - 1\n self.subpath = subpath\n\n def __next__(self):\n self.n += 1\n try:\n if self.n > self.subpath._end:\n raise StopIteration\n return self.subpath._path[self.n]\n except IndexError:\n raise StopIteration\n\n next = __next__\n\n return Iterator(self)\n\n def __len__(self):\n return self._end - self._start + 1\n\n def __str__(self):\n return self.d()\n\n def __repr__(self):\n return \"Path(%s)\" % (\", \".join(repr(x) for x in self))\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self.__eq__(Path(other))\n if not isinstance(other, (Path, Subpath)):\n return NotImplemented\n if len(self) != len(other):\n return False\n for s, o in zip(self, other):\n if not s == o:\n return False\n return True\n\n def __ne__(self, other):\n if not isinstance(other, (Path, Subpath, str)):\n return NotImplemented\n return not self == other\n\n def segments(self, transformed=True):\n path = self._path\n if transformed:\n return [\n s * path.transform for s in path._segments[self._start : self._end + 1]\n ]\n return path._segments[self._start : self._end + 1]\n\n def _numeric_index(self, index):\n if index < 0:\n return self._end + index + 1\n else:\n return self._start + index\n\n def index_to_path_index(self, index):\n if isinstance(index, slice):\n start = index.start\n stop = index.stop\n step = index.step\n if start is None:\n start = 0\n start = self._numeric_index(start)\n if stop is None:\n stop = len(self)\n stop = self._numeric_index(stop)\n return slice(start, stop, step)\n return self._numeric_index(index)\n\n def bbox(self):\n \"\"\"returns a bounding box for the input Path\"\"\"\n segments = self._path._segments[self._start : self._end + 1]\n bbs = [seg.bbox() for seg in segments if not isinstance(seg, (Close, Move))]\n try:\n xmins, ymins, xmaxs, ymaxs = list(zip(*bbs))\n except ValueError:\n return None # No bounding box 
items existed. So no bounding box.\n xmin = min(xmins)\n xmax = max(xmaxs)\n ymin = min(ymins)\n ymax = max(ymaxs)\n return xmin, ymin, xmax, ymax\n\n def d(self, relative=None, smooth=None):\n segments = self._path._segments[self._start : self._end + 1]\n return Path.svg_d(segments, relative=relative, smooth=smooth)\n\n def _reverse_segments(self, start, end):\n \"\"\"Reverses segments between the given indexes in the subpath space.\"\"\"\n segments = self._path._segments # must avoid path validation.\n s = self.index_to_path_index(start)\n e = self.index_to_path_index(end)\n while s <= e:\n start_segment = segments[s]\n end_segment = segments[e]\n start_segment.reverse()\n if start_segment is not end_segment:\n end_segment.reverse()\n segments[s] = end_segment\n segments[e] = start_segment\n s += 1\n e -= 1\n start = self.index_to_path_index(start)\n end = self.index_to_path_index(end)\n self._path._validate_connection(start - 1, prefer_second=True)\n self._path._validate_connection(end)\n\n def reverse(self):\n size = len(self)\n if size == 0:\n return\n start = 0\n end = size - 1\n if isinstance(self[-1], Close):\n end -= 1\n if isinstance(\n self[0], Move\n ): # Move remains in place but references next element.\n start += 1\n self._reverse_segments(start, end)\n if size > 1:\n if isinstance(self[0], Move):\n self[0].end = Point(self[1].start)\n last = self[-1]\n if isinstance(last, Close):\n last.reverse()\n if last.start != self[-2].end:\n last.start = Point(self[-2].end)\n if last.end != self[0].end:\n last.end = Point(self[0].end)\n return self\n\n\nclass Group(SVGElement, Transformable, list):\n \"\"\"\n Group Container element can have children.\n SVG 2.0 <g> are defined in:\n 5.2. Grouping: the g element\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n Transformable.__init__(self, *args, **kwargs)\n list.__init__(self)\n if len(args) >= 1:\n s = args[0]\n if isinstance(s, Group):\n self.extend(list(map(copy, s)))\n return\n SVGElement.__init__(self, *args, **kwargs)\n\n def __imul__(self, other):\n if isinstance(other, str):\n other = Matrix(other)\n if isinstance(other, Matrix):\n self.transform *= other\n for e in self:\n e *= other\n return self\n\n def render(self, **kwargs):\n Transformable.render(self, **kwargs)\n\n def __copy__(self):\n return Group(self)\n\n def property_by_object(self, s):\n Transformable.property_by_object(self, s)\n\n def property_by_values(self, values):\n Transformable.property_by_values(self, values)\n\n def select(self, conditional=None):\n \"\"\"\n Finds all flattened subobjects of this group for which the conditional returns\n true.\n\n :param conditional: function taking an element and returning True to include it or False to exclude it\n \"\"\"\n if conditional is None:\n for subitem in self:\n yield subitem\n if isinstance(subitem, Group):\n for s in subitem.select(conditional):\n yield s\n else:\n for subitem in self:\n if conditional(subitem):\n yield subitem\n if isinstance(subitem, Group):\n for s in subitem.select(conditional):\n yield s\n\n def reify(self):\n Transformable.reify(self)\n\n def bbox(self, transformed=True):\n \"\"\"\n Returns the bounding box of the given object.\n\n In the case of groups this is the union of all the bounding boxes of all bound children.\n\n Setting transformed to false may yield unexpected results if subitems are transformed in non-uniform\n ways.\n\n :param transformed: bounding box of the properly transformed children.\n :return:\n \"\"\"\n boundary_points = []\n for e in self.select():\n if not hasattr(e, 
'bbox'):\n continue\n box = e.bbox(False)\n if box is None:\n continue\n top_left = (box[0], box[1])\n top_right = (box[2], box[1])\n bottom_left = (box[0], box[3])\n bottom_right = (box[2], box[3])\n if transformed:\n top_left = e.transform.point_in_matrix_space(top_left)\n top_right = e.transform.point_in_matrix_space(top_right)\n bottom_left = e.transform.point_in_matrix_space(bottom_left)\n bottom_right = e.transform.point_in_matrix_space(bottom_right)\n boundary_points.append(top_left)\n boundary_points.append(top_right)\n boundary_points.append(bottom_left)\n boundary_points.append(bottom_right)\n if len(boundary_points) == 0:\n return None\n xmin = min([e[0] for e in boundary_points])\n ymin = min([e[1] for e in boundary_points])\n xmax = max([e[0] for e in boundary_points])\n ymax = max([e[1] for e in boundary_points])\n return xmin, ymin, xmax, ymax\n\n\nclass ClipPath(SVGElement, list):\n \"\"\"\n clipPath elements are defined in svg 14.3.5\n https://www.w3.org/TR/SVG11/masking.html#ClipPathElement\n\n Clip paths conceptually define a 1-bit mask for images. These are usually defined within\n defs blocks and do not render themselves but rather are attached by IRI references to the\n elements they clip.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n list.__init__(self)\n self.unit_type = SVG_UNIT_TYPE_USERSPACEONUSE\n SVGElement.__init__(self, *args, **kwargs)\n\n def property_by_object(self, s):\n SVGElement.property_by_object(self, s)\n self.unit_type = s.unit_type\n\n def property_by_values(self, values):\n SVGElement.property_by_values(self, values)\n self.unit_type = self.values.get(\n SVG_ATTR_CLIP_UNIT_TYPE, SVG_UNIT_TYPE_USERSPACEONUSE\n )\n\n\nclass Pattern(SVGElement, list):\n def __init__(self, *args, **kwargs):\n self.viewbox = None\n self.preserve_aspect_ratio = None\n self.x = None\n self.y = None\n self.width = None\n self.height = None\n self.href = None\n self.pattern_content_units = None # UserSpaceOnUse default\n self.pattern_transform = None\n self.pattern_units = None\n SVGElement.__init__(self, *args, **kwargs)\n\n def __int__(self):\n return 0\n\n @property\n def viewbox_transform(self):\n if self.viewbox is None:\n return \"\"\n return self.viewbox.transform(self)\n\n def property_by_object(self, s):\n SVGElement.property_by_object(self, s)\n self.viewbox = s.viewbox\n self.preserve_aspect_ratio = s.preserve_aspect_ratio\n\n self.x = s.x\n self.y = s.y\n self.width = s.width\n self.height = s.height\n self.href = s.href\n self.pattern_content_units = s.pattern_content_units\n self.pattern_transform = (\n Matrix(s.pattern_transform) if s.pattern_transform is not None else None\n )\n self.pattern_units = s.pattern_units\n\n def property_by_values(self, values):\n SVGElement.property_by_values(self, values)\n if XLINK_HREF in values:\n self.href = values[XLINK_HREF]\n elif SVG_HREF in values:\n self.href = values[SVG_HREF]\n viewbox = values.get(SVG_ATTR_VIEWBOX)\n if viewbox is not None:\n self.viewbox = Viewbox(viewbox)\n if SVG_ATTR_PRESERVEASPECTRATIO in values:\n self.preserve_aspect_ratio = values[SVG_ATTR_PRESERVEASPECTRATIO]\n self.x = Length(values.get(SVG_ATTR_X, 0)).value()\n self.y = Length(values.get(SVG_ATTR_Y, 0)).value()\n self.width = Length(values.get(SVG_ATTR_WIDTH, \"100%\")).value()\n self.height = Length(values.get(SVG_ATTR_HEIGHT, \"100%\")).value()\n if SVG_ATTR_PATTERN_CONTENT_UNITS in values:\n self.pattern_content_units = values[SVG_ATTR_PATTERN_CONTENT_UNITS]\n if SVG_ATTR_PATTERN_TRANSFORM in values:\n self.pattern_transform = 
Matrix(values[SVG_ATTR_PATTERN_TRANSFORM])\n if SVG_ATTR_PATTERN_UNITS in values:\n self.pattern_units = values[SVG_ATTR_PATTERN_UNITS]\n\n def render(self, **kwargs):\n if self.pattern_transform is not None:\n self.pattern_transform.render(**kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n if isinstance(self.x, Length):\n self.x = self.x.value(relative_length=width, **kwargs)\n if isinstance(self.y, Length):\n self.y = self.y.value(relative_length=height, **kwargs)\n if isinstance(self.width, Length):\n self.width = self.width.value(relative_length=width, **kwargs)\n if isinstance(self.height, Length):\n self.height = self.height.value(relative_length=height, **kwargs)\n return self\n\n\nclass SVGText(SVGElement, GraphicObject, Transformable):\n \"\"\"\n SVG Text are defined in SVG 2.0 Chapter 11\n\n No methods are implemented to perform a text to path conversion.\n\n However, if such a method exists the assumption is that the results will be\n placed in the .path attribute, and functions like bbox() will check if such\n a value exists.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if len(args) >= 1:\n self.text = args[0]\n else:\n self.text = \"\"\n self.width = 0\n self.height = 0\n self.x = 0\n self.y = 0\n self.dx = 0\n self.dy = 0\n self.anchor = \"start\" # start, middle, end.\n self.font_family = \"sans-serif\"\n self.font_size = 16.0 # 16 point font 'normal'\n self.font_weight = 400.0 # Thin=100, Normal=400, Bold=700\n self.font_face = \"\"\n\n self.path = None\n Transformable.__init__(self, *args, **kwargs)\n GraphicObject.__init__(self, *args, **kwargs)\n SVGElement.__init__(self, *args, **kwargs)\n\n def __str__(self):\n parts = list()\n parts.append(\"'%s'\" % self.text)\n parts.append(\"font_family=%s\" % self.font_family)\n parts.append(\"anchor=%s\" % self.anchor)\n parts.append(\"font_size=%d\" % self.font_size)\n parts.append(\"font_weight=%s\" % str(self.font_weight))\n return \"Text(%s)\" % (\", \".join(parts))\n\n def __repr__(self):\n parts = list()\n parts.append(\"%s\" % self.text)\n parts.append(\"font_family=%s\" % self.font_family)\n parts.append(\"anchor=%s\" % self.anchor)\n parts.append(\"font_size=%d\" % self.font_size)\n parts.append(\"font_weight=%s\" % str(self.font_weight))\n return \"Text(%s)\" % (\", \".join(parts))\n\n def property_by_object(self, s):\n Transformable.property_by_object(self, s)\n GraphicObject.property_by_object(self, s)\n self.text = s.text\n self.x = s.x\n self.y = s.y\n self.width = s.width\n self.height = s.height\n self.dx = s.dx\n self.dy = s.dy\n self.anchor = s.anchor\n self.font_family = s.font_family\n self.font_size = s.font_size\n self.font_weight = s.font_weight\n self.font_face = s.font_face\n\n def parse_font(self, font):\n \"\"\"\n CSS Fonts 3 has a shorthand font property which serves to provide a single location to define:\n ‘font-style’, ‘font-variant’, ‘font-weight’, ‘font-stretch’, ‘font-size’, ‘line-height’, and ‘font-family’\n\n font-style: normal | italic | oblique\n font-variant: normal | small-caps\n font-weight: normal | bold | bolder | lighter | 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900\n font-stretch: normal | ultra-condensed | extra-condensed | condensed | semi-condensed | semi-expanded | expanded | extra-expanded | ultra-expanded\n font-size: <absolute-size> | <relative-size> | <length-percentage>\n line-height: '/' <‘line-height’>\n 
font-family: [ <family-name> | <generic-family> ] #\n generic-family: ‘serif’, ‘sans-serif’, ‘cursive’, ‘fantasy’, and ‘monospace’\n \"\"\"\n # https://www.w3.org/TR/css-fonts-3/#font-prop\n font_elements = list(*re.findall(REGEX_CSS_FONT, font))\n\n font_style = font_elements[0]\n font_variant = font_elements[1]\n font_weight = font_elements[2]\n font_stretch = font_elements[3]\n font_size = font_elements[4]\n line_height = font_elements[5]\n font_face = font_elements[6]\n font_family = font_elements[7]\n if len(font_weight) > 0:\n self.font_weight = self.parse_font_weight(font_weight)\n if len(font_size) > 0:\n self.font_size = Length(font_size).value()\n if len(font_face) > 0:\n if font_face.endswith(\",\"):\n font_face = font_face[:-1]\n self.font_face = font_face\n\n if len(font_family) > 0:\n self.font_family = font_family\n\n def parse_font_weight(self, weight):\n if weight == \"bold\":\n return 700\n if weight == \"normal\":\n return 400\n try:\n return int(weight)\n except ValueError:\n return 400\n\n def property_by_values(self, values):\n Transformable.property_by_values(self, values)\n GraphicObject.property_by_values(self, values)\n self.anchor = values.get(SVG_ATTR_TEXT_ANCHOR, self.anchor)\n self.font_face = values.get(SVG_ATTR_FONT_FACE)\n self.font_family = values.get(SVG_ATTR_FONT_FAMILY, self.font_family)\n self.font_size = Length(values.get(SVG_ATTR_FONT_SIZE, self.font_size)).value()\n self.font_weight = values.get(SVG_ATTR_FONT_WEIGHT, self.font_weight)\n font = values.get(SVG_ATTR_FONT, None)\n if font is not None:\n self.parse_font(font)\n self.text = values.get(SVG_TAG_TEXT, self.text)\n self.x = Length(values.get(SVG_ATTR_X, self.x)).value()\n self.y = Length(values.get(SVG_ATTR_Y, self.y)).value()\n self.dx = Length(values.get(SVG_ATTR_DX, self.dx)).value()\n self.dy = Length(values.get(SVG_ATTR_DY, self.dy)).value()\n\n def reify(self):\n GraphicObject.reify(self)\n Transformable.reify(self)\n\n def render(self, **kwargs):\n GraphicObject.render(self, **kwargs)\n Transformable.render(self, **kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n if isinstance(self.x, Length):\n self.x = self.x.value(relative_length=width, **kwargs)\n if isinstance(self.y, Length):\n self.y = self.y.value(relative_length=height, **kwargs)\n if isinstance(self.dx, Length):\n self.dx = self.dx.value(relative_length=width, **kwargs)\n if isinstance(self.dy, Length):\n self.dy = self.dy.value(relative_length=height, **kwargs)\n return self\n\n def __copy__(self):\n return SVGText(self)\n\n def bbox(self, transformed=True):\n \"\"\"\n Get the bounding box for the given text object.\n \"\"\"\n if self.path is not None:\n return (self.path * self.transform).bbox(transformed=True)\n width = self.width\n height = self.height\n xmin = self.x\n ymin = self.y - height\n xmax = self.x + width\n ymax = self.y\n if not hasattr(self, \"anchor\") or self.anchor == \"start\":\n pass\n elif self.anchor == \"middle\":\n xmin -= width / 2\n xmax -= width / 2\n elif self.anchor == \"end\":\n xmin -= width\n xmax -= width\n if transformed:\n p0 = self.transform.transform_point([xmin, ymin])\n p1 = self.transform.transform_point([xmin, ymax])\n p2 = self.transform.transform_point([xmax, ymin])\n p3 = self.transform.transform_point([xmax, ymax])\n xmin = min(p0[0], p1[0], p2[0], p3[0])\n ymin = min(p0[1], p1[1], p2[1], p3[1])\n xmax = max(p0[0], p1[0], 
p2[0], p3[0])\n ymax = max(p0[1], p1[1], p2[1], p3[1])\n return xmin, ymin, xmax, ymax\n\n\nclass SVGImage(SVGElement, GraphicObject, Transformable):\n \"\"\"\n SVG Images are defined in SVG 2.0 12.3\n\n This class is called SVG Image rather than image as a guard against many Image objects\n which are quite useful and would be ideal for reading the linked or contained data.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.url = None\n self.data = None\n self.viewbox = None\n self.preserve_aspect_ratio = None\n self.x = None\n self.y = None\n self.width = None\n self.height = None\n\n self.image = None\n self.image_width = None\n self.image_height = None\n Transformable.__init__(self, *args, **kwargs)\n GraphicObject.__init__(self, *args, **kwargs)\n SVGElement.__init__(\n self, *args, **kwargs\n ) # Dataurl requires this be processed first.\n\n if self.url is not None:\n if self.url.startswith(\"data:image/\"):\n # Data URL\n from base64 import b64decode\n\n if self.url.startswith(\"data:image/png;base64,\"):\n self.data = b64decode(self.url[22:])\n elif self.url.startswith(\"data:image/jpg;base64,\"):\n self.data = b64decode(self.url[22:])\n elif self.url.startswith(\"data:image/jpeg;base64,\"):\n self.data = b64decode(self.url[23:])\n elif self.url.startswith(\"data:image/svg+xml;base64,\"):\n self.data = b64decode(self.url[26:])\n\n def property_by_object(self, s):\n SVGElement.property_by_object(self, s)\n Transformable.property_by_object(self, s)\n GraphicObject.property_by_object(self, s)\n self.url = s.url\n self.data = s.data\n self.viewbox = s.viewbox\n self.preserve_aspect_ratio = s.preserve_aspect_ratio\n\n self.x = s.x\n self.y = s.y\n self.width = s.width\n self.height = s.height\n\n self.image = s.image\n self.image_width = s.image_width\n self.image_height = s.image_height\n\n def property_by_values(self, values):\n SVGElement.property_by_values(self, values)\n Transformable.property_by_values(self, values)\n GraphicObject.property_by_values(self, values)\n if XLINK_HREF in values:\n self.url = values[XLINK_HREF]\n elif SVG_HREF in values:\n self.url = values[SVG_HREF]\n viewbox = values.get(SVG_ATTR_VIEWBOX)\n if viewbox is not None:\n self.viewbox = Viewbox(viewbox)\n if SVG_ATTR_PRESERVEASPECTRATIO in values:\n self.preserve_aspect_ratio = values[SVG_ATTR_PRESERVEASPECTRATIO]\n self.x = Length(values.get(SVG_ATTR_X, 0)).value()\n self.y = Length(values.get(SVG_ATTR_Y, 0)).value()\n self.width = Length(values.get(SVG_ATTR_WIDTH, \"100%\")).value()\n self.height = Length(values.get(SVG_ATTR_HEIGHT, \"100%\")).value()\n if \"image\" in values:\n self.image = values[\"image\"]\n self.image_width, self.image_height = self.image.size\n\n def render(self, **kwargs):\n GraphicObject.render(self, **kwargs)\n Transformable.render(self, **kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n if isinstance(self.x, Length):\n self.x = self.x.value(relative_length=width, **kwargs)\n if isinstance(self.y, Length):\n self.y = self.y.value(relative_length=height, **kwargs)\n if isinstance(self.width, Length):\n self.width = self.width.value(relative_length=width, **kwargs)\n if isinstance(self.height, Length):\n self.height = self.height.value(relative_length=height, **kwargs)\n return self\n\n def __copy__(self):\n \"\"\"\n Copy of SVGImage. 
This will not copy the .image subobject in a deep manner\n since it's optional that that object will exist or not. As such if using PIL it would\n be required to either say self.image = self.image.copy() or call .load() again.\n \"\"\"\n return SVGImage(self)\n\n @property\n def viewbox_transform(self):\n if self.viewbox is None:\n return \"\"\n return self.viewbox.transform(self)\n\n def load(self, directory=None):\n try:\n from PIL import Image\n\n if self.data is not None:\n self.load_data()\n elif self.url is not None:\n self.load_file(directory)\n self.set_values_by_image()\n except ImportError:\n pass\n\n def load_data(self):\n try:\n # This code will not activate without PIL/Pillow installed.\n from PIL import Image\n\n if self.data is not None:\n from io import BytesIO\n\n self.image = Image.open(BytesIO(self.data))\n else:\n return\n except ImportError:\n # PIL/Pillow not found, decoding data is most we can do.\n pass\n\n def load_file(self, directory):\n try:\n # This code will not activate without PIL/Pillow installed.\n from PIL import Image\n\n if self.url is not None:\n try:\n self.image = Image.open(self.url)\n except IOError:\n try:\n if directory is not None:\n from os.path import join\n\n relpath = join(directory, self.url)\n self.image = Image.open(relpath)\n except IOError:\n return\n except ImportError:\n # PIL/Pillow not found, decoding data is most we can do.\n pass\n\n def set_values_by_image(self):\n if self.image is None:\n return # No image to set values by.\n self.image_width = self.image.width\n self.image_height = self.image.height\n self.viewbox = Viewbox(\n \"0 0 %d %d\" % (self.image_width, self.image_height),\n self.preserve_aspect_ratio,\n )\n self.render(width=self.image_width, height=self.image_height)\n self.transform = Matrix(self.viewbox_transform) * self.transform\n\n def bbox(self, transformed=True):\n \"\"\"\n Get the bounding box for the given image object\n \"\"\"\n if self.image_width is None or self.image_height is None:\n p = Point(0, 0)\n p *= self.transform\n return p.x, p.y, p.x, p.y\n width = self.image_width\n height = self.image_height\n if transformed:\n p = (\n Point(0, 0) * self.transform,\n Point(width, 0) * self.transform,\n Point(width, height) * self.transform,\n Point(0, height) * self.transform,\n )\n else:\n p = (Point(0, 0), Point(width, 0), Point(width, height), Point(0, height))\n x_vals = list(s.x for s in p)\n y_vals = list(s.y for s in p)\n min_x = min(x_vals)\n min_y = min(y_vals)\n max_x = max(x_vals)\n max_y = max(y_vals)\n return min_x, min_y, max_x, max_y\n\n\nclass Desc(SVGElement):\n def __init__(self, values, desc=None):\n self.desc = desc\n SVGElement.__init__(self, **values)\n\n def property_by_object(self, obj):\n SVGElement.property_by_object(self, obj)\n self.desc = obj.desc\n\n def property_by_values(self, values):\n SVGElement.property_by_values(self, values)\n if SVG_TAG_DESC in values:\n self.desc = values[SVG_TAG_DESC]\n\n\nSVGDesc = Desc\n\n\nclass Title(SVGElement):\n def __init__(self, values, title=None):\n self.title = title\n SVGElement.__init__(self, **values)\n\n def property_by_object(self, obj):\n SVGElement.property_by_object(self, obj)\n self.title = obj.title\n\n def property_by_values(self, values):\n SVGElement.property_by_values(self, values)\n if SVG_TAG_TITLE in values:\n self.title = values[SVG_TAG_TITLE]\n\n\nclass SVG(Group):\n \"\"\"\n SVG Document and Parsing.\n\n SVG is the SVG main object and also the embedded SVGs within it. It's a subtype of Group. 
The SVG has a viewbox,\n and parsing methods which can be used if given a stream, path, or svg string.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.objects = {}\n self.x = None\n self.y = None\n self.width = None\n self.height = None\n self.viewbox = None\n Group.__init__(self, *args, **kwargs)\n\n @property\n def implicit_position(self):\n if not self.apply:\n return Point(self.x, self.y)\n point = Point(self.x, self.y)\n point *= self.transform\n return point\n\n @property\n def implicit_x(self):\n if not self.apply:\n return self.x\n return self.implicit_position[0]\n\n @property\n def implicit_y(self):\n if not self.apply:\n return self.y\n return self.implicit_position[1]\n\n @property\n def implicit_width(self):\n if not self.apply:\n return self.width\n p = Point(self.width, 0)\n p *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(p)\n\n @property\n def implicit_height(self):\n if not self.apply:\n return self.height\n p = Point(0, self.height)\n p *= self.transform\n origin = Point(0, 0)\n origin *= self.transform\n return origin.distance_to(p)\n\n def property_by_object(self, s):\n Group.property_by_object(self, s)\n self.x = s.x\n self.y = s.y\n self.width = s.width\n self.height = s.height\n self.viewbox = Viewbox(s.viewbox) if s.viewbox is not None else None\n\n def property_by_values(self, values):\n Group.property_by_values(self, values)\n self.x = Length(values.get(SVG_ATTR_X, 0)).value()\n self.y = Length(values.get(SVG_ATTR_Y, 0)).value()\n self.width = Length(values.get(SVG_ATTR_WIDTH, \"100%\")).value()\n self.height = Length(values.get(SVG_ATTR_HEIGHT, \"100%\")).value()\n viewbox = values.get(SVG_ATTR_VIEWBOX)\n par = values.get(SVG_ATTR_PRESERVEASPECTRATIO)\n self.viewbox = Viewbox(viewbox, par) if viewbox is not None else None\n\n def get_element_by_id(self, id):\n return self.objects.get(id)\n\n def get_element_by_url(self, url):\n for _id in REGEX_IRI.findall(url):\n return self.get_element_by_id(_id)\n\n def render(self, **kwargs):\n Group.render(self, **kwargs)\n width = kwargs.get(\"width\", kwargs.get(\"relative_length\"))\n height = kwargs.get(\"height\", kwargs.get(\"relative_length\"))\n try:\n del kwargs[\"relative_length\"]\n except KeyError:\n pass\n self.width = Length(self.width).value(relative_length=width, **kwargs)\n self.height = Length(self.height).value(relative_length=height, **kwargs)\n self.x = Length(self.x).value(relative_length=width, **kwargs)\n self.y = Length(self.y).value(relative_length=height, **kwargs)\n\n def elements(self, conditional=None):\n yield self\n for q in self.select(conditional):\n yield q\n\n @property\n def viewbox_transform(self):\n if self.viewbox is None:\n return \"\"\n return self.viewbox.transform(self)\n\n @staticmethod\n def _shadow_iter(tag, elem, children):\n yield tag, \"start\", elem\n try:\n for t, e, c in children:\n for shadow_tag, shadow_event, shadow_elem in SVG._shadow_iter(t, e, c):\n yield shadow_tag, shadow_event, shadow_elem\n except ValueError:\n \"\"\"\n Strictly speaking it is possible to reference use from other use objects. If this is an infinite loop\n we should not block the rendering. Just say we finished. See: W3C, struct-use-12-f\n \"\"\"\n pass\n yield tag, \"end\", elem\n\n @staticmethod\n def _use_structure_parse(source):\n \"\"\"\n SVG structure pass: parses the svg file such that it creates the structure implied by reused objects in a\n generalized context. Objects ids are read and put into an unparsed shadow tree. 
<use> objects seamlessly contain\n their definitions.\n \"\"\"\n defs = {}\n parent = None # Define Root Node.\n children = list()\n\n for event, elem in iterparse(source, events=(\"start\", \"end\", \"start-ns\")):\n try:\n tag = elem.tag\n if tag.startswith(\"{http://www.w3.org/2000/svg\"):\n tag = tag[28:] # Removing namespace. http://www.w3.org/2000/svg:\n except AttributeError:\n yield (None, event, elem)\n continue\n\n if event == \"start\":\n attributes = elem.attrib\n # Create new node.\n siblings = children # Parent's children are now my siblings.\n parent = (parent, children) # parent is now previous node context\n children = list() # new node has no children.\n node = (tag, elem, children) # define this node.\n siblings.append(node) # siblings now includes this node.\n\n if SVG_TAG_USE == tag:\n url = None\n if XLINK_HREF in attributes:\n url = attributes[XLINK_HREF]\n if SVG_HREF in attributes:\n url = attributes[SVG_HREF]\n if url is not None:\n transform = False\n try:\n x = attributes[SVG_ATTR_X]\n del attributes[SVG_ATTR_X]\n transform = True\n except KeyError:\n x = \"0\"\n try:\n y = attributes[SVG_ATTR_Y]\n del attributes[SVG_ATTR_Y]\n transform = True\n except KeyError:\n y = \"0\"\n if transform:\n try:\n attributes[\n SVG_ATTR_TRANSFORM\n ] = \"%s translate(%s, %s)\" % (\n attributes[SVG_ATTR_TRANSFORM],\n x,\n y,\n )\n except KeyError:\n attributes[SVG_ATTR_TRANSFORM] = \"translate(%s, %s)\" % (\n x,\n y,\n )\n yield (tag, event, elem)\n try:\n shadow_node = defs[url[1:]]\n children.append(\n shadow_node\n ) # Shadow children are children of the use.\n for n in SVG._shadow_iter(*shadow_node):\n yield n\n except KeyError:\n pass # Failed to find link.\n else:\n yield (tag, event, elem)\n if SVG_ATTR_ID in attributes: # If we have an ID, we save the node.\n defs[attributes[SVG_ATTR_ID]] = node # store node value in defs.\n elif event == \"end\":\n yield (tag, event, elem)\n # event is 'end', pop values.\n parent, children = parent # Parent is now node.\n\n @staticmethod\n def parse(\n source,\n reify=True,\n ppi=DEFAULT_PPI,\n width=1000,\n height=1000,\n color=\"black\",\n transform=None,\n context=None,\n ):\n \"\"\"\n Parses the SVG file. All attributes are things which the SVG document itself could not be aware of, such as\n the real size of pixels and the size of the viewport (as opposed to the viewbox).\n\n :param source: Source svg file or stream.\n :param reify: Should the Geometry sized or have lazy matrices.\n :param ppi: How many physical pixels per inch are there in this view.\n :param width: The physical width of the viewport\n :param height: The physical height of the viewport\n :param color: the `currentColor` value from outside the current scope.\n :param transform: Any required transformations to be pre-applied to this document\n :param context: Any existing document context.\n :return:\n \"\"\"\n clip = 0\n root = context\n styles = {}\n stack = []\n\n values = {\n SVG_ATTR_COLOR: color,\n SVG_ATTR_FILL: \"black\",\n SVG_ATTR_STROKE: \"none\",\n }\n\n if transform is not None:\n values[SVG_ATTR_TRANSFORM] = transform\n\n for tag, event, elem in SVG._use_structure_parse(source):\n \"\"\"\n SVG element parsing parses the job compiling any parsed elements into their compiled object forms.\n \"\"\"\n # print(event, elem)\n if event == \"start\":\n stack.append((context, values))\n if (\n SVG_ATTR_DISPLAY in values\n and values[SVG_ATTR_DISPLAY] == SVG_VALUE_NONE\n ):\n continue # Values has a display=none. Do not render anything. 
No Shadow Dom.\n current_values = values\n values = {}\n values.update(current_values) # copy of dictionary\n\n # Non-propagating values.\n if SVG_ATTR_PRESERVEASPECTRATIO in values:\n del values[SVG_ATTR_PRESERVEASPECTRATIO]\n if SVG_ATTR_VIEWBOX in values:\n del values[SVG_ATTR_VIEWBOX]\n if SVG_ATTR_ID in values:\n del values[SVG_ATTR_ID]\n if SVG_ATTR_CLIP_PATH in values:\n del values[SVG_ATTR_CLIP_PATH]\n\n attributes = elem.attrib # priority; lowest\n attributes[SVG_ATTR_TAG] = tag\n\n # Split any Style block elements into parts; priority medium\n style = \"\"\n if \"*\" in styles: # Select all.\n style += styles[\"*\"]\n if tag in styles: # selector type\n style += styles[tag]\n if SVG_ATTR_ID in attributes: # Selector id #id\n svg_id = attributes[SVG_ATTR_ID]\n css_tag = \"#%s\" % svg_id\n if css_tag in styles:\n if len(style) != 0:\n style += \";\"\n style += styles[css_tag]\n if SVG_ATTR_CLASS in attributes: # Selector class .class\n for svg_class in attributes[SVG_ATTR_CLASS].split(\" \"):\n css_tag = \".%s\" % svg_class\n if css_tag in styles:\n if len(style) != 0:\n style += \";\"\n style += styles[css_tag]\n css_tag = \"%s.%s\" % (\n tag,\n svg_class,\n ) # Selector type/class type.class\n if css_tag in styles:\n if len(style) != 0:\n style += \";\"\n style += styles[css_tag]\n # Split style element into parts; priority highest\n if SVG_ATTR_STYLE in attributes:\n style += attributes[SVG_ATTR_STYLE]\n\n # Process style tag left to right.\n for equate in style.split(\";\"):\n equal_item = equate.split(\":\")\n if len(equal_item) == 2:\n key = str(equal_item[0]).strip()\n value = str(equal_item[1]).strip()\n attributes[key] = value\n if (\n SVG_ATTR_FILL in attributes\n and attributes[SVG_ATTR_FILL] == SVG_VALUE_CURRENT_COLOR\n ):\n if SVG_ATTR_COLOR in attributes:\n attributes[SVG_ATTR_FILL] = attributes[SVG_ATTR_COLOR]\n else:\n attributes[SVG_ATTR_FILL] = values[SVG_ATTR_COLOR]\n\n if (\n SVG_ATTR_STROKE in attributes\n and attributes[SVG_ATTR_STROKE] == SVG_VALUE_CURRENT_COLOR\n ):\n if SVG_ATTR_COLOR in attributes:\n attributes[SVG_ATTR_STROKE] = attributes[SVG_ATTR_COLOR]\n else:\n attributes[SVG_ATTR_STROKE] = values[SVG_ATTR_COLOR]\n\n if SVG_ATTR_TRANSFORM in attributes:\n # If transform is already in values, append the new value.\n if SVG_ATTR_TRANSFORM in values:\n attributes[SVG_ATTR_TRANSFORM] = (\n values[SVG_ATTR_TRANSFORM]\n + \" \"\n + attributes[SVG_ATTR_TRANSFORM]\n )\n else:\n attributes[SVG_ATTR_TRANSFORM] = attributes[SVG_ATTR_TRANSFORM]\n\n # All class and attribute properties are compiled.\n\n values.update(attributes)\n if (\n SVG_ATTR_DISPLAY in values\n and values[SVG_ATTR_DISPLAY] == SVG_VALUE_NONE\n ):\n continue # If the attributes flag our values to display=none, stop rendering.\n if SVG_NAME_TAG == tag:\n # The ordering for transformations on the SVG object is:\n # explicit transform, parent transforms, attribute transforms, viewport transforms\n s = SVG(values)\n s.render(ppi=ppi, width=width, height=height)\n width, height = s.width, s.height\n if s.viewbox is not None:\n try:\n if s.height == 0 or s.width == 0:\n return s\n viewport_transform = s.viewbox_transform\n except ZeroDivisionError:\n # The width or height was zero.\n # https://www.w3.org/TR/SVG11/struct.html#SVGElementWidthAttribute\n # \"A value of zero disables rendering of the element.\"\n return s # No more parsing will be done.\n\n if SVG_ATTR_TRANSFORM in values:\n # transform on SVG element applied as if svg had parent with transform.\n values[SVG_ATTR_TRANSFORM] += \" \" + 
viewport_transform\n else:\n values[SVG_ATTR_TRANSFORM] = viewport_transform\n width, height = s.viewbox.width, s.viewbox.height\n if context is None:\n stack[-1] = (context, values)\n if context is not None:\n context.append(s)\n context = s\n if root is None:\n root = s\n elif SVG_TAG_GROUP == tag:\n s = Group(values)\n context.append(s)\n context = s\n s.render(ppi=ppi, width=width, height=height)\n elif SVG_TAG_DEFS == tag:\n s = Group(values)\n context = s # Non-Rendered\n s.render(ppi=ppi, width=width, height=height)\n elif SVG_TAG_CLIPPATH == tag:\n s = ClipPath(values)\n context = s # Non-Rendered\n s.render(ppi=ppi, width=width, height=height)\n clip += 1\n elif SVG_TAG_PATTERN == tag:\n s = Pattern(values)\n context = s # Non-rendered\n s.render(ppi=ppi, width=width, height=height)\n elif tag in (\n SVG_TAG_PATH,\n SVG_TAG_CIRCLE,\n SVG_TAG_ELLIPSE,\n SVG_TAG_LINE, # Shapes\n SVG_TAG_POLYLINE,\n SVG_TAG_POLYGON,\n SVG_TAG_RECT,\n SVG_TAG_IMAGE,\n ):\n try:\n if SVG_TAG_PATH == tag:\n s = Path(values)\n elif SVG_TAG_CIRCLE == tag:\n s = Circle(values)\n elif SVG_TAG_ELLIPSE == tag:\n s = Ellipse(values)\n elif SVG_TAG_LINE == tag:\n s = SimpleLine(values)\n elif SVG_TAG_POLYLINE == tag:\n s = Polyline(values)\n elif SVG_TAG_POLYGON == tag:\n s = Polygon(values)\n elif SVG_TAG_RECT == tag:\n s = Rect(values)\n else: # SVG_TAG_IMAGE == tag:\n s = SVGImage(values)\n except ValueError:\n continue\n s.render(ppi=ppi, width=width, height=height)\n if reify:\n s.reify()\n context.append(s)\n elif tag in (\n SVG_TAG_STYLE,\n SVG_TAG_TEXT,\n SVG_TAG_DESC,\n SVG_TAG_TITLE,\n SVG_TAG_TSPAN,\n ):\n # <style>, <text>, <desc>, <title>\n continue\n else:\n s = SVGElement(values) # SVG Unknown object return as element.\n context.append(s)\n\n # Assign optional linked properties.\n try:\n clip_path_url = s.values.get(SVG_ATTR_CLIP_PATH, None)\n if clip_path_url is not None:\n clip_path = root.get_element_by_url(clip_path_url)\n s.clip_path = clip_path\n except AttributeError:\n pass\n if clip != 0:\n try:\n clip_rule = s.values.get(SVG_ATTR_CLIP_RULE, SVG_RULE_NONZERO)\n if clip_rule is not None:\n s.clip_rule = clip_rule\n except AttributeError:\n pass\n if SVG_ATTR_ID in values and root is not None:\n root.objects[attributes[SVG_ATTR_ID]] = s\n elif event == \"end\": # End event.\n # The iterparse spec makes it clear that internal text data is undefined except at the end.\n s = None\n if tag in (\n SVG_TAG_TEXT,\n SVG_TAG_TSPAN,\n SVG_TAG_DESC,\n SVG_TAG_TITLE,\n SVG_TAG_STYLE,\n ):\n attributes = elem.attrib\n if SVG_ATTR_ID in values and root is not None:\n root.objects[attributes[SVG_ATTR_ID]] = s\n if tag in (SVG_TAG_TEXT, SVG_TAG_TSPAN):\n s = SVGText(values, text=elem.text)\n s.render(ppi=ppi, width=width, height=height)\n if reify:\n s.reify()\n context.append(s)\n elif SVG_TAG_DESC == tag:\n s = Desc(values, desc=elem.text)\n context.append(s)\n elif SVG_TAG_TITLE == tag:\n s = Title(values, title=elem.text)\n context.append(s)\n elif SVG_TAG_STYLE == tag:\n assignments = list(re.findall(REGEX_CSS_STYLE, elem.text))\n for key, value in assignments:\n key = key.strip()\n value = value.strip()\n for selector in key.split(\",\"): # Can comma select subitems.\n styles[selector.strip()] = value\n elif SVG_TAG_CLIPPATH == tag:\n clip -= 1\n if s is not None:\n # Assign optional linked properties.\n try:\n clip_path_url = s.values.get(SVG_ATTR_CLIP_PATH, None)\n if clip_path_url is not None:\n clip_path = root.get_element_by_url(clip_path_url)\n s.clip_path = clip_path\n except 
AttributeError:\n pass\n if clip != 0:\n try:\n clip_rule = s.values.get(\n SVG_ATTR_CLIP_RULE, SVG_RULE_NONZERO\n )\n if clip_rule is not None:\n s.clip_rule = clip_rule\n except AttributeError:\n pass\n\n context, values = stack.pop()\n elif event == \"start-ns\":\n if elem[0] != SVG_ATTR_DATA:\n values[elem[0]] = elem[1]\n return root\n" ]
[ [ "scipy.special.ellipeinc", "numpy.cos", "numpy.sin", "numpy.interp", "scipy.integrate.quad", "numpy.array" ] ]
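A note on the code entry above: every shape class it contains reduces to Move/Line/Arc/Close segments following the SVG 2.0 decompositions quoted in its docstrings, so the geometry is easy to probe interactively. A minimal sketch under the assumption that this code is the `svgelements` package (the class and method names match that library; the import line and the Rect argument order are assumptions, not part of the dataset row):

# Hypothetical probe of the shape API shown in the code entry above.
from svgelements import Rect, Circle, Matrix  # assumed package/import

r = Rect(0, 0, 10, 5, 1, 1)  # assumed order: x, y, width, height, rx, ry
print(len(r.segments()))     # 10: Move + 4 Lines + 4 Arcs + Close per SVG 2.0 10.2

c = Circle(0, 0, 5)               # cx, cy, r, per property_by_args in the entry
print(c.point(0.25))              # quarter-turn point, approximately (0, 5)
c.transform *= Matrix.scale(2.0)  # lazy transform held on the shape
c.reify()                         # folded into the geometry: rx == ry == 10 afterwards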
JeroenDM/sampling_based_tube_following_2
[ "b710b69c80600d35e31297184e8008b144ca1ec7" ]
[ "figure_1_sample_examples.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\n\nfrom acrolib.quaternion import Quaternion\nfrom acrobotics.util import get_default_axes3d, plot_reference_frame\nfrom acrobotics.util import rot_z\n\nfrom acrobotics.path.sampling import SampleMethod\nfrom acrobotics.path.tolerance import (\n Tolerance,\n NoTolerance,\n SymmetricTolerance,\n QuaternionTolerance,\n)\nfrom acrobotics.path.path_pt import TolPositionPt, TolEulerPt, TolQuatPt\n\n# ==================================================================================\n# Position samples\n# ==================================================================================\nR_pt = rot_z(np.deg2rad(30))\npos_pt = np.array([0.5, 0.5, 0.0])\n\npos_tol = [Tolerance(-0.1, 0.1, 3), Tolerance(-0.3, 0.3, 5), NoTolerance()]\n\npt_pos = TolPositionPt(pos_pt, Quaternion(matrix=R_pt), pos_tol)\n\n# ==================================================================================\n# Euler constraints samples\n# ==================================================================================\nR_pt = np.eye(3)\npos_tol = 3 * [NoTolerance()]\nrot_tol = [\n SymmetricTolerance(np.deg2rad(15), 5),\n NoTolerance(),\n Tolerance(0, np.pi / 2, 10),\n]\n\npt_eul = TolEulerPt(np.zeros(3), Quaternion(matrix=R_pt), pos_tol, rot_tol)\n\n# ==================================================================================\n# Quaternion constraints samples\n# ==================================================================================\nR_pt = np.eye(4)\npos_tol = 3 * [NoTolerance()]\nquat_tol = QuaternionTolerance(0.1)\n\npt_quat = TolQuatPt(np.zeros(3), Quaternion(matrix=R_pt), pos_tol, quat_tol)\n\n# ==================================================================================\n# Create plot and save it\n# ==================================================================================\nfig = plt.figure(figsize=plt.figaspect(1 / 3))\naxes = [fig.add_subplot(1, 3, i, projection=\"3d\") for i in [1, 2, 3]]\n\n# all axis show the path point reference frame and have no coordinate axes\nfor pt, ax in zip([pt_pos, pt_eul, pt_quat], axes):\n ax.set_axis_off()\n plot_reference_frame(ax, tf=pt.transformation_matrix, arrow_length=0.2)\n\n\n# here we plot the samples for each specific point\nax1, ax2, ax3 = axes[0], axes[1], axes[2]\n\nplot_reference_frame(ax1, arrow_length=0.3)\nfor tf in pt_pos.sample_grid():\n ax1.scatter(tf[0, 3], tf[1, 3], tf[2, 3], \"o\", c=\"black\")\n\nfor tf in pt_eul.sample_grid():\n plot_reference_frame(ax2, tf, arrow_length=0.1)\n\n\nfor tf in pt_quat.sample_incremental(50, SampleMethod.random_uniform):\n plot_reference_frame(ax3, tf, arrow_length=0.1)\n\n\n# here we tweak the view a bit to make it look nice\nax1.view_init(azim=20, elev=38)\nax1.set_xlim3d([-0.5, 0.5])\nax1.set_ylim3d([-0.5, 0.5])\nax1.set_zlim3d([-0.5, 0.5])\nax2.view_init(azim=35, elev=38)\nax3.view_init(azim=35, elev=38)\nplt.tight_layout()\n\nplt.savefig(\"figures/sample_examples.png\", dpi=200)\n" ]
[ [ "matplotlib.pyplot.figaspect", "matplotlib.pyplot.tight_layout", "numpy.eye", "matplotlib.pyplot.savefig", "numpy.deg2rad", "numpy.array", "numpy.zeros" ] ]
KeisukeNagakawa/cisim
[ "2f52b34ccd915f4b5a23f1ba502ecfdabf8aecfb" ]
[ "tests/test_stats.py" ]
[ "from unittest import TestCase\nfrom cisim.stats import BinomCI, HyperCI\n\n\nclass Testbinom(TestCase):\n\n def test_validation(self):\n self.assertRaises(ValueError, BinomCI, -100, 10, 0.05)\n self.assertRaises(ValueError, BinomCI, 100, -10, 0.05)\n self.assertRaises(ValueError, BinomCI, 100, 10, -0.05)\n\n def test_ci_sim(self):\n b = BinomCI(n_pop=100, n_obs=10, cl=0.05)\n res = b.ci_sim()\n self.assertEqual(\n [0.049005430267763495, 0.17622473596973592],\n res[0]\n )\n\n\nclass TestHyperCI(TestCase):\n def test_validation(self):\n self.assertRaises(ValueError, HyperCI, -100, 20, 5)\n self.assertRaises(ValueError, HyperCI, 100, -20, 5)\n self.assertRaises(ValueError, HyperCI, 100, 20, -5)\n\n def test_hypergeom_cdf_lower(self):\n # check hypergeom cdf to return right value\n from scipy.stats import hypergeom\n h = HyperCI(n_pop=100, n_draw=20, k_s_obs=5)\n k_s = 30\n res = hypergeom.cdf(h.k_s_obs, h.n_pop, k_s, h.n_draw)\n self.assertEqual(res, 0.4009887932548518)\n\n def test_hypergeom_cdf_upper(self):\n from scipy.stats import hypergeom\n h = HyperCI(n_pop=100, n_draw=20, k_s_obs=5)\n k_s = 30\n res = 1 - hypergeom.cdf(h.k_s_obs - 1, h.n_pop, k_s, h.n_draw)\n self.assertEqual(res, 0.7908367991741947)\n\n def test_ci_sim(self):\n h = HyperCI(n_pop=10 ** 4, n_draw=10 ** 3, k_s_obs=100)\n res = h.ci_sim()\n self.assertEqual(res[0], [830, 1193])\n" ]
[ [ "scipy.stats.hypergeom.cdf" ] ]
chxw20/volta
[ "57022a7e33d458a8245ffcd3131ae2f94375dd12" ]
[ "volta/datasets/visual_entailment_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# Copyright (c) 2020, Emanuele Bugliarello (@e-bug).\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport logging\nimport jsonlines\nimport _pickle as cPickle\n\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom pytorch_transformers.tokenization_bert import BertTokenizer\nfrom ._image_features_reader import ImageFeaturesH5Reader\n\n\nlogger = logging.getLogger(__name__)\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n\nLABEL_MAP = {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}\n\n\ndef assert_eq(real, expected):\n assert real == expected, \"%s (true) vs %s (expected)\" % (real, expected)\n\n\ndef _create_entry(item):\n entry = {\n \"question_id\": item[\"question_id\"],\n \"image_id\": item[\"image_id\"],\n \"hypothesis\": item[\"hypothesis\"],\n \"answer\": item,\n }\n return entry\n\n\ndef _load_dataset(dataroot, name):\n \"\"\"Load entries\n\n dataroot: root path of dataset\n name: 'train', 'dev', 'test'\n \"\"\"\n if name == \"train\" or name == \"dev\" or name == \"test\":\n annotations_path = os.path.join(dataroot, \"snli_ve_%s.jsonl\" % name)\n with jsonlines.open(annotations_path) as reader:\n # Build an index which maps image id with a list of hypothesis annotations.\n items = []\n count = 0\n for annotation in reader:\n dictionary = {}\n dictionary[\"image_id\"] = int(annotation[\"Flikr30kID\"].split(\".\")[0])\n dictionary[\"question_id\"] = count\n dictionary[\"hypothesis\"] = str(annotation[\"sentence2\"])\n if str(annotation[\"gold_label\"]) == \"-\":\n dictionary[\"labels\"] = []\n dictionary[\"scores\"] = []\n else:\n dictionary[\"labels\"] = [\n int(LABEL_MAP[str(annotation[\"gold_label\"])])\n ]\n dictionary[\"scores\"] = [1.0]\n items.append(dictionary)\n count += 1\n else:\n assert False, \"data split is not recognized.\"\n entries = []\n for item in items:\n entries.append(_create_entry(item))\n return entries\n\n\nclass VisualEntailmentDataset(Dataset):\n def __init__(\n self,\n task: str,\n dataroot: str,\n annotations_jsonpath: str,\n split: str,\n image_features_reader: ImageFeaturesH5Reader,\n gt_image_features_reader: ImageFeaturesH5Reader,\n tokenizer: BertTokenizer,\n bert_model,\n padding_index: int = 0,\n max_seq_length: int = 16,\n max_region_num: int = 37,\n num_locs=5,\n add_global_imgfeat=None,\n append_mask_sep=False,\n ):\n super().__init__()\n self.split = split\n self.num_labels = 3\n self._max_region_num = max_region_num\n self._max_seq_length = max_seq_length\n self._image_features_reader = image_features_reader\n self._tokenizer = tokenizer\n self._padding_index = padding_index\n self._num_locs = num_locs\n self._add_global_imgfeat = add_global_imgfeat\n\n if \"roberta\" in bert_model:\n cache_path = os.path.join(\n dataroot,\n \"cache\",\n task\n + \"_\"\n + split\n + \"_\"\n + \"roberta\"\n + \"_\"\n + str(max_seq_length)\n + \".pkl\",\n )\n else:\n cache_path = os.path.join(\n dataroot,\n \"cache\",\n task\n + \"_\"\n + split\n + \"_\"\n + str(max_seq_length)\n + \".pkl\",\n )\n if not os.path.exists(cache_path):\n self.entries = _load_dataset(dataroot, split)\n self.tokenize(max_seq_length)\n self.tensorize()\n cPickle.dump(self.entries, open(cache_path, \"wb\"))\n else:\n logger.info(\"Loading from %s\" % cache_path)\n self.entries = cPickle.load(open(cache_path, \"rb\"))\n\n def tokenize(self, max_length=16):\n \"\"\"Tokenizes the questions.\n\n 
This will add q_token in each entry of the dataset.\n -1 represent nil, and should be treated as padding_index in embedding\n \"\"\"\n for entry in self.entries:\n tokens = self._tokenizer.encode(entry[\"hypothesis\"])\n tokens = tokens[:max_length - 2]\n tokens = self._tokenizer.add_special_tokens_single_sentence(tokens)\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n\n if len(tokens) < max_length:\n # Note here we pad in front of the sentence\n padding = [self._padding_index] * (max_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding\n\n assert_eq(len(tokens), max_length)\n entry[\"q_token\"] = tokens\n entry[\"q_input_mask\"] = input_mask\n entry[\"q_segment_ids\"] = segment_ids\n\n def tensorize(self):\n for entry in self.entries:\n question = torch.from_numpy(np.array(entry[\"q_token\"]))\n entry[\"q_token\"] = question\n\n q_input_mask = torch.from_numpy(np.array(entry[\"q_input_mask\"]))\n entry[\"q_input_mask\"] = q_input_mask\n\n q_segment_ids = torch.from_numpy(np.array(entry[\"q_segment_ids\"]))\n entry[\"q_segment_ids\"] = q_segment_ids\n\n answer = entry[\"answer\"]\n labels = np.array(answer[\"labels\"])\n scores = np.array(answer[\"scores\"], dtype=np.float32)\n if len(labels):\n labels = torch.from_numpy(labels)\n scores = torch.from_numpy(scores)\n entry[\"answer\"][\"labels\"] = labels\n entry[\"answer\"][\"scores\"] = scores\n else:\n entry[\"answer\"][\"labels\"] = None\n entry[\"answer\"][\"scores\"] = None\n\n def __getitem__(self, index):\n entry = self.entries[index]\n image_id = entry[\"image_id\"]\n question_id = entry[\"question_id\"]\n features, num_boxes, boxes, _ = self._image_features_reader[image_id]\n\n mix_num_boxes = min(int(num_boxes), self._max_region_num)\n mix_boxes_pad = np.zeros((self._max_region_num, self._num_locs))\n mix_features_pad = np.zeros((self._max_region_num, 2048))\n\n image_mask = [1] * (int(mix_num_boxes))\n while len(image_mask) < self._max_region_num:\n image_mask.append(0)\n\n mix_boxes_pad[:mix_num_boxes] = boxes[:mix_num_boxes]\n mix_features_pad[:mix_num_boxes] = features[:mix_num_boxes]\n\n features = torch.tensor(mix_features_pad).float()\n image_mask = torch.tensor(image_mask).long()\n spatials = torch.tensor(mix_boxes_pad).float()\n\n hypothesis = entry[\"q_token\"]\n input_mask = entry[\"q_input_mask\"]\n segment_ids = entry[\"q_segment_ids\"]\n\n target = torch.zeros(self.num_labels)\n\n answer = entry[\"answer\"]\n labels = answer[\"labels\"]\n scores = answer[\"scores\"]\n if labels is not None:\n target.scatter_(0, labels, scores)\n\n return features, spatials, image_mask, hypothesis, target, input_mask, segment_ids, question_id\n\n def __len__(self):\n return len(self.entries)\n" ]
[ [ "torch.zeros", "torch.from_numpy", "torch.tensor", "numpy.array", "numpy.zeros" ] ]
sismetanin/jiant
[ "00bc77ca633e8dcf83c40f7c6ef5bae9f359d999" ]
[ "jiant/scripts/download_data/dl_datasets/files_tasks.py" ]
[ "import json\nimport logging\nimport os\nimport pandas as pd\nimport re\nimport shutil\nimport tarfile\nfrom operator import itemgetter\nfrom collections import Counter\n\nimport jiant.scripts.download_data.utils as download_utils\nimport jiant.utils.display as display\nimport jiant.utils.python.filesystem as filesystem\nimport jiant.utils.python.io as py_io\n\nfrom jiant.scripts.download_data.constants import SQUAD_TASKS, DIRECT_SUPERGLUE_TASKS_TO_DATA_URLS\n\n\ndef download_task_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n if task_name in SQUAD_TASKS:\n download_squad_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name in DIRECT_SUPERGLUE_TASKS_TO_DATA_URLS:\n download_superglue_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"abductive_nli\":\n download_abductive_nli_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"arct\":\n download_arct_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"fever_nli\":\n download_fever_nli_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"swag\":\n download_swag_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"qamr\":\n download_qamr_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"qasrl\":\n download_qasrl_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"newsqa\":\n download_newsqa_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"mctaco\":\n download_mctaco_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"mctest160\":\n download_mctest160_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"mctest500\":\n download_mctest500_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"mrqa_natural_questions\":\n download_mrqa_natural_questions_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"mutual\":\n download_mutual_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"mutual_plus\":\n download_mutual_plus_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"piqa\":\n download_piqa_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"winogrande\":\n download_winogrande_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == \"ropes\":\n download_ropes_data_and_write_config(\n 
task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name in [\n \"acceptability_definiteness\",\n \"acceptability_coord\",\n \"acceptability_eos\",\n \"acceptability_whwords\",\n ]:\n download_acceptability_judgments_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == 'rusentiment':\n download_rusentiment_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == 'ruhate':\n download_ruhate_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == 'rudepression':\n download_rudepression_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name == 'ruhumor':\n download_ruhumor_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n elif task_name in [\n \"senteval_bigram_shift\",\n \"senteval_coordination_inversion\",\n \"senteval_obj_number\",\n \"senteval_odd_man_out\",\n \"senteval_past_present\",\n \"senteval_sentence_length\",\n \"senteval_subj_number\",\n \"senteval_top_constituents\",\n \"senteval_tree_depth\",\n \"senteval_word_content\",\n ]:\n download_senteval_data_and_write_config(\n task_name=task_name, task_data_path=task_data_path, task_config_path=task_config_path\n )\n else:\n raise KeyError(task_name)\n\n\ndef download_squad_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n if task_name == \"squad_v1\":\n train_file = \"train-v1.1.json\"\n dev_file = \"dev-v1.1.json\"\n version_2_with_negative = False\n elif task_name == \"squad_v2\":\n train_file = \"train-v2.0.json\"\n dev_file = \"dev-v2.0.json\"\n version_2_with_negative = True\n else:\n raise KeyError(task_name)\n\n os.makedirs(task_data_path, exist_ok=True)\n train_path = os.path.join(task_data_path, train_file)\n val_path = os.path.join(task_data_path, dev_file)\n download_utils.download_file(\n url=f\"https://rajpurkar.github.io/SQuAD-explorer/dataset/{train_file}\",\n file_path=train_path,\n )\n download_utils.download_file(\n url=f\"https://rajpurkar.github.io/SQuAD-explorer/dataset/{dev_file}\", file_path=val_path,\n )\n py_io.write_json(\n data={\n \"task\": \"squad\",\n \"paths\": {\"train\": train_path, \"val\": val_path},\n \"kwargs\": {\"version_2_with_negative\": version_2_with_negative},\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_superglue_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n # Applies to ReCoRD, MultiRC and WSC\n assert task_name not in SQUAD_TASKS\n\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n DIRECT_SUPERGLUE_TASKS_TO_DATA_URLS[task_name], task_data_path\n )\n\n # Move task data up one folder (nested under task name when unzipped)\n # ie: mv ./record/ReCoRD/* ./record\n nested_task_dir = os.path.join(\n task_data_path, filesystem.find_case_insensitive_filename(task_name, task_data_path)\n )\n task_data_files = os.listdir(nested_task_dir)\n for f in task_data_files:\n # Overwrite file if it exists (overwrite by full path specification)\n shutil.move(os.path.join(nested_task_dir, f), os.path.join(task_data_path, f))\n shutil.rmtree(nested_task_dir)\n\n # Supports datasets with non-standard dev dataset name\n if 
os.path.isfile(os.path.join(task_data_path, \"dev.jsonl\")):\n dev_data_name = \"dev.jsonl\"\n elif os.path.isfile(os.path.join(task_data_path, \"val.jsonl\")):\n dev_data_name = \"val.jsonl\"\n else:\n raise RuntimeError(\"Unsupported dev dataset name in downloaded task.\")\n\n val_path = os.path.join(task_data_path, dev_data_name)\n train_path = os.path.join(task_data_path, \"train.jsonl\")\n test_path = os.path.join(task_data_path, \"test.jsonl\")\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\"train\": train_path, \"val\": val_path, \"test\": test_path},\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_abductive_nli_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip\",\n task_data_path,\n )\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train_inputs\": os.path.join(task_data_path, \"train.jsonl\"),\n \"train_labels\": os.path.join(task_data_path, \"train-labels.lst\"),\n \"val_inputs\": os.path.join(task_data_path, \"dev.jsonl\"),\n \"val_labels\": os.path.join(task_data_path, \"dev-labels.lst\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_arct_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n os.makedirs(task_data_path, exist_ok=True)\n file_name_list = [\n \"train-doubled.tsv\",\n \"train-w-swap-doubled.tsv\",\n \"train-w-swap.tsv\",\n \"train.tsv\",\n \"dev.tsv\",\n \"test.tsv\",\n ]\n for file_name in file_name_list:\n download_utils.download_file(\n f\"https://raw.githubusercontent.com/UKPLab/argument-reasoning-comprehension-task/\"\n + f\"master/experiments/src/main/python/data/{file_name}\",\n os.path.join(task_data_path, file_name),\n )\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.tsv\"),\n \"val\": os.path.join(task_data_path, \"val.tsv\"),\n \"test\": os.path.join(task_data_path, \"test.tsv\"),\n \"train_doubled\": os.path.join(task_data_path, \"train-doubled.tsv\"),\n \"train_w_swap\": os.path.join(task_data_path, \"train-w-swap.tsv\"),\n \"train_w_swap_doubled\": os.path.join(task_data_path, \"train-w-swap-doubled.tsv\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_mctaco_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n file_name_list = [\"dev_3783.tsv\", \"test_9442.tsv\"]\n for file_name in file_name_list:\n download_utils.download_file(\n f\"https://raw.githubusercontent.com/CogComp/MCTACO/master/dataset/{file_name}\",\n os.path.join(task_data_path, file_name),\n )\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"val\": os.path.join(task_data_path, \"dev_3783.tsv\"),\n \"test\": os.path.join(task_data_path, \"test_9442.tsv\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_mctest160_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://mattr1.github.io/mctest/data/MCTest.zip\", task_data_path,\n )\n download_utils.download_and_unzip(\n \"https://mattr1.github.io/mctest/data/MCTestAnswers.zip\", task_data_path,\n )\n os.rename(\n os.path.join(task_data_path, 
\"MCTestAnswers\", f\"mc160.test.ans\"),\n os.path.join(task_data_path, \"MCTest\", f\"mc160.test.ans\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"MCTestAnswers\"))\n for phase in [\"train\", \"dev\", \"test\"]:\n os.rename(\n os.path.join(task_data_path, \"MCTest\", f\"mc160.{phase}.tsv\"),\n os.path.join(task_data_path, f\"mc160.{phase}.tsv\"),\n )\n os.rename(\n os.path.join(task_data_path, \"MCTest\", f\"mc160.{phase}.ans\"),\n os.path.join(task_data_path, f\"mc160.{phase}.ans\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"MCTest\"))\n\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"mc160.train.tsv\"),\n \"train_ans\": os.path.join(task_data_path, \"mc160.train.ans\"),\n \"val\": os.path.join(task_data_path, \"mc160.dev.tsv\"),\n \"val_ans\": os.path.join(task_data_path, \"mc160.dev.ans\"),\n \"test\": os.path.join(task_data_path, \"mc160.test.tsv\"),\n \"test_ans\": os.path.join(task_data_path, \"mc160.test.ans\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_mctest500_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://mattr1.github.io/mctest/data/MCTest.zip\", task_data_path,\n )\n download_utils.download_and_unzip(\n \"https://mattr1.github.io/mctest/data/MCTestAnswers.zip\", task_data_path,\n )\n os.rename(\n os.path.join(task_data_path, \"MCTestAnswers\", f\"mc500.test.ans\"),\n os.path.join(task_data_path, \"MCTest\", f\"mc500.test.ans\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"MCTestAnswers\"))\n for phase in [\"train\", \"dev\", \"test\"]:\n os.rename(\n os.path.join(task_data_path, \"MCTest\", f\"mc500.{phase}.tsv\"),\n os.path.join(task_data_path, f\"mc500.{phase}.tsv\"),\n )\n os.rename(\n os.path.join(task_data_path, \"MCTest\", f\"mc500.{phase}.ans\"),\n os.path.join(task_data_path, f\"mc500.{phase}.ans\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"MCTest\"))\n\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"mc500.train.tsv\"),\n \"train_ans\": os.path.join(task_data_path, \"mc500.train.ans\"),\n \"val\": os.path.join(task_data_path, \"mc500.dev.tsv\"),\n \"val_ans\": os.path.join(task_data_path, \"mc500.dev.ans\"),\n \"test\": os.path.join(task_data_path, \"mc500.test.tsv\"),\n \"test_ans\": os.path.join(task_data_path, \"mc500.test.ans\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_mutual_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n os.makedirs(task_data_path + \"/train\", exist_ok=True)\n os.makedirs(task_data_path + \"/dev\", exist_ok=True)\n os.makedirs(task_data_path + \"/test\", exist_ok=True)\n num_files = {\"train\": 7088, \"dev\": 886, \"test\": 886}\n for phase in num_files:\n examples = []\n for i in range(num_files[phase]):\n file_name = phase + \"_\" + str(i + 1) + \".txt\"\n download_utils.download_file(\n f\"https://raw.githubusercontent.com/Nealcly/MuTual/\"\n + f\"master/data/mutual/{phase}/{file_name}\",\n os.path.join(task_data_path, phase, file_name),\n )\n for line in py_io.read_file_lines(os.path.join(task_data_path, phase, file_name)):\n examples.append(line)\n py_io.write_jsonl(examples, os.path.join(task_data_path, phase + \".jsonl\"))\n shutil.rmtree(os.path.join(task_data_path, phase))\n\n 
py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl\"),\n \"val\": os.path.join(task_data_path, \"dev.jsonl\"),\n \"test\": os.path.join(task_data_path, \"test.jsonl\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_mutual_plus_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n os.makedirs(task_data_path + \"/train\", exist_ok=True)\n os.makedirs(task_data_path + \"/dev\", exist_ok=True)\n os.makedirs(task_data_path + \"/test\", exist_ok=True)\n num_files = {\"train\": 7088, \"dev\": 886, \"test\": 886}\n for phase in num_files:\n examples = []\n for i in range(num_files[phase]):\n file_name = phase + \"_\" + str(i + 1) + \".txt\"\n download_utils.download_file(\n f\"https://raw.githubusercontent.com/Nealcly/MuTual/\"\n + f\"master/data/mutual_plus/{phase}/{file_name}\",\n os.path.join(task_data_path, phase, file_name),\n )\n for line in py_io.read_file_lines(os.path.join(task_data_path, phase, file_name)):\n examples.append(line)\n py_io.write_jsonl(examples, os.path.join(task_data_path, phase + \".jsonl\"))\n shutil.rmtree(os.path.join(task_data_path, phase))\n\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl\"),\n \"val\": os.path.join(task_data_path, \"dev.jsonl\"),\n \"test\": os.path.join(task_data_path, \"test.jsonl\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_fever_nli_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://www.dropbox.com/s/hylbuaovqwo2zav/nli_fever.zip?dl=1\", task_data_path,\n )\n # Since the FEVER NLI dataset doesn't have labels for the dev set, we also download the original\n # FEVER dev set and match example CIDs to obtain labels.\n orig_dev_path = os.path.join(task_data_path, \"fever-dev-temp.jsonl\")\n download_utils.download_file(\n \"https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_dev.jsonl\", orig_dev_path,\n )\n id_to_label = {}\n for line in py_io.read_jsonl(orig_dev_path):\n if \"id\" not in line:\n logging.warning(\"FEVER dev dataset is missing ID.\")\n continue\n if \"label\" not in line:\n logging.warning(\"FEVER dev dataset is missing label.\")\n continue\n id_to_label[line[\"id\"]] = line[\"label\"]\n os.remove(orig_dev_path)\n\n dev_path = os.path.join(task_data_path, \"nli_fever\", \"dev_fitems.jsonl\")\n dev_examples = []\n for line in py_io.read_jsonl(dev_path):\n if \"cid\" not in line:\n logging.warning(\"Data in {} is missing CID.\".format(dev_path))\n continue\n if int(line[\"cid\"]) not in id_to_label:\n logging.warning(\"Could not match CID {} to dev data.\".format(line[\"cid\"]))\n continue\n dev_example = line\n dev_example[\"label\"] = id_to_label[int(line[\"cid\"])]\n dev_examples.append(dev_example)\n py_io.write_jsonl(dev_examples, os.path.join(task_data_path, \"val.jsonl\"))\n os.remove(dev_path)\n\n for phase in [\"train\", \"test\"]:\n os.rename(\n os.path.join(task_data_path, \"nli_fever\", f\"{phase}_fitems.jsonl\"),\n os.path.join(task_data_path, f\"{phase}.jsonl\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"nli_fever\"))\n\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl\"),\n \"val\": 
os.path.join(task_data_path, \"val.jsonl\"),\n \"test\": os.path.join(task_data_path, \"test.jsonl\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_swag_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://github.com/rowanz/swagaf/archive/master.zip\", task_data_path,\n )\n for phase in [\"train\", \"val\", \"test\"]:\n os.rename(\n os.path.join(task_data_path, \"swagaf-master\", \"data\", f\"{phase}.csv\"),\n os.path.join(task_data_path, f\"{phase}.csv\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"swagaf-master\"))\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.csv\"),\n \"val\": os.path.join(task_data_path, \"val.csv\"),\n \"test\": os.path.join(task_data_path, \"test.csv\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_qamr_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://github.com/uwnlp/qamr/archive/master.zip\", task_data_path,\n )\n data_phase_list = [\"train\", \"dev\", \"test\"]\n jiant_phase_list = [\"train\", \"val\", \"test\"]\n for data_phase, jiant_phase in zip(data_phase_list, jiant_phase_list):\n os.rename(\n os.path.join(task_data_path, \"qamr-master\", \"data\", \"filtered\", f\"{data_phase}.tsv\"),\n os.path.join(task_data_path, f\"{jiant_phase}.tsv\"),\n )\n os.rename(\n os.path.join(task_data_path, \"qamr-master\", \"data\", \"wiki-sentences.tsv\"),\n os.path.join(task_data_path, \"wiki-sentences.tsv\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"qamr-master\"))\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.tsv\"),\n \"val\": os.path.join(task_data_path, \"val.tsv\"),\n \"test\": os.path.join(task_data_path, \"test.tsv\"),\n \"wiki_dict\": os.path.join(task_data_path, \"wiki-sentences.tsv\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_qasrl_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_untar(\n \"http://qasrl.org/data/qasrl-v2.tar\", task_data_path,\n )\n data_phase_list = [\"train\", \"dev\", \"test\"]\n jiant_phase_list = [\"train\", \"val\", \"test\"]\n for data_phase, jiant_phase in zip(data_phase_list, jiant_phase_list):\n os.rename(\n os.path.join(task_data_path, \"qasrl-v2\", \"orig\", f\"{data_phase}.jsonl.gz\"),\n os.path.join(task_data_path, f\"{jiant_phase}.jsonl.gz\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"qasrl-v2\"))\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl.gz\"),\n \"val\": os.path.join(task_data_path, \"val.jsonl.gz\"),\n \"test\": os.path.join(task_data_path, \"test.jsonl.gz\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_newsqa_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n def get_consensus_answer(row_):\n answer_char_start, answer_char_end = None, None\n if row_.validated_answers:\n validated_answers_ = json.loads(row.validated_answers)\n answer_, max_count = max(validated_answers_.items(), key=itemgetter(1))\n total_count = 
sum(validated_answers_.values())\n if max_count >= total_count / 2.0:\n if answer_ != \"none\" and answer_ != \"bad_question\":\n answer_char_start, answer_char_end = map(int, answer_.split(\":\"))\n else:\n # No valid answer.\n pass\n else:\n # Check row_.answer_char_ranges for most common answer.\n # No validation was done so there must be an answer with consensus.\n answers = Counter()\n for user_answer in row_.answer_char_ranges.split(\"|\"):\n for ans in user_answer.split(\",\"):\n answers[ans] += 1\n top_answer = answers.most_common(1)\n if top_answer:\n top_answer, _ = top_answer[0]\n if \":\" in top_answer:\n answer_char_start, answer_char_end = map(int, top_answer.split(\":\"))\n\n return answer_char_start, answer_char_end\n\n def load_combined(path):\n result = pd.read_csv(\n path,\n encoding=\"utf-8\",\n dtype=dict(is_answer_absent=float),\n na_values=dict(question=[], story_text=[], validated_answers=[]),\n keep_default_na=False,\n )\n\n if \"story_text\" in result.keys():\n for row_ in display.tqdm(\n result.itertuples(), total=len(result), desc=\"Adjusting story texts\"\n ):\n story_text_ = row_.story_text.replace(\"\\r\\n\", \"\\n\")\n result.at[row_.Index, \"story_text\"] = story_text_\n\n return result\n\n def _map_answers(answers):\n result = []\n for a in answers.split(\"|\"):\n user_answers = []\n result.append(dict(sourcerAnswers=user_answers))\n for r in a.split(\",\"):\n if r == \"None\":\n user_answers.append(dict(noAnswer=True))\n else:\n start_, end_ = map(int, r.split(\":\"))\n user_answers.append(dict(s=start_, e=end_))\n return result\n\n def strip_empty_strings(strings):\n while strings and strings[-1] == \"\":\n del strings[-1]\n return strings\n\n # Require: cnn_stories.tgz\n cnn_stories_path = os.path.join(task_data_path, \"cnn_stories.tgz\")\n assert os.path.exists(cnn_stories_path), (\n \"Download CNN Stories from https://cs.nyu.edu/~kcho/DMQA/ and save to \" + cnn_stories_path\n )\n # Require: newsqa-data-v1/newsqa-data-v1.csv\n dataset_path = os.path.join(task_data_path, \"newsqa-data-v1\", \"newsqa-data-v1.csv\")\n if os.path.exists(dataset_path):\n pass\n elif os.path.exists(os.path.join(task_data_path, \"newsqa-data-v1.zip\")):\n download_utils.unzip_file(\n zip_path=os.path.join(task_data_path, \"newsqa-data-v1.zip\"),\n extract_location=task_data_path,\n delete=False,\n )\n else:\n raise AssertionError(\n \"Download https://www.microsoft.com/en-us/research/project/newsqa-dataset/#!download\"\n \" and save to \" + os.path.join(task_data_path, \"newsqa-data-v1.zip\")\n )\n\n # Download auxiliary data\n os.makedirs(task_data_path, exist_ok=True)\n file_name_list = [\n \"train_story_ids.csv\",\n \"dev_story_ids.csv\",\n \"test_story_ids.csv\",\n \"stories_requiring_extra_newline.csv\",\n \"stories_requiring_two_extra_newlines.csv\",\n \"stories_to_decode_specially.csv\",\n ]\n for file_name in file_name_list:\n download_utils.download_file(\n f\"https://raw.githubusercontent.com/Maluuba/newsqa/master/maluuba/newsqa/{file_name}\",\n os.path.join(task_data_path, file_name),\n )\n\n dataset = load_combined(dataset_path)\n remaining_story_ids = set(dataset[\"story_id\"])\n with open(\n os.path.join(task_data_path, \"stories_requiring_extra_newline.csv\"), \"r\", encoding=\"utf-8\"\n ) as f:\n stories_requiring_extra_newline = set(f.read().split(\"\\n\"))\n\n with open(\n os.path.join(task_data_path, \"stories_requiring_two_extra_newlines.csv\"),\n \"r\",\n encoding=\"utf-8\",\n ) as f:\n stories_requiring_two_extra_newlines = 
set(f.read().split(\"\\n\"))\n\n with open(\n os.path.join(task_data_path, \"stories_to_decode_specially.csv\"), \"r\", encoding=\"utf-8\"\n ) as f:\n stories_to_decode_specially = set(f.read().split(\"\\n\"))\n\n # Start combining data files\n story_id_to_text = {}\n with tarfile.open(cnn_stories_path, mode=\"r:gz\", encoding=\"utf-8\") as t:\n highlight_indicator = \"@highlight\"\n\n copyright_line_pattern = re.compile(\n \"^(Copyright|Entire contents of this article copyright, )\"\n )\n with display.tqdm(total=len(remaining_story_ids), desc=\"Getting story texts\") as pbar:\n for member in t.getmembers():\n story_id = member.name\n if story_id in remaining_story_ids:\n remaining_story_ids.remove(story_id)\n story_file = t.extractfile(member)\n\n # Correct discrepancies in stories.\n # Problems are caused by using several programming languages and libraries.\n # When ingesting the stories, we started with Python 2.\n # After dealing with unicode issues, we tried switching to Python 3.\n # That caused inconsistency problems so we switched back to Python 2.\n # Furthermore, when crowdsourcing, JavaScript and HTML templating perturbed\n # the stories.\n # So here we map the text to be compatible with the indices.\n lines = map(lambda s_: s_.strip().decode(\"utf-8\"), story_file.readlines())\n\n story_file.close()\n lines = list(lines)\n highlights_start = lines.index(highlight_indicator)\n story_lines = lines[:highlights_start]\n story_lines = strip_empty_strings(story_lines)\n while len(story_lines) > 1 and copyright_line_pattern.search(story_lines[-1]):\n story_lines = strip_empty_strings(story_lines[:-2])\n if story_id in stories_requiring_two_extra_newlines:\n story_text = \"\\n\\n\\n\".join(story_lines)\n elif story_id in stories_requiring_extra_newline:\n story_text = \"\\n\\n\".join(story_lines)\n else:\n story_text = \"\\n\".join(story_lines)\n\n story_text = story_text.replace(\"\\xe2\\x80\\xa2\", \"\\xe2\\u20ac\\xa2\")\n story_text = story_text.replace(\"\\xe2\\x82\\xac\", \"\\xe2\\u201a\\xac\")\n story_text = story_text.replace(\"\\r\", \"\\n\")\n if story_id in stories_to_decode_specially:\n story_text = story_text.replace(\"\\xe9\", \"\\xc3\\xa9\")\n story_id_to_text[story_id] = story_text\n\n pbar.update()\n\n if len(remaining_story_ids) == 0:\n break\n\n for row in display.tqdm(dataset.itertuples(), total=len(dataset), desc=\"Setting story texts\"):\n # Set story_text since we cannot include it in the dataset.\n story_text = story_id_to_text[row.story_id]\n dataset.at[row.Index, \"story_text\"] = story_text\n\n # Handle endings that are too large.\n answer_char_ranges = row.answer_char_ranges.split(\"|\")\n updated_answer_char_ranges = []\n ranges_updated = False\n for user_answer_char_ranges in answer_char_ranges:\n updated_user_answer_char_ranges = []\n for char_range in user_answer_char_ranges.split(\",\"):\n if char_range != \"None\":\n start, end = map(int, char_range.split(\":\"))\n if end > len(story_text):\n ranges_updated = True\n end = len(story_text)\n if start < end:\n updated_user_answer_char_ranges.append(\"%d:%d\" % (start, end))\n else:\n # It's unclear why but sometimes the end is after the start.\n # We'll filter these out.\n ranges_updated = True\n else:\n updated_user_answer_char_ranges.append(char_range)\n if updated_user_answer_char_ranges:\n updated_user_answer_char_ranges = \",\".join(updated_user_answer_char_ranges)\n updated_answer_char_ranges.append(updated_user_answer_char_ranges)\n if ranges_updated:\n updated_answer_char_ranges = 
\"|\".join(updated_answer_char_ranges)\n dataset.at[row.Index, \"answer_char_ranges\"] = updated_answer_char_ranges\n\n if row.validated_answers and not pd.isnull(row.validated_answers):\n updated_validated_answers = {}\n validated_answers = json.loads(row.validated_answers)\n for char_range, count in validated_answers.items():\n if \":\" in char_range:\n start, end = map(int, char_range.split(\":\"))\n if end > len(story_text):\n ranges_updated = True\n end = len(story_text)\n if start < end:\n char_range = \"{}:{}\".format(start, end)\n updated_validated_answers[char_range] = count\n else:\n # It's unclear why but sometimes the end is after the start.\n # We'll filter these out.\n ranges_updated = True\n else:\n updated_validated_answers[char_range] = count\n if ranges_updated:\n updated_validated_answers = json.dumps(\n updated_validated_answers, ensure_ascii=False, separators=(\",\", \":\")\n )\n dataset.at[row.Index, \"validated_answers\"] = updated_validated_answers\n\n # Process Splits\n data = []\n cache = dict()\n\n train_story_ids = set(\n pd.read_csv(os.path.join(task_data_path, \"train_story_ids.csv\"))[\"story_id\"].values\n )\n dev_story_ids = set(\n pd.read_csv(os.path.join(task_data_path, \"dev_story_ids.csv\"))[\"story_id\"].values\n )\n test_story_ids = set(\n pd.read_csv(os.path.join(task_data_path, \"test_story_ids.csv\"))[\"story_id\"].values\n )\n\n def _get_data_type(story_id_):\n if story_id_ in train_story_ids:\n return \"train\"\n elif story_id_ in dev_story_ids:\n return \"dev\"\n elif story_id_ in test_story_ids:\n return \"test\"\n else:\n return ValueError(\"{} not found in any story ID set.\".format(story_id))\n\n for row in display.tqdm(dataset.itertuples(), total=len(dataset), desc=\"Building json\"):\n questions = cache.get(row.story_id)\n if questions is None:\n questions = []\n datum = dict(\n storyId=row.story_id,\n type=_get_data_type(row.story_id),\n text=row.story_text,\n questions=questions,\n )\n cache[row.story_id] = questions\n data.append(datum)\n q = dict(\n q=row.question,\n answers=_map_answers(row.answer_char_ranges),\n isAnswerAbsent=row.is_answer_absent,\n )\n if row.is_question_bad != \"?\":\n q[\"isQuestionBad\"] = float(row.is_question_bad)\n if row.validated_answers and not pd.isnull(row.validated_answers):\n validated_answers = json.loads(row.validated_answers)\n q[\"validatedAnswers\"] = []\n for answer, count in validated_answers.items():\n answer_item = dict(count=count)\n if answer == \"none\":\n answer_item[\"noAnswer\"] = True\n elif answer == \"bad_question\":\n answer_item[\"badQuestion\"] = True\n else:\n s, e = map(int, answer.split(\":\"))\n answer_item[\"s\"] = s\n answer_item[\"e\"] = e\n q[\"validatedAnswers\"].append(answer_item)\n consensus_start, consensus_end = get_consensus_answer(row)\n if consensus_start is None and consensus_end is None:\n if q.get(\"isQuestionBad\", 0) >= 0.5:\n q[\"consensus\"] = dict(badQuestion=True)\n else:\n q[\"consensus\"] = dict(noAnswer=True)\n else:\n q[\"consensus\"] = dict(s=consensus_start, e=consensus_end)\n questions.append(q)\n\n phase_dict = {\n \"train\": [],\n \"val\": [],\n \"test\": [],\n }\n phase_map = {\"train\": \"train\", \"dev\": \"val\", \"test\": \"test\"}\n for entry in data:\n phase = phase_map[entry[\"type\"]]\n output_entry = {\"text\": entry[\"text\"], \"storyId\": entry[\"storyId\"], \"qas\": []}\n for qn in entry[\"questions\"]:\n if \"badQuestion\" in qn[\"consensus\"] or \"noAnswer\" in qn[\"consensus\"]:\n continue\n 
output_entry[\"qas\"].append({\"question\": qn[\"q\"], \"answer\": qn[\"consensus\"]})\n phase_dict[phase].append(output_entry)\n for phase, phase_data in phase_dict.items():\n py_io.write_jsonl(phase_data, os.path.join(task_data_path, f\"{phase}.jsonl\"))\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl\"),\n \"val\": os.path.join(task_data_path, \"val.jsonl\"),\n \"test\": os.path.join(task_data_path, \"val.jsonl\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n for file_name in file_name_list:\n os.remove(os.path.join(task_data_path, file_name))\n\n\ndef download_mrqa_natural_questions_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_file(\n \"https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NaturalQuestionsShort.jsonl.gz\",\n os.path.join(task_data_path, \"train.jsonl.gz\"),\n )\n download_utils.download_file(\n \"https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NaturalQuestionsShort.jsonl.gz\",\n os.path.join(task_data_path, \"val.jsonl.gz\"),\n )\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl.gz\"),\n \"val\": os.path.join(task_data_path, \"val.jsonl.gz\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_piqa_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_file(\n \"https://yonatanbisk.com/piqa/data/train.jsonl\",\n os.path.join(task_data_path, \"train.jsonl\"),\n )\n download_utils.download_file(\n \"https://yonatanbisk.com/piqa/data/train-labels.lst\",\n os.path.join(task_data_path, \"train-labels.lst\"),\n )\n download_utils.download_file(\n \"https://yonatanbisk.com/piqa/data/valid.jsonl\",\n os.path.join(task_data_path, \"valid.jsonl\"),\n )\n download_utils.download_file(\n \"https://yonatanbisk.com/piqa/data/valid-labels.lst\",\n os.path.join(task_data_path, \"valid-labels.lst\"),\n )\n download_utils.download_file(\n \"https://yonatanbisk.com/piqa/data/tests.jsonl\",\n os.path.join(task_data_path, \"tests.jsonl\"),\n )\n\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.jsonl\"),\n \"train_labels\": os.path.join(task_data_path, \"train-labels.lst\"),\n \"val\": os.path.join(task_data_path, \"valid.jsonl\"),\n \"val_labels\": os.path.join(task_data_path, \"valid-labels.lst\"),\n \"test\": os.path.join(task_data_path, \"tests.jsonl\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_winogrande_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_unzip(\n \"https://storage.googleapis.com/ai2-mosaic/public/winogrande/winogrande_1.1.zip\",\n task_data_path,\n )\n\n task_data_path = os.path.join(task_data_path, \"winogrande_1.1\")\n\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train_xl.jsonl\"),\n \"train_labels\": os.path.join(task_data_path, \"train_xl-labels.lst\"),\n \"train_xs\": os.path.join(task_data_path, \"train_xs.jsonl\"),\n \"train_xs_labels\": os.path.join(task_data_path, \"train_xs-labels.lst\"),\n \"train_s\": os.path.join(task_data_path, \"train_s.jsonl\"),\n 
\"train_s_labels\": os.path.join(task_data_path, \"train_s-labels.lst\"),\n \"train_m\": os.path.join(task_data_path, \"train_m.jsonl\"),\n \"train_m_labels\": os.path.join(task_data_path, \"train_m-labels.lst\"),\n \"train_l\": os.path.join(task_data_path, \"train_l.jsonl\"),\n \"train_l_labels\": os.path.join(task_data_path, \"train_l-labels.lst\"),\n \"train_xl\": os.path.join(task_data_path, \"train_xl.jsonl\"),\n \"train_xl_labels\": os.path.join(task_data_path, \"train_xl-labels.lst\"),\n \"val\": os.path.join(task_data_path, \"dev.jsonl\"),\n \"val_labels\": os.path.join(task_data_path, \"dev-labels.lst\"),\n \"test\": os.path.join(task_data_path, \"test.jsonl\"),\n },\n \"name\": task_name,\n },\n path=task_config_path,\n )\n\n\ndef download_ropes_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n os.makedirs(task_data_path, exist_ok=True)\n download_utils.download_and_untar(\n \"https://ropes-dataset.s3-us-west-2.amazonaws.com/train_and_dev/\"\n \"ropes-train-dev-v1.0.tar.gz\",\n task_data_path,\n )\n data_phase_list = [\"train\", \"dev\"]\n jiant_phase_list = [\"train\", \"val\"]\n for data_phase, jiant_phase in zip(data_phase_list, jiant_phase_list):\n os.rename(\n os.path.join(task_data_path, \"ropes-train-dev-v1.0\", f\"{data_phase}-v1.0.json\"),\n os.path.join(task_data_path, f\"{jiant_phase}.json\"),\n )\n shutil.rmtree(os.path.join(task_data_path, \"ropes-train-dev-v1.0\"))\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\n \"train\": os.path.join(task_data_path, \"train.json\"),\n \"val\": os.path.join(task_data_path, \"val.json\"),\n },\n \"name\": task_name,\n \"kwargs\": {\"include_background\": True},\n },\n path=task_config_path,\n )\n\n\ndef download_acceptability_judgments_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n dataset_name = {\n \"acceptability_definiteness\": \"definiteness\",\n \"acceptability_coord\": \"coordinating-conjunctions\",\n \"acceptability_whwords\": \"whwords\",\n \"acceptability_eos\": \"eos\",\n }[task_name]\n os.makedirs(task_data_path, exist_ok=True)\n # data contains all train/val/test examples\n # metadata contains the split indicators\n # (there are 10 CV folds, we use fold1 by default, see below)\n data_path = os.path.join(task_data_path, \"data.json\")\n metadata_path = os.path.join(task_data_path, \"metadata.json\")\n download_utils.download_file(\n url=\"https://raw.githubusercontent.com/decompositional-semantics-initiative/DNC/master/\"\n f\"function_words/ACCEPTABILITY/acceptability-{dataset_name}_data.json\",\n file_path=data_path,\n )\n download_utils.download_file(\n url=\"https://raw.githubusercontent.com/decompositional-semantics-initiative/DNC/master/\"\n f\"function_words/ACCEPTABILITY/acceptability-{dataset_name}_metadata.json\",\n file_path=metadata_path,\n )\n py_io.write_json(\n data={\n \"task\": task_name,\n \"paths\": {\"data\": data_path, \"metadata\": metadata_path},\n \"name\": task_name,\n \"kwargs\": {\"fold\": \"fold1\"}, # use fold1 (out of 10) by default\n },\n path=task_config_path,\n )\n\ndef download_rudepression_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n path = \"depression_hf\"\n examples_dict = download_utils.convert_hf_local_dataset_to_examples(\n path=path,\n name=\"rudepression\"\n )\n paths_dict = download_utils.write_examples_to_jsonls(\n examples_dict=examples_dict, task_data_path=task_data_path,\n )\n jiant_task_name = \"rudepression\"\n py_io.write_json(\n 
data={\"task\": jiant_task_name, \"paths\": paths_dict, \"name\": task_name},\n path=task_config_path,\n )\n\n\ndef download_rusentiment_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n path = \"rusentiment_hf\"\n examples_dict = download_utils.convert_hf_local_dataset_to_examples(\n path=path,\n name=\"rusentiment\"\n )\n paths_dict = download_utils.write_examples_to_jsonls(\n examples_dict=examples_dict, task_data_path=task_data_path,\n )\n jiant_task_name = \"rusentiment\"\n py_io.write_json(\n data={\"task\": jiant_task_name, \"paths\": paths_dict, \"name\": task_name},\n path=task_config_path,\n )\n\n\ndef download_ruhate_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n path = \"hate_hf\"\n examples_dict = download_utils.convert_hf_local_dataset_to_examples(\n path=path,\n name=\"ruhate\"\n )\n paths_dict = download_utils.write_examples_to_jsonls(\n examples_dict=examples_dict, task_data_path=task_data_path,\n )\n jiant_task_name = \"ruhate\"\n py_io.write_json(\n data={\"task\": jiant_task_name, \"paths\": paths_dict, \"name\": task_name},\n path=task_config_path,\n )\n\ndef download_ruhumor_data_and_write_config(task_name: str, task_data_path: str, task_config_path: str):\n path = \"fun_hf\"\n examples_dict = download_utils.convert_hf_local_dataset_to_examples(\n path=path,\n name=\"ruhumor\"\n )\n paths_dict = download_utils.write_examples_to_jsonls(\n examples_dict=examples_dict, task_data_path=task_data_path,\n )\n jiant_task_name = \"ruhumor\"\n py_io.write_json(\n data={\"task\": jiant_task_name, \"paths\": paths_dict, \"name\": task_name},\n path=task_config_path,\n )\n\n\n\ndef download_senteval_data_and_write_config(\n task_name: str, task_data_path: str, task_config_path: str\n):\n name_map = {\n \"senteval_bigram_shift\": \"bigram_shift\",\n \"senteval_coordination_inversion\": \"coordination_inversion\",\n \"senteval_obj_number\": \"obj_number\",\n \"senteval_odd_man_out\": \"odd_man_out\",\n \"senteval_past_present\": \"past_present\",\n \"senteval_sentence_length\": \"sentence_length\",\n \"senteval_subj_number\": \"subj_number\",\n \"senteval_top_constituents\": \"top_constituents\",\n \"senteval_tree_depth\": \"tree_depth\",\n \"senteval_word_content\": \"word_content\",\n }\n dataset_name = name_map[task_name]\n os.makedirs(task_data_path, exist_ok=True)\n # data contains all train/val/test examples, first column indicates the split\n data_path = os.path.join(task_data_path, \"data.tsv\")\n download_utils.download_file(\n url=\"https://raw.githubusercontent.com/facebookresearch/SentEval/master/data/probing/\"\n f\"{dataset_name}.txt\",\n file_path=data_path,\n )\n py_io.write_json(\n data={\"task\": task_name, \"paths\": {\"data\": data_path}, \"name\": task_name},\n path=task_config_path,\n )\n" ]
[ [ "pandas.isnull" ] ]
rauldiaz/PointNetLK
[ "23e26d2668d82578a5c9a129555f52e10104499b" ]
[ "experiments/icp.py" ]
[ "\"\"\" ICP algorithm\n\n References:\n (ICP)\n [1] Paul J. Besl and Neil D. McKay,\n \"A method for registration of 3-D shapes\",\n PAMI Vol. 14, Issue 2, pp. 239-256, 1992.\n (SVD)\n [2] K. S. Arun, T. S. Huang and S. D. Blostein,\n \"Least-Squares Fitting of Two 3-D Point Sets\",\n PAMI Vol. 9, Issue 5, pp.698--700, 1987\n\"\"\"\nimport numpy as np\nfrom scipy.spatial import KDTree\n\ndef _icp_find_rigid_transform(p_from, p_target):\n A, B = np.copy(p_from), np.copy(p_target)\n\n centroid_A = np.mean(A, axis=0)\n centroid_B = np.mean(B, axis=0)\n\n A -= centroid_A\n B -= centroid_B\n\n H = np.dot(A.T, B)\n U, S, Vt = np.linalg.svd(H)\n R = np.dot(Vt.T, U.T)\n\n # special reflection case\n if np.linalg.det(R) < 0:\n Vt[2,:] *= -1\n R = np.dot(Vt.T, U.T)\n\n t = np.dot(-R, centroid_A) + centroid_B\n\n return R, t\n\ndef _icp_Rt_to_matrix(R, t):\n # matrix M = [R, t; 0, 1]\n Rt = np.concatenate((R, np.expand_dims(t.T, axis=-1)), axis=1)\n a = np.concatenate((np.zeros_like(t), np.ones(1)))\n M = np.concatenate((Rt, np.expand_dims(a, axis=0)), axis=0)\n return M\n\nclass ICP:\n \"\"\" Estimate a rigid-body transform g such that:\n p0 = g.p1\n \"\"\"\n def __init__(self, p0, p1):\n \"\"\" p0.shape == (N, 3)\n p1.shape == (N, 3)\n \"\"\"\n self.p0 = p0\n self.p1 = p1\n leafsize = 1000\n self.nearest = KDTree(self.p0, leafsize=leafsize)\n self.g_series = None\n\n def compute(self, max_iter):\n ftol = 1.0e-7\n dim_k = self.p0.shape[1]\n g = np.eye(dim_k + 1, dtype=self.p0.dtype)\n p = np.copy(self.p1)\n\n self.g_series = np.zeros((max_iter + 1, dim_k + 1, dim_k + 1), dtype=g.dtype)\n self.g_series[0, :, :] = g\n\n itr = -1\n for itr in range(max_iter):\n neighbor_idx = self.nearest.query(p)[1]\n targets = self.p0[neighbor_idx]\n R, t = _icp_find_rigid_transform(p, targets)\n\n new_p = np.dot(R, p.T).T + t\n if np.sum(np.abs(p - new_p)) < ftol:\n break\n\n p = np.copy(new_p)\n dg = _icp_Rt_to_matrix(R, t)\n new_g = np.dot(dg, g)\n g = np.copy(new_g)\n self.g_series[itr + 1, :, :] = g\n\n self.g_series[(itr+1):, :, :] = g\n\n return g, p, (itr + 1)\n\n\n\ndef icp_test():\n from math import sin, cos\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n Y, X = np.mgrid[0:100:5, 0:100:5]\n Z = Y ** 2 + X ** 2\n A = np.vstack([Y.reshape(-1), X.reshape(-1), Z.reshape(-1)]).T\n\n R = np.array([\n [cos(-0.279), -sin(-0.279), 0],\n [sin(-0.279), cos(-0.279), 0],\n [0, 0, 1]\n ])\n #R = np.eye(3)\n\n t = np.array([5.0, 20.0, 10.0])\n #t = np.array([0.0, 0.0, 0.0])\n\n B = np.dot(R, A.T).T + t\n A = A.astype(B.dtype)\n\n icp = ICP(A, B)\n matrix, points, itr = icp.compute(10)\n\n print(itr)\n print(icp.g_series)\n print(icp.g_series[itr])\n print(matrix)\n print(R.T)\n print(np.dot(-R.T, t))\n\n fig = plt.figure()\n #ax = Axes3D(fig)\n ax = fig.add_subplot(111, projection='3d')\n\n ax.set_label(\"x - axis\")\n ax.set_label(\"y - axis\")\n ax.set_label(\"z - axis\")\n\n ax.plot(A[:,1], A[:,0], A[:,2], \"o\", color=\"#cccccc\", ms=4, mew=0.5)\n ax.plot(points[:,1], points[:,0], points[:,2], \"o\", color=\"#00cccc\", ms=4, mew=0.5)\n ax.plot(B[:,0], B[:,1], B[:,2], \"o\", color=\"#ff0000\", ms=4, mew=0.5)\n\n plt.show()\n\nif __name__ == '__main__':\n icp_test()\n\n#EOF" ]
[ [ "numpy.dot", "numpy.linalg.svd", "numpy.expand_dims", "numpy.abs", "numpy.eye", "numpy.ones", "numpy.linalg.det", "numpy.copy", "scipy.spatial.KDTree", "numpy.mean", "numpy.zeros_like", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
austospumanto/kedro
[ "4f89c8fd32c6660affa5ff7d4fe2b096d5de9c95" ]
[ "kedro/extras/datasets/pandas/xml_dataset.py" ]
[ "# Copyright 2021 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"``XMLDataSet`` loads/saves data from/to a XML file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses pandas to handle the XML file.\n\"\"\"\nimport logging\nfrom copy import deepcopy\nfrom io import BytesIO\nfrom pathlib import PurePosixPath\nfrom typing import Any, Dict\n\nimport fsspec\nimport pandas as pd\n\nfrom kedro.io.core import (\n PROTOCOL_DELIMITER,\n AbstractVersionedDataSet,\n DataSetError,\n Version,\n get_filepath_str,\n get_protocol_and_path,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass XMLDataSet(AbstractVersionedDataSet):\n \"\"\"``XMLDataSet`` loads/saves data from/to a XML file using an underlying\n filesystem (e.g.: local, S3, GCS). 
It uses pandas to handle the XML file.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.pandas import XMLDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],\n >>> 'col3': [5, 6]})\n >>>\n >>> # data_set = XMLDataSet(filepath=\"gcs://bucket/test.xml\")\n >>> data_set = XMLDataSet(filepath=\"test.xml\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n\n \"\"\"\n\n DEFAULT_LOAD_ARGS = {} # type: Dict[str, Any]\n DEFAULT_SAVE_ARGS = {\"index\": False} # type: Dict[str, Any]\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n filepath: str,\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n credentials: Dict[str, Any] = None,\n fs_args: Dict[str, Any] = None,\n ) -> None:\n \"\"\"Creates a new instance of ``XMLDataSet`` pointing to a concrete XML file\n on a specific filesystem.\n\n Args:\n filepath: Filepath in POSIX format to a XML file prefixed with a protocol like `s3://`.\n If prefix is not provided, `file` protocol (local filesystem) will be used.\n The prefix should be any protocol supported by ``fsspec``.\n Note: `http(s)` doesn't support versioning.\n load_args: Pandas options for loading XML files.\n Here you can find all available arguments:\n https://pandas.pydata.org/docs/reference/api/pandas.read_xml.html\n All defaults are preserved.\n save_args: Pandas options for saving XML files.\n Here you can find all available arguments:\n https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_xml.html\n All defaults are preserved, but \"index\", which is set to False.\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n credentials: Credentials required to get access to the underlying filesystem.\n E.g. for ``GCSFileSystem`` it should look like `{\"token\": None}`.\n fs_args: Extra arguments to pass into underlying filesystem class constructor\n (e.g. 
`{\"project\": \"my-project\"}` for ``GCSFileSystem``).\n \"\"\"\n _fs_args = deepcopy(fs_args) or {}\n _credentials = deepcopy(credentials) or {}\n\n protocol, path = get_protocol_and_path(filepath, version)\n if protocol == \"file\":\n _fs_args.setdefault(\"auto_mkdir\", True)\n\n self._protocol = protocol\n self._storage_options = {**_credentials, **_fs_args}\n self._fs = fsspec.filesystem(self._protocol, **self._storage_options)\n\n super().__init__(\n filepath=PurePosixPath(path),\n version=version,\n exists_function=self._fs.exists,\n glob_function=self._fs.glob,\n )\n\n # Handle default load and save arguments\n self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)\n if load_args is not None:\n self._load_args.update(load_args)\n self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)\n if save_args is not None:\n self._save_args.update(save_args)\n\n if \"storage_options\" in self._save_args or \"storage_options\" in self._load_args:\n logger.warning(\n \"Dropping `storage_options` for %s, \"\n \"please specify them under `fs_args` or `credentials`.\",\n self._filepath,\n )\n self._save_args.pop(\"storage_options\", None)\n self._load_args.pop(\"storage_options\", None)\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n protocol=self._protocol,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def _load(self) -> pd.DataFrame:\n load_path = str(self._get_load_path())\n if self._protocol == \"file\":\n # file:// protocol seems to misbehave on Windows\n # (<urlopen error file not on local host>),\n # so we don't join that back to the filepath;\n # storage_options also don't work with local paths\n return pd.read_xml(load_path, **self._load_args)\n\n load_path = f\"{self._protocol}{PROTOCOL_DELIMITER}{load_path}\"\n return pd.read_xml(\n load_path, storage_options=self._storage_options, **self._load_args\n )\n\n def _save(self, data: pd.DataFrame) -> None:\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n buf = BytesIO()\n data.to_xml(path_or_buffer=buf, **self._save_args)\n\n with self._fs.open(save_path, mode=\"wb\") as fs_file:\n fs_file.write(buf.getvalue())\n\n self._invalidate_cache()\n\n def _exists(self) -> bool:\n try:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n except DataSetError:\n return False\n\n return self._fs.exists(load_path)\n\n def _release(self) -> None:\n super()._release()\n self._invalidate_cache()\n\n def _invalidate_cache(self) -> None:\n \"\"\"Invalidate underlying filesystem caches.\"\"\"\n filepath = get_filepath_str(self._filepath, self._protocol)\n self._fs.invalidate_cache(filepath)\n" ]
[ [ "pandas.read_xml" ] ]
CorbinFoucart/FEMexperiment
[ "9bad34d9ed7cbdd740e3a4b67f433779dd53b264" ]
[ "codes/src/msh/mesh.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@package src.msh.mesh\nCreated on Sun Apr 24 20:51:08 2011\nContains the Mesh classes\n\n@author: Matt Ueckermann\n@author: Corbin Foucart\n@note While the elements are certainly numbered CCW, the edges may not be. The\n edge numbering comes from mk_basis.int_el_pqr, and is done in the frame\n of references of the master element, which may not be the same as how\n the element is numbered in real space. Tricky...\n\n\"\"\"\nimport numpy as np\nimport src.pyutil as util\nimport code, pdb\nimport src.msh.util as mshu\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom src.msh.util import fix, simpvol2D, connect_elm\n#import msh.plot as plt\nimport copy\n\nclass Mesh():\n \"\"\"\n Parent class of Mesh2D and Mesh3D for shared functionality.\n\n Aim is to prevent duplication of code common to both Mesh2D and Mesh3D\n classes, since they otherwise would not share a parent.\n \"\"\"\n\n def get_global_vertex_numbers(self, globEdNum=None):\n \"\"\" returns the global vertex numbers given a global edge nubmer of the\n mesh\n\n Recall that ed2ed has the following structure for each row\n (corresponding to a single edge in the mesh:\n\n [(+) elm #, (+) side local edge #, (-) elm#, (-) elm loc edge #, vertex\n 1, vertex 2...] (0:2) (2:4)\n (4:) \"\"\"\n return self.ed2ed[globEdNum, 4:].ravel()\n\n def get_adjacent_elm(self, globalEdgeNum, side):\n \"\"\" returns the adjacent element number on the specified side of the\n given global edge number @param globalEdgeNum - the global edge number\n @param side - either 'LEFT' or 'RIGHT' where in ed2ed the LEFT element\n is given first, then the right element.\n\n # to get the left adjacent element of glboal edge 23\n mesh.get_adjacent_elm(23, 'LEFT') \"\"\"\n # assign the index based on the structure of the ed2ed rows\n if side == 'LEFT':\n elmIdx = 0\n elif side == 'RIGHT':\n elmIdx = 2\n else:\n raise ValueError(\"Invalid edge side specification\")\n return self.ed2ed[globalEdgeNum, elmIdx].ravel()[0]\n\n def get_adjacent_elm_local_edge(self, globalEdgeNum, side):\n \"\"\"\n For a global edge, returns the corresponding local edge number for the\n adjacent element on the specified side.\n\n @param globalEdgeNum - the global edge number @param side - either\n 'LEFT' or 'RIGHT' where in ed2ed the LEFT element is given first, then\n the right element.\n\n Each global edge has a left element, and possibly a right element (not\n in the case of a boundary). In the case where the adjacent element\n exists, the global edge is spatially corresponds to one of the local\n edges on the adjacent element. This information is contained in the\n ed2ed connectivity array, and this helper function represents a way for\n the user of the mesh class to retrieve this information without knowing\n the internals of the mesh data structure.\n\n NOTE: if the element to the right hand side does not exist (boundary\n case), then the return value will be -1.\n \"\"\"\n # assign the index based on the structure of the ed2ed rows\n if side == 'LEFT':\n elmIdx = 0\n elif side == 'RIGHT':\n elmIdx = 2\n else:\n raise ValueError(\"Invalid edge side specification\")\n return self.ed2ed[globalEdgeNum, elmIdx + 1].ravel()[0]\n\nclass Mesh2D(Mesh):\n elm_enumeration = {\"TRIANGLE\":0, \"QUAD\":1}\n def __init__(self, elm, vert):\n '''\n We are assuming only two types of elements here. 2D triangles and 2D\n quadrilaterals.\n\n @param elm (\\c int) Numpy array that defines the elements. 
Each row\n is an element, and each column indicates a vertex of the\n element. That is elm[0, 0] gives the global vertex number\n of vertex 0 of element 0.\n @param vert (\\c float) Numpy array that gives the spatial coordinates\n of the global vertices.\n\n @note The elm and vert inputs may be inputted as a tuple, for\n convenience.\n @code\n >>> import msh\n >>> t,p = msh.mk.struct2D()\n >>> mesh = msh.Mesh2D(t,p)\n >>> #OR equivalently\n >>> mesh = msh.Mesh2D(msh.mk.struct2D())\n @endcode\n\n @author Matt Ueckermann\n '''\n dim = 2\n elm, vert = fix(elm, vert) # CCW ordering of element nodes\n elm = mshu.sort_by_element_type(elm) # Tri first, then quads\n\n n_elm = len(elm)\n n_tri = (elm[:,3] < 0).sum()\n\n n_elm_type = [n_tri, n_elm - n_tri]\n if n_tri == 0: n_elm_type = [n_elm] # all quads\n if n_elm == n_tri: n_elm_type = [n_elm] # all tris\n\n #Now sort by vertex number\n if n_tri > 0: elm[0:n_tri, :] = mshu.sort_by_vertex_number(elm[0:n_tri, :])\n if n_tri < n_elm: elm[n_tri:n_elm, :] = mshu.sort_by_vertex_number(\n elm[n_tri:n_elm, :])\n\n elm_type = np.zeros(n_elm, dtype=int)\n if n_tri == n_elm: u_elm_type = [self.elm_enumeration[\"TRIANGLE\"]]\n elif n_tri == 0: u_elm_type = [self.elm_enumeration[\"QUAD\"]]\n else:\n u_elm_type = [self.elm_enumeration[\"TRIANGLE\"],\n self.elm_enumeration[\"QUAD\"]]\n elm_type[n_tri:n_elm] = self.elm_enumeration[\"QUAD\"]\n\n #create the connectivity matrixes (Which needs the element enumerated type)\n elm2elm, ed2ed = connect_elm(elm, np.array(u_elm_type)[elm_type], dim,\n u_elm_type)\n\n ## The element connectivity matrix. elm2elm[i, j] gives the element\n # number which is connected to element i, through edge j of element i.\n self.elm2elm = elm2elm\n\n ## The edge connectivity matrix.\n # ed2ed[i, 0:2] gives the [element #, local edge #] of the plus-side element.\n # ed2ed[i, 2:4] gives the [element #, local edge #] of the minus-side element.\n # ed2ed[i, 4:] gives the vertices that make up the edge.\n # numbered CCW with outward-point normal (according to Right-hand rule)\n # CF: This seems to indicate that left is plus, right is minus\n self.ed2ed = ed2ed\n\n ##A boolean array used to select the interior edges only\n self.ids_interior_ed = (self.ed2ed[:, 2] >= 0).ravel()\n\n ##A boolean array used to select the exterior edges only\n self.ids_exterior_ed = (ed2ed[:, 2] < 0).ravel()\n\n ## The triangulation matrix that defined each element.\n # elm[i, :] gives the global vertex numbers that make up the element.\n # This matrix is ordered such that the first num2Dtri elements are\n # triangular elements, while the remaining ones are quadrilaterals.\n self.elm = elm\n\n ## The different unique types of elements in the triangulation.\n # TODO: CF: this is differently abled -- if only there were some type of\n # ... dictionary in python which could take a readable name as a key...\n self.u_elm_type = np.array(u_elm_type, dtype = int)\n\n ## The element type. u_elm_type[elm_type[i]] gives the type of element\n #for global element number i.\n self.elm_type = elm_type\n\n ## The edge element type. u_ed_type[elm_type[i]] gives the type of\n #edge element for global edge element number i. For 2D, there is only\n #one edge type -- lines\n self.ed_type = np.zeros(len(self.ed2ed), dtype=int)\n\n ## The different unique types of edges in the triangulation. 
for 2D\n # there is only the one type -- lines\n self.u_ed_type = np.array([0], dtype = int)\n\n ## Gives the total number of elements in the triangulation.\n # The number of triangles is given by n_tri, and the number of\n # quads can be calculated using n_elm-n_tri\n self.n_elm = n_elm\n\n ## Gives the number of elements of a particular type in the triangulation\n self.n_elm_type = n_elm_type\n\n ## Gives the total number of edges in the triangulation.\n self.n_ed = len(self.ed2ed)\n\n ## Gives the number of edge elements of a particular type\n self.n_ed_type = [len(self.ed2ed)]\n\n # Array giving the x-y coordinates of the global vertices in the\n # triangulation.\n self.vert = vert\n\n # The dimension of the mesh, dim=2, since this Mesh2D is exclusively for\n # 2D meshes.\n self.dim = dim\n\n ##Vertex map, maps the vertex number from one periodic edge to the\n #other. This map is needed when comparing the orientation of the edge\n #on the element to the orientation of the periodic edge. The element on\n #the right will not have matching vertex numbers, because it's edge\n #used to be a boundary edge, but has disappeared because of the\n #periodicity.\n # EG. in 1D:\n # [0] a1--(A)--a0 1 b0--(B)--b1 [2] ==> 0 --(A)-- 1 --(B)-- 0\n # ed2ed = [A a0 B b0 1 ed2ed = [A a0 B b0 1\n # A a1 -1 -1 0 ==> A a1 B b1 0]\n # B b1 -1 -1 2]\n # elm = [0 1 ==> elm = [0 1\n # 1 2] 1 2]\n #\n #This array is populated in the msh.mk.periodic function\n self.vertmap = None\n\n #Next we have to build a rather annoying structure -- the elements have\n #global numbers -- however the data is stored/organized according to the\n #element type. So within the element type, the element will have a\n #different number/location. The next structure figures out what that\n #element number is. The same goes for the edge types\n ## The \"global element number\" to \"element number within a type\"\n # conversion array. For example, the data for global element number i\n # is stored in field[elm_type[i]][:, :, glob2type_elm[i]].\n self.glob2type_elm = np.zeros(self.n_elm, dtype=int)\n sumtype = [0] * len(self.u_elm_type)\n for i in range(self.n_elm):\n elm_type = self.elm_type[i]\n self.glob2type_elm[i] = sumtype[elm_type]\n sumtype[elm_type] += 1\n\n ## The \"global edge number\" to \"edge number within a type\"\n # conversion array. For example, the data for global edge number i\n # is stored in field_ed[ed_type[i]][:, :, glob2type_ed[i]].\n self.glob2type_ed = np.zeros(self.n_ed, dtype=int)\n sumtype = [0] * len(self.u_ed_type)\n for i in range(self.n_ed):\n ed_type = self.ed_type[i]\n self.glob2type_ed[i] = sumtype[ed_type]\n sumtype[ed_type] += 1\n\n ##A list of boolian arrays used to select the interior edges only\n self.ids_interior_ed_by_type = [self.ids_interior_ed[self.ed_type == i] \\\n for i in range(len(self.n_ed_type))]\n\n ##A list of boolian arrays used to select the exterior edges only\n self.ids_exterior_ed_by_type = [self.ids_exterior_ed[self.ed_type == i] \\\n for i in range(len(self.n_ed_type))]\n\n ##Index mapping array from ed_type edge id number to ed_bc_type id\n #number. Basically, in the solver we will refer to, for e.g. the\n #data field_ed[i][:, :, j], where j refers to a boundary edge, numbered\n #according to the element-type local id number. The boundary condition\n #data is stored in an array smaller that field_ed, that is, field_ed_bc\n #contains ONLY the boundary condition information, so calling\n #field_ed_bc[i][:, :, j] will exceed the array bounds. 
Instead we call\n #field_ed_bc[i][:, :, in2ex_bcid[j]].\n #TODO: Determine if this array is actually still needed\n # (Indexing has been improved since the below was implemented)\n self.in2ex_bd_id = [ex.cumsum()-1 for ex in self.ids_exterior_ed_by_type]\n\n def connectivity_by_elm_type(self, elm_type):\n \"\"\" returns the connectivity list for a single element type \"\"\"\n conn_mask = self.elm_type == self.elm_enumeration[elm_type]\n if elm_type == \"TRIANGLE\":\n return self.elm[conn_mask, :3] # ignore -1\n elif elm_type == \"QUAD\":\n return self.elm[conn_mask, :]\n else:\n raise ValueError(\"elm_type not understood\")\n\n def edge_vertex_numbers(self):\n \"\"\" returns the edge vertex numbers in the mesh \"\"\"\n return self.ed2ed[:, 4:]\n\n def edge_vertices(self):\n \"\"\" returns the points of all the edge vertices in the mesh\n @retval ed_verts (n_edges, dim, local vertex number (0, 1))\n \"\"\"\n ed = self.edge_vertex_numbers()\n return self.vert[:,:2][ed]\n\n def fix(self):\n ''' Function that ensures the the elements are properly numbered in\n a counter-clockwise fashion, with no crosses. This function updates the\n elm and vert data members.\n @see msh.util.fix\n '''\n self.elm, self.vert = fix(self.elm, self.vert)\n\n def vol(self, ids=None):\n return simpvol2D(self.elm, self.vert) if ids is None else simpvol2D(self.elm[ids,:], self.vert)\n\n def set_bc_ids(self, bc_id_lambda):\n \"\"\"To change the default id number for boundary conditions, you can\n use this function\n\n @param bc_id_lambda (\\c lambda function) List of lambda functions. The\n id of the list determines the id of the boundary.\n bc_id_lambda = lambda (p): f(p)\n where p is a numpy array with p.shape = (n_ext_ed, dim) with the\n centroids of the edges. bc_id_lambda[i](p) should evaluate to\n True if that edge should have the id '-i'.\n\n CF: Note that the list of boundaries are traversed in the order they\n occur in the list bc_id_lambda, so the final ID is the LAST index of\n the containing the lambda function which returns true when called on\n the edge centroid.\n \"\"\"\n #Find edge centroids\n ids = (self.ids_interior_ed == False).nonzero()[0]\n vts = self.ed2ed[ids, 4:]\n\n p = np.array([coord[vts].mean(1) for coord in self.vert[:].T]).T\n\n for i in range(len(bc_id_lambda)):\n self.ed2ed[ids[bc_id_lambda[i](p)], 2:3] = -i - 1\n\n #Boundary condition information\n tot_bc_ids = -min(self.ed2ed[self.ids_exterior_ed, 2])\n\n ##Total number of different bc_ids\n self.n_bc_id = tot_bc_ids\n\n def write_mesh_to_vtk(self, filename):\n \"\"\"\n author: CF\n write the 2D mesh out to VTK file so that it can be viewed in Paraview\n or some similar software\n \"\"\"\n\n pts, conn = self.vert, self.elm\n Points, Cells = vtk.vtkPoints(), vtk.vtkCellArray()\n\n # add node / connectivity information to VTK object\n for pt in pts:\n Points.InsertNextPoint(pt)\n\n for cn in conn:\n cell = vtk.vtkTriangle() if cn[-1] == -1 else vtk.vtkQuad()\n for idx, pt in enumerate(cn):\n if pt != -1:\n cell.GetPointIds().SetId(idx, pt)\n Cells.InsertNextCell(cell)\n\n polydata = vtk.vtkPolyData()\n polydata.SetPoints(Points)\n polydata.SetPolys(Cells)\n\n # write VTK object to file\n polydata.Modified()\n if vtk.VTK_MAJOR_VERSION <= 5:\n polydata.Update()\n\n writer = vtk.vtkXMLPolyDataWriter();\n writer.SetFileName(filename);\n if vtk.VTK_MAJOR_VERSION <= 5:\n writer.SetInput(polydata)\n else:\n writer.SetInputData(polydata)\n writer.Write()\n\n###############################################################################\n\nclass 
Extrude2D:\n \"\"\" This is the base class for translating 2D data to 3D data. This class\n should probably never be actually used.\n \"\"\"\n\n def __init__(self, data_2D, data_extrude):\n #set Default length\n self._len = len(data_2D)\n\n def __len__(self):\n return self._len\n\n def _key2globids(self, key, length):\n \"\"\" By default this class uses numpy arrays for indexing. If the user\n inputs slice data, we need to turn this into a list of ids in numpy\n array format.\n \"\"\"\n start = key.start\n stop = key.stop\n step = key.step\n\n #Debugging check\n #print \"start:\", start, \" stop:\", stop, \" step:\", step\n if key.stop == None:\n stop = length\n elif key.stop < 0:\n stop = length + key.stop + 1\n #If I don't add this line, I get this strange error where, if I call\n #v3d[:], stop is equal to a very large number...\n #Uncomment the following lines to NOT get the error\n elif stop > length:\n stop = length\n if key.start == None:\n start = 0\n elif key.start < 0:\n start = length + key.start\n if key.step == None:\n step = 1\n\n #global ids are the indices in the 3D frame of reference (global frame)\n globids = np.array(range(start, stop, step), dtype=int)\n return globids\n\n def __getitem__(self, key):\n \"\"\"\n This makes 2D data act like 3D data, but is accessed the same way as\n a normal numpy array. The main purpose of this function is to do the\n user input parsing, which will be inherited.\n \"\"\"\n #Do some input parsing\n\n #By default, the x, y, and z-coordinates are returned\n xyz_key = np.array(range(self._dim))\n\n #Now parse which vertices get returned. There are a number of allowed\n #cases. For the first set, the user only inputs one set of indices:\n #1. User inputs a slice (i.e. Extrude_2D[start:stop:step])\n #2. User inputs a single integer (i.e. Extrude_2D[7])\n #3. User inputs a list (i.e. Extrude_2D[[1,2,3,4,6]])\n #4. User inputs a numpy array (i.e. Extrude_2D[np.array([1,2,3,4,6])])\n\n #Also allowed is a tuple or list of indices. 
This tuple should allow\n #for any combination of the above four cases.\n #For example, Extrude_2D(:, [0,2]), Extrude_2D(1, :), and\n # Extrude_2D[[1, [0,2]]] are all allowed.\n\n #First handle the single-input cases that are not lists or numpy arrays\n if type(key) == slice:\n globids = self._key2globids(key, self._len)\n elif type(key) == int:\n if key < 0:\n globids = np.array([key + self._len], dtype=int)\n else:\n globids = np.array([key], dtype=int)\n #Next do tuples\n elif type(key) == tuple:\n #First index of the tuple\n if type(key[0]) == slice:\n globids = self._key2globids(key[0], self._len)\n elif type(key[0]) == list:\n globids = np.array(key[0], dtype=int)\n elif type(key[0]) == int:\n if key[0] < 0:\n globids = np.array([key[0] + self._len], dtype=int)\n else:\n globids = np.array([key[0]], dtype=int)\n else: #numpy array assumed if nothing else\n globids = key[0]\n #second index of the tuple\n if type(key[1]) == slice:\n xyz_key = self._key2globids(key[1], self._dim)\n elif type(key[1]) == list:\n xyz_key = np.array(key[1], dtype=int)\n elif type(key[1]) == int:\n if key[1] < 0:\n xyz_key = np.array([key[1] + self._dim], dtype=int)\n else:\n xyz_key = np.array([key[1]], dtype=int)\n else:\n xyz_key = key[1]\n #Now handle the case of the list\n elif type(key) == list:\n if len(key) == 1:\n globids = np.array(key, dtype=int)\n elif (type(key[0]) != list) and (type(key[1]) != list):\n globids = np.array(key, dtype=int)\n else:\n if type(key[0]) == list:\n globids = np.array(key[0], dtype=int)\n elif type(key[0]) == int:\n if key[0] < 0:\n globids = np.array([key[0] + self._len], dtype=int)\n else:\n globids = np.array([key[0]], dtype=int)\n else:\n globids = key[0]\n if type(key[1]) == list:\n xyz_key = np.array(key[1], dtype=int)\n elif type(key[1]) == int:\n if key[1] < 0:\n xyz_key = np.array([key[1] + self._dim], dtype=int)\n else:\n xyz_key = np.array([key[1]], dtype=int)\n else:\n xyz_key = key[1]\n else: #Finally, assumed a single numpy array if nothing else\n globids = np.array(key, dtype=int)\n\n #Now return the 3D data, using the _return_rule function to figure out\n #exactly what that means.\n return self._return_rule(globids, xyz_key)\n\n def _return_rule(self, globids, xyz_key):\n \"\"\"This is the function that determines what is returned and how.\n \"\"\"\n pass\n\n#Now define the first class that inherits from Extrude_2D\nclass Vert2D_extrude(Extrude2D):\n \"\"\" Let's users interact with a data-set of extrude rules\n and 2D points as though it was a data-set of 3D points.\n \"\"\"\n def __init__(self, vert, dz):\n \"\"\"The constructor for this object assigns the required data\n structures.\n @param vert (\\c float) Numpy array of 2D vertices\n @param dz (\\c float) Numpy array of \\f$\\Delta z\\f$'s associated with\n each vertex. 
dz.shape = (len(vert), nlevels)\n\n @author Matt Ueckermann\n \"\"\"\n ##Private copy of the 2D vertices\n self._vert2D = vert\n ##private copy of the \\f$\\Delta z\\f$'s transformed to z-coordinate\n #points\n self._z = np.column_stack((np.zeros((len(vert),1)), -np.cumsum(dz, 1)))\n ##Private copy of the number of z-levels\n self._zlevels = len(dz[0]) + 1\n ##The defined length for this class, in this case the number of vertices\n #times the number of zlevels\n self._len = len(vert) * self._zlevels\n ##The dimension of the problem, in this case clearly 3D\n self._dim = 3\n ##To mimick numpy arrays, we define the shape variable\n self.shape = (self._len, self._dim)\n\n def _return_rule(self, globids, xyz_key):\n \"\"\"This routine defines the return rule based on a global id number.\n Basically how to combine the 2D information and extrude information\n to get 3D information\n \"\"\"\n #Debugging print statements\n #print globids, xyz_key\n vert2D_ids = globids / self._zlevels\n z_ids = globids % self._zlevels\n\n if np.any(xyz_key == 2):\n #This is a little ugly, but it basically takes columns of 2D\n #vertices and attaches or stacks to that colums of the z-coordinate.\n #THEN it selects the second index that the user wanted.\n return np.column_stack((self._vert2D[vert2D_ids, 0:self._dim-1], \\\n self._z[vert2D_ids, z_ids]))[:, xyz_key]\n else:\n return self._vert2D[np.ix_(vert2D_ids, xyz_key)]\n\n#Next define the class that does the 3D vertices.\nclass Elm2D_extrude(Extrude2D):\n \"\"\"Let's users interact with a 2D triangulation and the number of extruded\n elements as if it was a 3D triangulation\n \"\"\"\n def __init__(self, elm, levels):\n \"\"\"The constructor for this object assigns the required data\n structures.\n @param elm (\\c int) Numpy array of 2D triangulation\n @param levels (\\c int) Number of vertices in the vertical. 
Note,\n zlevels = levels + 1, where levels is the number of\n ELEMENTS in the vertical\n\n @author Matt Ueckermann\n \"\"\"\n ## The base 2D triangulation using the global vertex numbers\n self._elm2D = elm * (levels + 1)\n ##Private copy of the number of levels\n self._levels = levels\n\n ##The defined length for this class, in this case the number of elements\n #times the number of levels\n self._len = len(elm) * self._levels\n ##The dimension of the problem, in this case, means the maximum number\n #of vertices in a 3D element.\n self._dim = 8 #for extruded quads\n\n ##To mimick numpy arrays, we define the shape variable\n self.shape = (self._len, self._dim)\n\n def _return_rule(self, globids, xyz_key):\n \"\"\"This routine defines the return rule based on a global id number.\n Basically how to combine the 2D information and extrude information\n to get 3D information\n \"\"\"\n #Debugging print statements\n #print globids, xyz_key\n elm2D_ids = globids / self._levels\n #slightly tricky, we multiplied elm2D by zlevels, but since we only have\n #levels element in each column, we divide by levels, not zlevels when\n #figuring out the element ids\n z_ids = np.tile(globids % self._levels, (4, 1)).T\n\n tmp = np.column_stack((self._elm2D[elm2D_ids, :] + 1 + z_ids, \\\n self._elm2D[elm2D_ids, :] + z_ids))\n tmp[tmp < 0] = -1\n tmp[tmp[:, 7] < 0, 0:7] = \\\n tmp[np.ix_(tmp[:, 7] < 0, [0, 1, 2, 4, 5, 6, 7])]\n return tmp[:, xyz_key]\n\n#Now we do the element types\nclass Elm_type2D_extrude(Extrude2D):\n \"\"\"Let's users interact with a 2D element connectivity and the # of extruded\n elements as if it was a 3D element connectivity matrix\n \"\"\"\n def __init__(self, elm_type, levels):\n \"\"\"The constructor for this object assigns the required data\n structures.\n @param elm_type (\\c int) Numpy array of element types for a\n 2D triangulation\n @param levels (\\c int) Number of vertices in the vertical. Note,\n zlevels = levels + 1, where levels is the number of\n ELEMENTS in the vertical and zlevels the number of\n vertices.\n\n @author Matt Ueckermann\n \"\"\"\n ## The base 2D triangulation using the element numbers\n self._elm_type2D = elm_type\n\n ##Private copy of the number of levels\n self._levels = levels\n\n ##The defined length for this class, in this case the number of elements\n #times the number of levels\n self._len = len(elm_type) * self._levels\n ##The dimension of the problem, in this case, means the maximum number\n #of vertices in a 3D element.\n self._dim = 1 #for extruded quads\n\n ##To mimick numpy arrays, we define the shape variable\n self.shape = (self._len, self._dim)\n\n def _return_rule(self, globids, xyz_key):\n \"\"\"This routine defines the return rule based on a global id number.\n Basically how to combine the 2D information and extrude information\n to get 3D information\n \"\"\"\n #This one is pretty simply. The elements in the column will have the\n #same type\n elm2D_ids = globids / self._levels\n\n return self._elm_type2D[elm2D_ids]\n\n#Now we start with the connectivity matrices\nclass Elm2Elm2D_extrude(Extrude2D):\n \"\"\"Let's users interact with a 2D element connectivity and the # of extruded\n elements as if it was a 3D element connectivity matrix\n \"\"\"\n def __init__(self, elm2elm, levels, topbcid=-5, botbcid=-6):\n \"\"\"The constructor for this object assigns the required data\n structures.\n @param elm2elm (\\c float) Numpy array of 2D triangulation\n @param levels (\\c int) Number of vertices in the vertical. 
Note,\n levels = levels + 1, where levels is the number of\n ELEMENTS in the vertical\n @param topbcid (\\c int) Negative integer giving the id of the top\n boundary condition. Default = -1\n @param botbcid (\\c int) Negative integer giving the id of the bottom\n boundary condition. Default = -2\n\n @note If botbcid == topbcid and > 0, then assume periodic\n\n @author Matt Ueckermann\n \"\"\"\n ## The base 2D triangulation using the element numbers\n self._elm2elm2D = elm2elm * (levels)\n #Fix the boundary conditions\n self._elm2elm2D[elm2elm < 0] = elm2elm[elm2elm < 0]\n\n ##Private copy of the number of levels\n self._levels = levels\n\n ##The defined length for this class, in this case the number of elements\n #times the number of levels\n self._len = len(elm2elm) * self._levels\n ##The dimension of the problem, in this case, means the maximum number\n #of vertices in a 3D element.\n self._dim = 6 #for extruded quads\n\n ##To mimick numpy arrays, we define the shape variable\n self.shape = (self._len, self._dim)\n\n ##The bottom boundary condition id.\n self._botbc = botbcid\n\n ##The top boundary condition id.\n self._topbc = topbcid\n\n def _return_rule(self, globids, xyz_key):\n \"\"\"This routine defines the return rule based on a global id number.\n Basically how to combine the 2D information and extrude information\n to get 3D information\n \"\"\"\n #Debugging print statements\n #print globids, xyz_key\n elm2D_ids = globids / self._levels\n z_ids = globids % self._levels\n\n #This is a few-step process to get the boundary conditions correct\n tmp = self._elm2elm2D[elm2D_ids, :]\n #Only affect the non-negative ids\n ids = tmp < 0\n negtemp = tmp[ids]\n tmp = tmp + np.tile(z_ids, (4, 1)).T\n tmp[ids] = negtemp\n\n tmp = np.column_stack((globids + 1, globids - 1, tmp))\n\n #fix top and bottom boundary conditions\n if (self._botbc == self._topbc) and (self._topbc > 0):\n tmp[z_ids == self._levels - 1, 0] = \\\n globids[z_ids == self._levels - 1]\n tmp[z_ids == 0, 1] = \\\n globids[z_ids == 0] + self._levels - 1\n else:\n tmp[z_ids == self._levels - 1, 0] = self._botbc\n tmp[z_ids == 0, 1] = self._topbc\n\n return tmp[:, xyz_key]\n\n#Then the edge connectivity matrix\nclass Ed2Ed2D_extrude(Extrude2D):\n \"\"\"Let's users interact with a 2D element connectivity and the # of extruded\n elements as if it was a 3D element connectivity matrix\n \"\"\"\n def __init__(self, ed2ed, elm, levels, topbcid=-5, botbcid=-6):\n \"\"\"The constructor for this object assigns the required data\n structures.\n @param ed2ed (\\c int) Numpy array of 2D triangulation\n @param elm (\\c int) Numpy array defining element in terms of global\n vertex numbers.\n @param levels (\\c int) Number of vertices in the vertical. Note,\n zlevels = levels + 1, where levels is the number of\n ELEMENTS in the vertical\n @param topbcid (\\c int) Negative integer giving the id of the top\n boundary condition. Default = -1\n @param botbcid (\\c int) Negative integer giving the id of the bottom\n boundary condition. 
Default = -2\n\n @note If botbcid == topbcid and > 0, then assume periodic\n\n @author Matt Ueckermann\n \"\"\"\n ## The base 2D triangulation using the element numbers\n self._ed2ed2D = ed2ed.copy()\n #Do the multiplications to use the global element numbers for the\n #surface mesh\n #first for the element numbers\n ids = ed2ed[:, 0] >= 0\n self._ed2ed2D[ids, 0] = self._ed2ed2D[ids, 0] * levels\n ids = ed2ed[:, 2] >= 0\n self._ed2ed2D[ids, 2] = self._ed2ed2D[ids, 2] * levels\n #then for the vertex numbers (which requires zlevels instead)\n #also add by 1 because it is convenient later. The faces have to\n #be labelled counter clockwise, and start numbering from the bottom\n #vertices. :)\n self._ed2ed2D[:, 4:] = self._ed2ed2D[:, 4:] * (levels + 1) + 1\n\n ##Private copy of the number of interior edges (not boundary) for the\n # vertical faces\n self._n_ed_in = np.sum(ed2ed[:,2] >= 0)\n\n ##Private copy of the element triangulation matrix, used for the top\n #and bottom edge connectivity\n self._elm2D = elm * (levels + 1)\n\n ##Private copy of the number of levels\n self._levels = levels\n\n ##The defined length for this class, in this case the number of elements\n #times the number of levels\n self._len = len(ed2ed) * self._levels + len(elm) * (self._levels + 1)\n ##The number of columns in the ed2ed matrix. This is equal to 4 + the\n #number of vertices that make up a face. That will be a max of 8 in 3D\n self._dim = 8 #for extruded quads\n\n ##To mimick numpy arrays, we define the shape variable\n self.shape = (self._len, self._dim)\n\n ##The bottom boundary condition id.\n self._botbc = botbcid\n\n ##The top boundary condition id.\n self._topbc = topbcid\n\n def _return_rule(self, globids, xyz_key):\n \"\"\"This routine defines the return rule based on a global id number.\n Basically how to combine the 2D information and extrude information\n to get 3D information\n \"\"\"\n #intialize tmp\n tmp = []\n\n #The edge ids are divided into 4 major categories:\n #1. Interior vertical edges (0:n_ed_in * levels)\n #2. Interior horizontal edges\n # <start> : len(_elm2D) * (levels - 1) + <start>\n #3. Boundary vertical edges\n # <start> : len(ed2ed) - n_ed_in + <start>\n #4. Boundary horizontal edges\n # <start> : len(_elm2D) * 2 + <start>\n #\n # and each category should be dealt with separately\n\n #Do the first category\n ids_done = np.array(len(globids) * [False], dtype=bool)\n ids = globids < (self._n_ed_in * self._levels)\n\n if np.any(ids):\n #print 'first'\n elm2D_ids = globids[ids] / self._levels\n z_ids = globids[ids] % self._levels\n\n #Note the colstack is needed to connect the two vertices (at different\n # zlevels). The bottom two vertices are already included in the\n #ed2ed matrices\n tmp1 = np.column_stack((self._ed2ed2D[elm2D_ids, :4],\n self._ed2ed2D[elm2D_ids, 4] + z_ids,\n self._ed2ed2D[elm2D_ids, 5] + z_ids,\n self._ed2ed2D[elm2D_ids, 5] - 1 + z_ids,\n self._ed2ed2D[elm2D_ids, 4] - 1 + z_ids))\n #modify the element ids to be correct\n ids = tmp1[:, 0] >= 0\n tmp1[ids, 0] = tmp1[ids, 0] + z_ids[ids]\n ids = tmp1[:, 2] >= 0\n tmp1[ids, 2] = tmp1[ids, 2] + z_ids[ids]\n #the local edge ids also need correction\n tmp1[:, 1] = tmp1[:, 1] + 2\n tmp1[:, 3] = tmp1[:, 3] + 2\n #Record which globids have been handled already\n\n # CF: OLD MPU CODE DOES THIS:\n # ids_done[ids] = True\n #\n # where he applies a boolean mask that's shorter than the actual\n # array. Is this is a shortcut or a bug? 
Unclear, but explicitly\n # padding to fix the warning.\n padLength = len(ids_done) - len(ids)\n pad = np.zeros(padLength, dtype=bool)\n paddedIds = np.append(ids, pad)\n ids_done[paddedIds] = True\n tmp = tmp1\n\n #Second category\n start = (self._n_ed_in * self._levels)\n ids = globids < (start + len(self._elm2D) * (self._levels - 1))\n #Don't want to include the ids already done, so remove them\n ids[ids_done] = False\n\n if np.any(ids):\n #print 'second'\n #This is illusively tricky. There are only levels-1 internal\n #horizontal faces, so when the global face ids are converted to\n #local 2D ids, we have to divide by levels-1, however...\n elm2D_ids = (globids[ids] - start) / (self._levels - 1)\n z_ids = (globids[ids] - start) % (self._levels - 1)\n\n #... when calculating the global element number we have to multiply\n #the local 2D element number by the TOTAL number of levels.\n tmp2 = np.column_stack((elm2D_ids * self._levels + z_ids + 1,\n np.ones_like(z_ids),\n elm2D_ids * self._levels + z_ids,\n np.zeros_like(z_ids),\n self._elm2D[elm2D_ids, 0] + z_ids + 1,\n self._elm2D[elm2D_ids, 1] + z_ids + 1,\n self._elm2D[elm2D_ids, 2] + z_ids + 1,\n self._elm2D[elm2D_ids, 3] + z_ids + 1))\n\n ids_done[ids] = True\n\n\n #correct the final column of tmp2\n ids = self._elm2D[elm2D_ids, 3] < 0\n tmp2[ids, -1] = -1\n if len(tmp):\n tmp = np.concatenate((tmp, tmp2))\n else:\n tmp = tmp2\n\n #Third category\n start = start + len(self._elm2D) * (self._levels - 1)\n ids = globids < ( start + \\\n (len(self._ed2ed2D) - self._n_ed_in) * self._levels )\n #Don't want to include the ids already done, so remove them\n ids[ids_done] = False\n\n if np.any(ids):\n #print 'third'\n #Record which globids have been handled already\n ids_done[ids] = True\n #There is an offset here because for the vertical faces, we only\n #deal with the boundary elements of the 2D mesh\n elm2D_ids = (globids[ids] - start) / self._levels + self._n_ed_in\n z_ids = (globids[ids] - start) % self._levels\n\n #Note the colstack is needed to connect the two vertices (at different\n # zlevels). The bottom two vertices are already included in the\n #ed2ed matrices\n tmp1 = np.column_stack((self._ed2ed2D[elm2D_ids, :4], \\\n self._ed2ed2D[elm2D_ids, 4] + z_ids, \\\n self._ed2ed2D[elm2D_ids, 5] + z_ids, \\\n self._ed2ed2D[elm2D_ids, 5] - 1 + z_ids, \\\n self._ed2ed2D[elm2D_ids, 4] - 1 + z_ids))\n #modify the element ids to be correct\n ids = tmp1[:, 0] >= 0 #should really be all of them\n tmp1[ids, 0] = tmp1[ids, 0] + z_ids[ids]\n #ids = tmp1[:, 2] >= 0\n #tmp1[ids, 2] = tmp1[ids, 2] + z_ids[ids]\n #the local edge ids also need correction\n tmp1[:, 1] = tmp1[:, 1] + 2\n #tmp1[:, 3] = tmp1[:, 3] + 2\n\n if len(tmp):\n tmp = np.concatenate((tmp, tmp1))\n else:\n tmp = tmp1\n\n #Fourth category\n start = start + (len(self._ed2ed2D) - self._n_ed_in) * self._levels\n ids = globids < (start + len(self._elm2D) * (2))\n #Don't want to include the ids already done, so remove them\n ids[ids_done] = False\n\n if np.any(ids):\n #print 'fourth'\n #This is illusively tricky. There are only levels-1 internal\n #horizontal faces, so when the global face ids are converted to\n #local 2D ids, we have to divide by levels-1, however...\n elm2D_ids = (globids[ids] - start) / (2)\n z_ids = (globids[ids] - start) % (2)\n\n #... 
when calculating the global element number we have to multiply\n #the local 2D element number by the TOTAL number of levels.\n tmp2 = np.column_stack((\\\n elm2D_ids * self._levels + z_ids * (self._levels - 1), \\\n np.zeros_like(z_ids), \\\n np.ones((len(z_ids), 2), dtype=int), \\\n self._elm2D[elm2D_ids, 0] + z_ids * (self._levels), \\\n self._elm2D[elm2D_ids, 1] + z_ids * (self._levels), \\\n self._elm2D[elm2D_ids, 2] + z_ids * (self._levels), \\\n self._elm2D[elm2D_ids, 3] + z_ids * (self._levels)))\n\n ids_done[ids] = True\n\n #now fix the boundary conditions and local face numbering\n ids = (z_ids == 0)\n tmp2[ids, 2] = self._topbc\n tmp2[ids, 1] = 1\n ids = (z_ids == 1)\n tmp2[ids, 2] = self._botbc\n #The bottom boundaries also need to have the vertex ordering\n #re-order, so that the bottom normal will be OUTWARD pointing\n swtch= np.array([[6],[4]])\n swtch2= np.array([[4],[6]])\n tmp2[ids, swtch] = tmp2[ids, swtch2]\n\n if len(tmp):\n tmp = np.concatenate((tmp, tmp2))\n else:\n tmp = tmp2\n\n return tmp[:, xyz_key]\n\n#===============================================================================\n# ##############################################################################\n#===============================================================================\nclass Mesh3D(Mesh):\n \"\"\"This is the base class for 3D meshes\"\"\"\n def __init__(self):\n pass\n\n def __len__(self):\n return self.n_elm\n\n def set_bc_ids(self, bc_id_lambda):\n \"\"\"To change the default id number for boundary conditions, you can\n use this function\n\n @param bc_id_lambda (\\c lambda function) List of lambda functions. The\n id of the list determines the id of the boundary.\n bc_id_lambda = lambda (p): f(p)\n where p is a numpy array with p.shape = (n_ext_ed, dim) with the\n centroids of the edges. bc_id_lambda[i](p) should evaluate to\n True if that edge should have the id '-i'.\n\n \"\"\"\n if bc_id_lambda != None:\n #Find edge centroids\n ids = (self.ids_interior_ed == False).nonzero()[0]\n vts = self.ed2ed[ids, 4:]\n\n p = np.array([coord[vts].mean(1) for coord in self.vert[:].T]).T\n ids_tri = self.ed_type[ids] == False\n p[ids_tri, :] = np.array([coord[vts[ids_tri, :3]].mean(1)\\\n for coord in self.vert[:].T]).T\n for i in range(len(bc_id_lambda)):\n self.ed2ed[ids[bc_id_lambda[i](p)], 2:3] = -i - 1\n else:\n print(\"Input function to set_bcs_ids was 'None'.\" + \\\n \" New boundary conditions id's were not set.\")\n\n#===============================================================================\n# Mesh3D_Extrude\n#===============================================================================\nclass Mesh3D_extrude(Mesh3D):\n \"\"\"This class acts as a container for a 3D mesh defined by a 2D surface\n mesh that is extruded down cumulatively by dz, defined at each grid-point\n \"\"\"\n def __init__(self, mesh2D, dz, topbcid=-5, botbcid=-6):\n '''\n This initializes the extruded mesh object, and can henceforth be used\n as though it was a 3D mesh object.\n\n @param mesh2D (\\c Mesh2D object) An instance of the Mesh2D class.\n @param dz (\\c int) Array that keeps track of the \\f$\\Delta z\\f$'s\n defined at each vertex in the mesh.\n @param topbcid (\\c int) Negative integer giving the id of the top\n boundary condition. Default = -1\n @param botbcid (\\c int) Negative integer giving the id of the bottom\n boundary condition. 
Default = -2\n\n @author Matt Ueckermann\n '''\n #Validate input\n assert(topbcid < 0 and botbcid < 0)\n\n ##A copy of the 2D mesh from which the 3D mesh is created\n self.mesh2D = mesh2D\n\n ## The number of vertically structured levels\n self.levels = len(dz[0])\n\n ##Negative integer giving the id of the top boundary condition.\n #Default = -1\n self.topbcid = topbcid\n\n ##Negative integer giving the id of the bottom\n # boundary condition. Default = -2\n self.botbcid = botbcid\n\n ## The element connectivity matrix. elm2elm[i, j] gives the element\n # number which is connected to element i, through edge j of element i.\n self.elm2elm = Elm2Elm2D_extrude(mesh2D.elm2elm, self.levels, \\\n self.topbcid, self.botbcid)\n #Now make a real actual copy of this array\n self.elm2elm = self.elm2elm[:]\n\n #Make an class that makes the 3D array of ed2ed\n ed2ed = Ed2Ed2D_extrude(mesh2D.ed2ed, mesh2D.elm, self.levels, \\\n self.topbcid, self.botbcid)\n ## The edge connectivity matrix.\n # ed2ed[i, 0:2] gives the [element #, local edge #] of the plus-side\n # element.\n # ed2ed[i, 2:4] gives the [element #, local edge #] of the minus-side\n # element.\n # ed2ed[i, 4:] gives the vertices that make up the edge. These are\n #numbered count-clockwise with outward-point normal (according to\n #Right-hand rule)\n self.ed2ed = ed2ed[:] #Make an actual in-memory array\n\n ##A boolian array used to select the interior edges only\n self.ids_interior_ed = (ed2ed[:, 2] >= 0).ravel()\n\n ##A boolian array used to select the exterior edges only\n self.ids_exterior_ed = (ed2ed[:, 2] < 0).ravel()\n\n ## The triangulation matrix that defined each element.\n # elm[i, :] gives the global vertex numbers that make up the element.\n # This matrix is ordered such that the first num2Dtri elements are\n # triangular elements, while the remaining ones are quadrilaterals.\n self.elm = Elm2D_extrude(mesh2D.elm, self.levels)\n\n ## The different or unique types of elements in the triangulation.\n self.u_elm_type = copy.copy(mesh2D.u_elm_type)\n #Triangles become prisms\n self.u_elm_type[self.u_elm_type == 0] = 2\n\n ## The different or unique types of edge elements in the triangulation.\n self.u_ed_type = []\n for npe in self.u_elm_type:\n if npe == 0:\n self.u_ed_type.append(0)\n elif npe == 1:\n self.u_ed_type.append(1)\n elif npe == 2:\n self.u_ed_type.append(1)\n self.u_ed_type.append(0)\n self.u_ed_type = util.unique(self.u_ed_type)\n\n #Instead of creating a 2D version and extruding -- it's simpler just\n #to make the whole array -- plus it's a reasonably small array\n #in any case\n ## The edge element type. elm_type[i] gives the type of edge element\n #for global edge element number i. -- where the real type is given in\n #u_ed_type\n self.ed_type = np.array((self.ed2ed[:, -1] >= 0).ravel(), dtype=bool)\n #If there are only rectangles, the relative edge type should be zero!\n if all(self.u_ed_type == 1):\n self.ed_type[:] = 0\n\n #make the elm_type 2D-3D class\n elm_type = Elm_type2D_extrude(mesh2D.elm_type, self.levels)\n ## The element type. 
elm_type[i] gives the type of element for global\n # element number i, where the real type is given in u_elm_type.\n self.elm_type = elm_type[:] #actual in-memory copy of array\n\n ## Gives the total number of elements in the triangulation.\n # The number of triangles is given by num2Dtri, and the number of\n # quads can be calculated using n_elm-num2Dtri\n self.n_elm = len(self.elm_type)\n\n ## Gives the number of 2D triangular elements in the triangulation.\n self.n_elm_type = [tp * self.levels for tp in mesh2D.n_elm_type]\n\n ## Gives the total number of edges in the triangulation.\n self.n_ed = len(self.ed2ed)\n\n ## Gives the number of edges in the triangulation of a particular type.\n self.n_ed_type = [sum(self.ed_type == i) for i in range(len(self.u_ed_type))]\n\n ## Array giving the x-y-z coordinates of the global vertices in the\n # triangulation.\n self.vert = Vert2D_extrude(mesh2D.vert, dz)\n\n ##The height-map for the 2D vertices (or bathymetry)\n self.h_map = self.vert._z[:, -1].reshape(len(self.vert._z), 1)\n\n #Convert vertices to a real numpy array\n self.vert = self.vert[:]\n\n ##The dimension of the mesh, dim=2, since this Mesh2D is exclusively\n # for 2D meshes.\n self.dim = mesh2D.dim + 1\n\n ##Vertex map, maps the vertex number from one periodic edge to the\n #other. This map is needed when comparing the orientation of the edge\n #on the element to the orientation of the periodic edge. The element on\n #the right will not have matching vertex numbers, because it's edge\n #used to be a boundary edge, but has disappeared because of the\n #periodicity.\n # EG. in 1D:\n # [0] a1--(A)--a0 1 b0--(B)--b1 [2] ==> 0 --(A)-- 1 --(B)-- 0\n # ed2ed = [A a0 B b0 1 ed2ed = [A a0 B b0 1\n # A a1 -1 -1 0 ==> A a1 B b1 0]\n # B b1 -1 -1 2]\n # elm = [0 1 ==> elm = [0 1\n # 1 2] 1 2]\n #\n #This array is populated in the msh.mk.periodic function\n self.vertmap = None\n\n def write_mesh_to_vtk(self, filename):\n \"\"\"\n @author foucartc\n write the 3D mesh out to VTK file so that it can be viewed in Paraview\n or some similar software\n \"\"\"\n pts, conn = self.vert, self.elm[:]\n Points, Cells = vtk.vtkPoints(), vtk.vtkCellArray()\n\n # add node / connectivity information to VTK object\n for pt in pts:\n Points.InsertNextPoint(pt)\n\n grid = vtk.vtkUnstructuredGrid()\n grid.SetPoints(Points)\n\n for idx, cn in enumerate(conn):\n if cn[-1] == -1:\n cell = vtk.vtkWedge()\n cnRef = cn[0:6]\n for idx,pt in enumerate(cnRef):\n cell.GetPointIds().SetId(idx,pt)\n\n else:\n cell = vtk.vtkHexahedron()\n for idx, pt in enumerate(cn):\n cell.GetPointIds().SetId(idx, pt)\n grid.InsertNextCell(cell.GetCellType(), cell.GetPointIds())\n\n\n writer = vtk.vtkXMLUnstructuredGridWriter();\n writer.SetInputData(grid)\n writer.SetFileName(filename);\n writer.Write()\n" ]
[ [ "numpy.ix_", "numpy.ones_like", "numpy.tile", "numpy.cumsum", "numpy.concatenate", "numpy.append", "numpy.zeros_like", "numpy.any", "numpy.column_stack", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
RemotePixel/remotepixel-py
[ "bd58db7a394c84651d05c4e6f83da4cd3d4c26f3" ]
[ "remotepixel/cbers_ndvi.py" ]
[ "\"\"\"remotepixel.cbers_ndvi module.\"\"\"\n\nimport re\nfrom functools import partial\nfrom concurrent import futures\n\nimport numpy as np\nimport numexpr as ne\n\nimport rasterio\nfrom rasterio import warp\n\nfrom remotepixel.utils import cbers_parse_scene_id, get_area\nfrom rio_tiler.utils import linear_rescale, array_to_img, get_colormap, b64_encode_img\n\nnp.seterr(divide=\"ignore\", invalid=\"ignore\")\n\nCBERS_BUCKET = \"s3://cbers-pds\"\n\n\ndef point(scene, coordinates, expression):\n \"\"\"Point handler.\"\"\"\n bands = tuple(set(re.findall(r\"b(?P<bands>[0-9]{1,2})\", expression)))\n\n scene_params = cbers_parse_scene_id(scene)\n cbers_address = f'{CBERS_BUCKET}/{scene_params[\"key\"]}'\n addresses = [\n \"{}/{}_BAND{}.tif\".format(cbers_address, scene, band) for band in bands\n ]\n\n def worker(address):\n \"\"\"Point worker.\"\"\"\n with rasterio.open(address) as band:\n lon_srs, lat_srs = warp.transform(\n \"EPSG:4326\", band.crs, [coordinates[0]], [coordinates[1]]\n )\n point = list(band.sample([(lon_srs[0], lat_srs[0])]))[0]\n return point[0]\n\n try:\n with futures.ThreadPoolExecutor(max_workers=3) as executor:\n data = list(executor.map(worker, addresses))\n\n ctx = {}\n for bdx, b in enumerate(bands):\n ctx[\"b{}\".format(b)] = data[bdx]\n ratio = np.nan_to_num(ne.evaluate(expression, local_dict=ctx))\n except IndexError:\n ratio = 0.0\n\n date = (\n scene_params[\"acquisitionYear\"]\n + \"-\"\n + scene_params[\"acquisitionMonth\"]\n + \"-\"\n + scene_params[\"acquisitionDay\"]\n )\n\n return {\"ndvi\": ratio, \"scene\": scene, \"date\": date}\n\n\ndef area(\n scene,\n bbox,\n expression,\n expression_range=[-1, 1],\n bbox_crs=\"epsg:4326\",\n out_crs=\"epsg:3857\",\n):\n \"\"\"Area handler.\"\"\"\n max_img_size = 512\n\n bands = tuple(set(re.findall(r\"b(?P<bands>[0-9]{1,2})\", expression)))\n\n scene_params = cbers_parse_scene_id(scene)\n cbers_address = f'{CBERS_BUCKET}/{scene_params[\"key\"]}'\n addresses = [\n \"{}/{}_BAND{}.tif\".format(cbers_address, scene, band) for band in bands\n ]\n\n _worker = partial(\n get_area,\n bbox=bbox,\n max_img_size=max_img_size,\n bbox_crs=bbox_crs,\n out_crs=out_crs,\n )\n with futures.ThreadPoolExecutor(max_workers=3) as executor:\n data = np.concatenate(list(executor.map(_worker, addresses)))\n if not np.any(data):\n raise Exception(\"No valid data in array\")\n\n mask = np.all(data != 0, axis=0).astype(np.uint8) * 255\n\n ctx = {}\n for bdx, b in enumerate(bands):\n ctx[\"b{}\".format(b)] = data[bdx]\n ratio = np.nan_to_num(ne.evaluate(expression, local_dict=ctx))\n\n ratio = np.where(\n mask, linear_rescale(ratio, in_range=expression_range, out_range=[0, 255]), 0\n ).astype(np.uint8)\n\n img = array_to_img(ratio, mask, get_colormap(name=\"cfastie\"))\n ndvi = b64_encode_img(img, \"jpeg\")\n\n date = (\n scene_params[\"acquisitionYear\"]\n + \"-\"\n + scene_params[\"acquisitionMonth\"]\n + \"-\"\n + scene_params[\"acquisitionDay\"]\n )\n\n return {\"ndvi\": ndvi, \"scene\": scene, \"date\": date}\n" ]
[ [ "numpy.all", "numpy.seterr", "numpy.any" ] ]
PyOCL/pyopencl-examples
[ "620e263660fd0c621ce2a0b9899b10bfc194c009" ]
[ "4-3-expand/clustering.py" ]
[ "#!/usr/bin/python3\nimport os\nimport time\nimport random\nimport numpy\nimport pyopencl as cl\nimport pyopencl.array\n\ndef plot_grouping_result(point_cids, group_ids, point_info):\n assert len(point_cids) != 0\n import matplotlib.pyplot as plt\n markers = ['p', '*', '+', 'x', 'd', 'o', 'v', 's', 'h']\n colors = [(random.random(), random.random(), random.random()) for x in range(len(point_cids))]\n while len(point_cids) > 0:\n c_id = point_cids.pop()\n clr = colors.pop()\n makr = markers[random.randint(0, len(markers)-1)]\n x = []\n y = []\n for idx, gid in enumerate(group_ids):\n if gid == c_id:\n x.append(point_info[idx][0])\n y.append(point_info[idx][1])\n plt.scatter(x, y, color=clr, marker=makr)\n\n plt.ylabel('y')\n plt.xlabel('x')\n plt.grid(True)\n plt.show()\n\nif __name__ == '__main__':\n print('load program from cl source file')\n f = open('clustering.cl', 'r', encoding='utf-8')\n kernels = ''.join(f.readlines())\n f.close()\n\n print('prepare data ... ')\n # The number of points randomly generated\n random.seed()\n num_points = 100\n point_ids = list(range(0, num_points))\n point_info = {point_id: (random.random() * 100, random.random() * 100) for point_id in point_ids}\n pointX = [point_info[v][0] for v in point_info]\n pointY = [point_info[v][1] for v in point_info]\n\n # The number of group you want to divide.\n numOfGroups = 5\n group_id_set = list(range(0, numOfGroups))\n cluster_centers_X = []\n cluster_centers_Y = []\n for idx in group_id_set:\n cluster_centers_X.append(pointX[idx])\n cluster_centers_Y.append(pointY[idx])\n cluster_ids = []\n for x in range(num_points):\n cluster_ids.append(x if x < numOfGroups else -1)\n # print('Init cluster id : {}'.format(cluster_ids))\n # for idx in range(num_points):\n # print('Init Points Info : {} - ({}, {})'.format(idx, pointX[idx], pointY[idx]))\n # for idx in range(numOfGroups):\n # print('Init Center Info : {} - ({}, {})'.format(idx, cluster_centers_X[idx], cluster_centers_Y[idx]))\n\n start_time = time.time()\n # prepare host memory for OpenCL\n np_centers_x = numpy.array(cluster_centers_X, dtype=numpy.float32)\n np_centers_y = numpy.array(cluster_centers_Y, dtype=numpy.float32)\n np_point_x = numpy.array(pointX, dtype=numpy.float32)\n np_point_y = numpy.array(pointY, dtype=numpy.float32)\n np_clusters_ids = numpy.array(cluster_ids, dtype=numpy.int32)\n time_hostdata_loaded = time.time()\n\n # create opencl context & queue\n print('create context ...')\n ctx = cl.create_some_context()\n print('create command queue ...')\n queue = cl.CommandQueue(ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)\n time_ctx_queue_creation = time.time()\n\n # prepare device memory for OpenCL\n print('prepare device memory for input / output')\n dev_centers_x = cl.array.to_device(queue, np_centers_x)\n dev_centers_y = cl.array.to_device(queue, np_centers_y)\n dev_points_x = cl.array.to_device(queue, np_point_x)\n dev_points_y = cl.array.to_device(queue, np_point_y)\n dev_clusters_id = cl.array.to_device(queue, np_clusters_ids)\n time_devicedata_loaded = time.time()\n\n print('compile kernel code')\n prg = cl.Program(ctx, kernels).build()\n time_kernel_compilation = time.time()\n\n np_num_of_clusters = numpy.int32(numOfGroups)\n np_num_of_points = numpy.int32(num_points)\n print('execute kernel programs')\n print('wait for kernel executions')\n elapsed = 0\n last_cluster_ids = None\n time_data_readback_total = 0\n for i in range(10000):\n evt = prg.do_clustering(queue, (num_points,), None,\n np_num_of_clusters, np_num_of_points,\n 
dev_centers_x.data, dev_centers_y.data,\n dev_points_x.data, dev_points_y.data,\n dev_clusters_id.data)\n evt.wait()\n elapsed += 1e-9 * (evt.profile.end - evt.profile.start)\n evt = prg.calc_centroid(queue, (numOfGroups,), None,\n np_num_of_clusters, np_num_of_points,\n dev_centers_x.data, dev_centers_y.data,\n dev_points_x.data, dev_points_y.data,\n dev_clusters_id.data)\n evt.wait()\n elapsed += 1e-9 * (evt.profile.end - evt.profile.start)\n\n time_before_readback = time.time()\n tmp_clusters_ids = dev_clusters_id.get()\n time_data_readback_total += (time.time() - time_before_readback)\n if numpy.array_equal(tmp_clusters_ids, last_cluster_ids):\n print(\"break ........... @ {}\".format(i))\n break\n else:\n last_cluster_ids = tmp_clusters_ids\n\n time_before_readback = time.time()\n cids = dev_clusters_id.get()\n # accumulate into the running total; plain '=' here discarded the in-loop readback time\n time_data_readback_total += (time.time() - time_before_readback)\n\n print('Prepare host data took : {}'.format(time_hostdata_loaded - start_time))\n print('Create CTX/QUEUE took : {}'.format(time_ctx_queue_creation - time_hostdata_loaded))\n print('Upload data to device took : {}'.format(time_devicedata_loaded - time_ctx_queue_creation))\n print('Compile kernel took : {}'.format(time_kernel_compilation - time_devicedata_loaded))\n print('OpenCL elapsed time : {}'.format(elapsed))\n print('Offload data from device took: {}'.format(time_data_readback_total))\n\n plot_grouping_result(point_ids, cids, point_info)\n print('Results are OK')\n" ]
[ [ "matplotlib.pyplot.scatter", "numpy.array_equal", "numpy.int32", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
JackyXiao98/chinese_ocr
[ "62c45fd6df0959144fad411cf90ee136047802c3" ]
[ "ctpn/text_detect.py" ]
[ "import os\nimport sys\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom lib.utils.timer import Timer\nfrom lib.fast_rcnn.config import cfg\nfrom lib.fast_rcnn.test import test_ctpn\nfrom lib.networks.factory import get_network\nfrom lib.text_connector.detectors import TextDetector\nfrom lib.text_connector.text_connect_cfg import Config as TextLineCfg\n\n\ndef resize_im(im, scale, max_scale=None):\n f = float(scale) / min(im.shape[0], im.shape[1])\n if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale:\n f = float(max_scale) / max(im.shape[0], im.shape[1])\n return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f\n\n\ndef load_tf_model():\n # load config file\n cfg.TEST.checkpoints_path = './ctpn/checkpoints'\n\n # init session\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)\n config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)\n sess = tf.Session(config=config)\n\n # load network\n net = get_network(\"VGGnet_test\")\n\n # load model\n print('Loading network {:s}... '.format(\"VGGnet_test\"))\n saver = tf.train.Saver()\n try:\n ckpt = tf.train.get_checkpoint_state(cfg.TEST.checkpoints_path)\n print('Restoring from {}...'.format(ckpt.model_checkpoint_path))\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('done')\n except:\n raise 'Check your pretrained {:s}'.format(ckpt.model_checkpoint_path)\n\n return sess, net\n\nsess, net = load_tf_model()\n\ndef ctpn(img):\n timer = Timer()\n timer.tic()\n\n img, scale = resize_im(img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)\n scores, boxes = test_ctpn(sess, net, img)\n\n textdetector = TextDetector()\n boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])\n timer.toc()\n print(\"\\n----------------------------------------------\")\n print(('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0]))\n\n return scores, boxes, img, scale\n\ndef draw_boxes(img, boxes, scale):\n box_id = 0\n img = img.copy()\n text_recs = np.zeros((len(boxes), 8), np.int)\n for box in boxes:\n if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:\n continue\n\n if box[8] >= 0.8:\n color = (255, 0, 0) # red\n else:\n color = (0, 255, 0) # green\n\n cv2.line(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2)\n cv2.line(img, (int(box[0]), int(box[1])), (int(box[4]), int(box[5])), color, 2)\n cv2.line(img, (int(box[6]), int(box[7])), (int(box[2]), int(box[3])), color, 2)\n cv2.line(img, (int(box[4]), int(box[5])), (int(box[6]), int(box[7])), color, 2)\n\n for i in range(8):\n text_recs[box_id, i] = box[i]\n\n box_id += 1\n\n img = cv2.resize(img, None, None, fx=1.0/scale, fy=1.0/scale, interpolation=cv2.INTER_LINEAR)\n return text_recs, img\n\ndef text_detect(img):\n scores, boxes, img, scale = ctpn(img)\n text_recs, img_drawed = draw_boxes(img, boxes, scale)\n return text_recs, img_drawed, img\n\nif __name__ == '__main__':\n from PIL import Image\n from lib.fast_rcnn.config import cfg_from_file\n cfg_from_file('./ctpn/ctpn/text.yml')\n im = Image.open('./test_images/1.jpg')\n img = np.array(im.convert('RGB'))\n text_recs, img_drawed, img = text_detect(img)\n Image.fromarray(img_drawed).save('result.jpg')\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "numpy.linalg.norm", "tensorflow.ConfigProto", "tensorflow.GPUOptions", "tensorflow.Session", "tensorflow.train.Saver" ] ]
ChuckHastings/cugraph
[ "f73b7b47124f56cf17202492f469270c0a1858a1" ]
[ "python/cugraph/sssp/test_sssp.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nfrom itertools import product\nimport time\n\nimport numpy as np\nimport pytest\nfrom scipy.io import mmread\n\nimport cudf\nimport cugraph\nfrom librmm_cffi import librmm as rmm\nfrom librmm_cffi import librmm_config as rmm_cfg\n\n# Temporarily suppress warnings till networkX fixes deprecation warnings\n# (Using or importing the ABCs from 'collections' instead of from\n# 'collections.abc' is deprecated, and in 3.8 it will stop working) for\n# python 3.7. Also, this import networkx needs to be relocated in the\n# third-party group once this gets fixed.\nimport warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n import networkx as nx\n\n\nprint('Networkx version : {} '.format(nx.__version__))\n\n\ndef read_mtx_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return mmread(mm_file).asfptype()\n\n\ndef read_csv_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return cudf.read_csv(mm_file, delimiter=' ',\n dtype=['int32', 'int32', 'float32'], header=None)\n\n\ndef cugraph_call(cu_M, source, edgevals=False):\n\n # Device data\n sources = cu_M['0']\n destinations = cu_M['1']\n if edgevals is False:\n values = None\n else:\n values = cu_M['2']\n\n print('sources size = ' + str(len(sources)))\n print('destinations size = ' + str(len(destinations)))\n\n # cugraph Pagerank Call\n G = cugraph.Graph()\n G.add_edge_list(sources, destinations, values)\n\n print('cugraph Solving... ')\n t1 = time.time()\n\n dist = cugraph.sssp(G, source)\n\n t2 = time.time() - t1\n print('Time : '+str(t2))\n\n distances = []\n dist_np = dist['distance'].to_array()\n for i, d in enumerate(dist_np):\n distances.append((i, d))\n\n return distances\n\n\ndef networkx_call(M, source, edgevals=False):\n\n print('Format conversion ... ')\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n # Directed NetworkX graph\n G = nx.Graph(M)\n Gnx = G.to_undirected()\n\n print('NX Solving... 
')\n t1 = time.time()\n\n if edgevals is False:\n path = nx.single_source_shortest_path_length(Gnx, source)\n else:\n path = nx.single_source_dijkstra_path_length(Gnx, source)\n\n t2 = time.time() - t1\n\n print('Time : ' + str(t2))\n\n return path\n\n\nDATASETS = ['../datasets/dolphins',\n '../datasets/karate',\n '../datasets/netscience']\n\nSOURCES = [1]\n\n\n# Test all combinations of default/managed and pooled/non-pooled allocation\[email protected]('managed, pool',\n list(product([False, True], [False, True])))\[email protected]('graph_file', DATASETS)\[email protected]('source', SOURCES)\ndef test_sssp(managed, pool, graph_file, source):\n gc.collect()\n\n rmm.finalize()\n rmm_cfg.use_managed_memory = managed\n rmm_cfg.use_pool_allocator = pool\n rmm.initialize()\n\n assert(rmm.is_initialized())\n\n M = read_mtx_file(graph_file+'.mtx')\n cu_M = read_csv_file(graph_file+'.csv')\n cu_paths = cugraph_call(cu_M, source)\n nx_paths = networkx_call(M, source)\n\n # Calculating mismatch\n err = 0\n\n for i in range(len(cu_paths)):\n if (cu_paths[i][1] != np.finfo(np.float32).max):\n if(cu_paths[i][1] != nx_paths[cu_paths[i][0]]):\n err = err + 1\n else:\n if (cu_paths[i][0] in nx_paths.keys()):\n err = err + 1\n\n assert err == 0\n\n\n# Test all combinations of default/managed and pooled/non-pooled allocation\[email protected]('managed, pool',\n list(product([False, True], [False, True])))\[email protected]('graph_file', ['../datasets/netscience'])\[email protected]('source', SOURCES)\ndef test_sssp_edgevals(managed, pool, graph_file, source):\n gc.collect()\n\n rmm.finalize()\n rmm_cfg.use_managed_memory = managed\n rmm_cfg.use_pool_allocator = pool\n rmm.initialize()\n\n assert(rmm.is_initialized())\n\n M = read_mtx_file(graph_file+'.mtx')\n cu_M = read_csv_file(graph_file+'.csv')\n cu_paths = cugraph_call(cu_M, source, edgevals=True)\n nx_paths = networkx_call(M, source, edgevals=True)\n\n # Calculating mismatch\n err = 0\n print(cu_paths)\n print(nx_paths)\n print(len(cu_paths))\n for i in range(len(cu_paths)):\n if (cu_paths[i][1] != np.finfo(np.float32).max):\n if(cu_paths[i][1] != nx_paths[cu_paths[i][0]]):\n err = err + 1\n else:\n if (cu_paths[i][0] in nx_paths.keys()):\n err = err + 1\n\n assert err == 0\n" ]
[ [ "scipy.io.mmread", "numpy.finfo" ] ]
dpastoresc/NarrativeDynamics
[ "bc0c502744c215274d34a23cbce6ad6a9d39a333" ]
[ "processTweetsCreateNetwork.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n\nimport mysql.connector\nfrom sqlalchemy import create_engine\n\nimport nltk\nimport re\nfrom nltk.corpus import stopwords\nimport string\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nfrom nltk.stem import SnowballStemmer\n\nimport pickle\nimport itertools\n\nimport networkx as nx\nimport time\nfrom datetime import datetime, timedelta, date\nfrom timeit import default_timer as timer\n\nfrom sys import argv\n\n#Se pasa como argumento el nombre de la tabla de la base de datos a procesar\ndb_name_table = str(argv[1])\n\nprint(type(str (db_name_table)))\n\nstart=timer()\n\n# ====== Connection ====== #\n# Connecting to mysql by providing a sqlachemy engine\n\n#SQLAlchemy URI looks like this : 'mysql+mysqlconnector://user:password@host_ip:port/database'\n\n#------mysql carlota--------\n#[email protected]:3306\n#root\n#password\n\n#------mysql carlota--------\n\nengine = create_engine('mysql+mysqlconnector://root:[email protected]:3306/twitterdb2',pool_recycle=3600)\n\n#Reading database table to a dataframe\nquery = 'SELECT * FROM '+ db_name_table\ndata = pd.read_sql(query, engine)\n\n# ====== PROCESS THE DATA ====== #\n\n#Campos tweet_id,text,hashtags\ndf = data.loc[:,('tweet_id','text','hashtags')]\n\n#sanity check, let’s look at the length of the string in text column in each entry.\ndf['pre_clean_len'] = [len(t) for t in df['text']]\n\n\n#Funcion para eliminar puntuacion y numeros \ndef remove_punct(text):\n \n punt = list(string.punctuation)#string.puntuation es un string - necesario convertirlo a lista\n punt.extend(['¿', '¡','’', '”', '“','•']) #añadir puntuacion española \n text = \"\".join([char for char in text if char not in punt])\n text = re.sub('[0-9]+', '', text) #numeros ??????\n return text\n\n#lista con nombres propios en español - eliminar en preprocess\n\ndf_fnames = pd.read_json('https://query.data.world/s/rr6djouhowpilvpxxqzbeyjroemoyp')\ndf_mnames = pd.read_json('https://query.data.world/s/5elfg6gzndy3qcsepzwxotmik7tvbm')\nfemale_names = df_fnames['name'].to_list()\nmale_names = df_mnames['name'].to_list()\n\nspanish_names = male_names+female_names\nspanish_names = list(map(lambda x:x.lower(),spanish_names))\n\n#Funcion limpiar el texto\ndef clean_text(text):\n #---------Emoji patterns---------\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n \"]+\", flags=re.UNICODE)\n text_lw = text.lower()\n \n text_m = re.sub(r'@[A-Za-z0-9]+','',text_lw) #Remove mentions (is already stored in mentions)\n text_url = re.sub('https?://[A-Za-z0-9./]+','',text_m) #Remove urls\n text_e = re.sub (emoji_pattern, '',text_url ) #Remove emojis \n text_ml = (BeautifulSoup(text_e, 'lxml').get_text())\n text_p = remove_punct(text_ml) #Remove Punt\n #Removing ...\n re1 = r\"…[\\s]\"\n re2 = r\"…$\"\n text_pp = re.sub(re2,'', re.sub (re1,' ', text_p))\n tokens = nltk.word_tokenize(text_pp) #Tokeniz\n \n #Removing stop words in spanish \n #Carlota: Añadido rt (aparece al comienzo de cada retweet)\n stopwords_spanish = stopwords.words('spanish')\n stopwords_spanish.extend(('rt','\\u200d','https'))\n stopwords_spanish.extend((list(string.ascii_lowercase))) #Añade el abecedario\n \n tokens_stpw = 
list(filter(lambda x: x not in stopwords_spanish, tokens))\n \n return tokens_stpw\n\n#Crea una columna en el dataframe aplicando la funcion clean_text()\ndf['tokens_text'] = df['text'].apply(lambda x: clean_text(x))\n\n#Dataframe processed\ndfProcessed = df.loc[:,('tweet_id','tokens_text')]\n\n#Save the processed dataframe to pickle - folder name dfs\npath_dfs = './pkls/dfs/'\ndfProcessed.to_pickle(path_dfs+ db_name_table+'Processed.pkl')\n\n#List of lists including all words --> frequency distribution\ncorpus_Post = list(itertools.chain.from_iterable(df['tokens_text'].to_list()))\n\n#creacion del objeto FreqDist para el corpus construido\ndistFreq = nltk.FreqDist(corpus_Post)\n\n#Convertirlo en dictionary para llamarlo en la creacion del grafo\ndict_distFreqPost = dict(distFreq)\n\n#Save to pickle en una carpeta que contiene los pkls (dictionaries)\npath_dicts = './pkls/dicts/'\nwith open(path_dicts+'distFreq'+db_name_table+'.pkl', 'wb') as f:\n pickle.dump(dict_distFreqPost, f)\n \n\n# ====== BUILD THE GRAPH ====== #\n\nGu=nx.Graph()\n\n#Iterar tokens\nfor index, row in dfProcessed.iterrows():\n \n tokens_list = row['tokens_text']\n\n #Lista con las palabras clave definidas \n keywords_list = ['ODS', 'sostenibilidad', 'desarrollo', 'sostenible', 'cooperación']\n \n for kw in keywords_list:\n if kw in tokens_list:\n keyword = kw\n \n for word in tokens_list:\n \n if word != keyword:\n relatedword = word\n\n #\n if not Gu.has_node(keyword):\n Gu.add_node(keyword, freq = dict_distFreqPost.get(keyword, 1))\n if not Gu.has_node(relatedword):\n Gu.add_node(relatedword, freq = dict_distFreqPost.get(relatedword, 1))\n if not Gu.has_edge(keyword,relatedword): \n Gu.add_edge(keyword,relatedword) \n #Flow\n Gu[keyword][relatedword]['weight']=1\n \n else:\n Gu[keyword][relatedword]['weight']=Gu[keyword][relatedword]['weight']+1\n\n #Guarda los networks graphs en una carpeta llamada graphs\n path_graphs = './graphs/' \n nx.write_gexf(Gu, path_graphs+db_name_table+'NetworkGraph.gexf')\n \n\nend = timer()\nprint(end - start)\n\nprint('Finalizado OK')\n" ]
[ [ "pandas.read_json", "pandas.read_sql" ] ]
YihsunEthanCheng/dog-breed-classifier
[ "3c036bdb7f31a92894aef9d46148dd2be916cbac" ]
[ "dog_breed_detector.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 11 00:22:40 2018\r\n\r\n@author: Ethan Cheng\r\n\"\"\"\r\nfrom sklearn.datasets import load_files \r\nfrom keras.utils import np_utils\r\nimport numpy as np\r\nfrom glob import glob\r\nfrom keras.preprocessing import image \r\nfrom tqdm import tqdm\r\nimport matplotlib.pyplot as plt\r\n\r\n#### get data\r\n# define function to load train, test, and validation datasets\r\ndef load_dataset(path):\r\n data = load_files(path)\r\n dog_files = np.array(data['filenames'])\r\n dog_targets = np_utils.to_categorical(np.array(data['target']), 133)\r\n return dog_files, dog_targets\r\n\r\n#% load train, test, and validation datasets\r\ntrain_files, train_targets = load_dataset('dogImages/train')\r\nvalid_files, valid_targets = load_dataset('dogImages/valid')\r\ntest_files, test_targets = load_dataset('dogImages/test')\r\n# load list of dog names\r\ndog_names = [item[20:-1] for item in sorted(glob(\"dogImages/train/*/\"))]\r\n\r\n# print statistics about the dataset\r\nprint('There are %d total dog categories.' % len(dog_names))\r\nprint('There are %s total dog images.\\n' % len(np.hstack([train_files, valid_files, test_files])))\r\nprint('There are %d training dog images.' % len(train_files))\r\nprint('There are %d validation dog images.' % len(valid_files))\r\nprint('There are %d test dog images.'% len(test_files))\r\n\r\n#%\r\ndef path_to_tensor(img_path):\r\n # loads RGB image as PIL.Image.Image type\r\n img = image.load_img(img_path, target_size=(224, 224))\r\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\r\n x = image.img_to_array(img)\r\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\r\n return np.expand_dims(x, axis=0)\r\n\r\ndef paths_to_tensor(img_paths):\r\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\r\n return np.vstack(list_of_tensors)\r\n\r\nfrom PIL import ImageFile \r\nImageFile.LOAD_TRUNCATED_IMAGES = True \r\n\r\n# pre-process the data for Keras\r\ntrain_tensors = paths_to_tensor(train_files).astype('float32')/255\r\nvalid_tensors = paths_to_tensor(valid_files).astype('float32')/255\r\ntest_tensors = paths_to_tensor(test_files).astype('float32')/255\r\n\r\n#%\r\n\r\nplt.imshow(train_tensors[0])\r\n\r\n\r\n#%\r\n\r\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\r\nfrom keras.layers import Dropout, Dense\r\nfrom keras.models import Sequential\r\n\r\nmodel = Sequential()\r\n\r\n### TODO: Define your architecture.\r\nmodel.add(Conv2D(filters=32, kernel_size=2, padding='valid', activation='relu', input_shape=(224, 224, 3)))\r\nmodel.add(MaxPooling2D(pool_size=(2,2), padding = 'valid'))\r\nmodel.add(Conv2D(filters=64, kernel_size=2, padding='valid', activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), padding = 'valid'))\r\nmodel.add(Conv2D(filters=128, kernel_size=2, padding='valid', activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2,2), strides = (2,2), padding = 'valid'))\r\nmodel.add(GlobalAveragePooling2D()) \r\nmodel.add(Dropout(0.25)) \r\nmodel.add(Dense(1024, activation= None))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Dense(133, activation='softmax'))\r\nmodel.summary()\r\n\r\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\r\n#%\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\n# create and configure augmented image generator\r\ndatagen_train = ImageDataGenerator(\r\n width_shift_range=0.1, # randomly shift images 
horizontally (10% of total width)\r\n height_shift_range=0.1, # randomly shift images vertically (10% of total height)\r\n horizontal_flip=True) # randomly flip images horizontally\r\n\r\n# fit augmented image generator on data\r\ndatagen_train.fit(train_tensors)\r\n\r\n#%%\r\nfrom keras.callbacks import ModelCheckpoint \r\n\r\n### TODO: specify the number of epochs that you would like to use to train the model.\r\n\r\nepochs = 400\r\n\r\n### Do NOT modify the code below this line.\r\n\r\ncheckpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch_c2x2_aug_400_epochs_.hdf5', \r\n verbose=1, save_best_only=True)\r\n\r\n#model.fit(train_tensors, train_targets, \r\n# validation_data=(valid_tensors, valid_targets), shuffle= True,\r\n# epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)\r\n\r\nhist = model.fit_generator(datagen_train.flow(train_tensors, train_targets, batch_size=20),\r\n epochs=epochs, validation_data=(valid_tensors, valid_targets), callbacks=[checkpointer], \r\n verbose=2, shuffle=True) \r\n\r\n#%%\r\nmodel.load_weights('saved_models/weights.best.from_scratch_c2x2_aug_400_epochs.hdf5')\r\n\r\n# evaluate and print test accuracy\r\nscore = model.evaluate(test_tensors, test_targets, verbose=0)\r\nprint('\\n', 'Test accuracy:', score[1])\r\n\r\n#%%\r\n\r\n" ]
[ [ "numpy.hstack", "matplotlib.pyplot.imshow", "numpy.expand_dims", "sklearn.datasets.load_files", "numpy.array", "numpy.vstack" ] ]
SchenbergZY/keras-transformer-xl-yue
[ "2d286e521992e8a13e5b883a6c5315f8967f7e48" ]
[ "tests/test_transformer_xl.py" ]
[ "import os\nimport tempfile\nfrom unittest import TestCase\nimport numpy as np\nfrom keras_transformer_xl.backend import keras\nfrom keras_transformer_xl import build_transformer_xl, set_custom_objects\n\n\nclass TestTransformerXL(TestCase):\n\n def test_build(self):\n model = build_transformer_xl(\n units=6,\n embed_dim=16,\n hidden_dim=12,\n num_token=13,\n num_block=3,\n num_head=2,\n batch_size=3,\n memory_len=15,\n target_len=5,\n dropout=0.1,\n attention_dropout=0.1,\n cutoffs=[3],\n div_val=2,\n )\n set_custom_objects()\n model_path = os.path.join(tempfile.gettempdir(), 'test_transformer_xl_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path)\n model.summary()\n try:\n current_path = os.path.dirname(os.path.abspath(__file__))\n visual_path = os.path.join(current_path, 'test_build.jpg')\n keras.utils.vis_utils.plot_model(model, visual_path)\n except Exception as e:\n pass\n\n def test_fit_batch_changes(self):\n model = build_transformer_xl(\n units=4,\n embed_dim=4,\n hidden_dim=4,\n num_token=2,\n num_block=1,\n num_head=1,\n batch_size=2,\n memory_len=0,\n target_len=5,\n )\n model.compile('adam', 'mse')\n model.summary()\n model.train_on_batch([np.ones((2, 5)), np.zeros((2, 1))], np.zeros((2, 5, 2)))\n model.train_on_batch([np.ones((1, 5)), np.zeros((1, 1))], np.zeros((1, 5, 2)))\n model.train_on_batch([np.ones((2, 5)), np.zeros((2, 1))], np.zeros((2, 5, 2)))\n model.train_on_batch([np.ones((1, 5)), np.zeros((1, 1))], np.zeros((1, 5, 2)))\n" ]
[ [ "numpy.random.random", "numpy.zeros", "numpy.ones" ] ]
krumo/Detectron
[ "48e3236fe2296afcd7b67a29c487cfe85f5860e1" ]
[ "detectron/datasets/json_dataset.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Representation of the standard COCO json dataset format.\n\nWhen working with a new dataset, we strongly suggest to convert the dataset into\nthe COCO json format and use the existing code; it is not recommended to write\ncode to support new dataset formats.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport copy\nimport cPickle as pickle\nimport logging\nimport numpy as np\nimport os\nimport scipy.sparse\n\n# Must happen before importing COCO API (which imports matplotlib)\nimport detectron.utils.env as envu\nenvu.set_up_matplotlib()\n# COCO API\nfrom pycocotools import mask as COCOmask\nfrom pycocotools.coco import COCO\n\nfrom detectron.core.config import cfg\nfrom detectron.utils.timer import Timer\nimport detectron.datasets.dataset_catalog as dataset_catalog\nimport detectron.utils.boxes as box_utils\nimport detectron.utils.segms as segm_utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass JsonDataset(object):\n \"\"\"A class representing a COCO json dataset.\"\"\"\n\n def __init__(self, name):\n assert dataset_catalog.contains(name), \\\n 'Unknown dataset name: {}'.format(name)\n assert os.path.exists(dataset_catalog.get_im_dir(name)), \\\n 'Im dir \\'{}\\' not found'.format(dataset_catalog.get_im_dir(name))\n assert os.path.exists(dataset_catalog.get_ann_fn(name)), \\\n 'Ann fn \\'{}\\' not found'.format(dataset_catalog.get_ann_fn(name))\n logger.debug('Creating: {}'.format(name))\n self.name = name\n self.image_directory = dataset_catalog.get_im_dir(name)\n self.image_prefix = dataset_catalog.get_im_prefix(name)\n self.COCO = COCO(dataset_catalog.get_ann_fn(name))\n self.debug_timer = Timer()\n # Set up dataset classes\n category_ids = self.COCO.getCatIds()\n categories = [c['name'] for c in self.COCO.loadCats(category_ids)]\n self.category_to_id_map = dict(zip(categories, category_ids))\n self.classes = ['__background__'] + categories\n self.num_classes = len(self.classes)\n self.json_category_id_to_contiguous_id = {\n v: i + 1\n for i, v in enumerate(self.COCO.getCatIds())\n }\n self.contiguous_category_id_to_json_id = {\n v: k\n for k, v in self.json_category_id_to_contiguous_id.items()\n }\n self._init_keypoints()\n\n def get_roidb(\n self,\n gt=False,\n proposal_file=None,\n min_proposal_size=2,\n proposal_limit=-1,\n crowd_filter_thresh=0,\n is_source=True\n ):\n \"\"\"Return an roidb corresponding to the json dataset. 
Optionally:\n - include ground truth boxes in the roidb\n - add proposals specified in a proposals file\n - filter proposals based on a minimum side length\n - filter proposals that intersect with crowd regions\n \"\"\"\n assert gt is True or crowd_filter_thresh == 0, \\\n 'Crowd filter threshold must be 0 if ground-truth annotations ' \\\n 'are not included.'\n image_ids = self.COCO.getImgIds()\n image_ids.sort()\n roidb = copy.deepcopy(self.COCO.loadImgs(image_ids))\n for entry in roidb:\n self._prep_roidb_entry(entry)\n if gt:\n # Include ground-truth object annotations\n self.debug_timer.tic()\n for entry in roidb:\n self._add_gt_annotations(entry)\n logger.debug(\n '_add_gt_annotations took {:.3f}s'.\n format(self.debug_timer.toc(average=False))\n )\n if proposal_file is not None:\n # Include proposals from a file\n self.debug_timer.tic()\n self._add_proposals_from_file(\n roidb, proposal_file, min_proposal_size, proposal_limit,\n crowd_filter_thresh\n )\n logger.debug(\n '_add_proposals_from_file took {:.3f}s'.\n format(self.debug_timer.toc(average=False))\n )\n # if is_source:\n _add_class_assignments(roidb)\n _add_domain_assignments(roidb, is_source)\n # print(roidb)\n return roidb\n\n def _prep_roidb_entry(self, entry):\n \"\"\"Adds empty metadata fields to an roidb entry.\"\"\"\n # Reference back to the parent dataset\n entry['dataset'] = self\n # Make file_name an abs path\n im_path = os.path.join(\n self.image_directory, self.image_prefix + entry['file_name']\n )\n assert os.path.exists(im_path), 'Image \\'{}\\' not found'.format(im_path)\n entry['image'] = im_path\n entry['flipped'] = False\n entry['has_visible_keypoints'] = False\n # Empty placeholders\n entry['boxes'] = np.empty((0, 4), dtype=np.float32)\n entry['segms'] = []\n entry['gt_classes'] = np.empty((0), dtype=np.int32)\n entry['seg_areas'] = np.empty((0), dtype=np.float32)\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(\n np.empty((0, self.num_classes), dtype=np.float32)\n )\n entry['is_crowd'] = np.empty((0), dtype=np.bool)\n entry['is_source'] = np.empty((0), dtype=np.bool)\n # 'box_to_gt_ind_map': Shape is (#rois). 
Maps from each roi to the index\n # in the list of rois that satisfy np.where(entry['gt_classes'] > 0)\n entry['box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)\n if self.keypoints is not None:\n entry['gt_keypoints'] = np.empty(\n (0, 3, self.num_keypoints), dtype=np.int32\n )\n # Remove unwanted fields that come from the json file (if they exist)\n for k in ['date_captured', 'url', 'license', 'file_name']:\n if k in entry:\n del entry[k]\n\n def _add_gt_annotations(self, entry):\n \"\"\"Add ground truth annotation metadata to an roidb entry.\"\"\"\n ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = self.COCO.loadAnns(ann_ids)\n # Sanitize bboxes -- some are invalid\n valid_objs = []\n valid_segms = []\n width = entry['width']\n height = entry['height']\n for obj in objs:\n # crowd regions are RLE encoded\n if segm_utils.is_poly(obj['segmentation']):\n # Valid polygons have >= 3 points, so require >= 6 coordinates\n obj['segmentation'] = [\n p for p in obj['segmentation'] if len(p) >= 6\n ]\n if obj['area'] < cfg.TRAIN.GT_MIN_AREA:\n continue\n if 'ignore' in obj and obj['ignore'] == 1:\n continue\n # Convert form (x1, y1, w, h) to (x1, y1, x2, y2)\n x1, y1, x2, y2 = box_utils.xywh_to_xyxy(obj['bbox'])\n x1, y1, x2, y2 = box_utils.clip_xyxy_to_image(\n x1, y1, x2, y2, height, width\n )\n # Require non-zero seg area and more than 1x1 box size\n if obj['area'] > 0 and x2 > x1 and y2 > y1:\n obj['clean_bbox'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n valid_segms.append(obj['segmentation'])\n num_valid_objs = len(valid_objs)\n\n boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype)\n gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)\n gt_overlaps = np.zeros(\n (num_valid_objs, self.num_classes),\n dtype=entry['gt_overlaps'].dtype\n )\n seg_areas = np.zeros((num_valid_objs), dtype=entry['seg_areas'].dtype)\n is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)\n box_to_gt_ind_map = np.zeros(\n (num_valid_objs), dtype=entry['box_to_gt_ind_map'].dtype\n )\n if self.keypoints is not None:\n gt_keypoints = np.zeros(\n (num_valid_objs, 3, self.num_keypoints),\n dtype=entry['gt_keypoints'].dtype\n )\n\n im_has_visible_keypoints = False\n for ix, obj in enumerate(valid_objs):\n cls = self.json_category_id_to_contiguous_id[obj['category_id']]\n boxes[ix, :] = obj['clean_bbox']\n gt_classes[ix] = cls\n seg_areas[ix] = obj['area']\n is_crowd[ix] = obj['iscrowd']\n box_to_gt_ind_map[ix] = ix\n if self.keypoints is not None:\n gt_keypoints[ix, :, :] = self._get_gt_keypoints(obj)\n if np.sum(gt_keypoints[ix, 2, :]) > 0:\n im_has_visible_keypoints = True\n if obj['iscrowd']:\n # Set overlap to -1 for all classes for crowd objects\n # so they will be excluded during training\n gt_overlaps[ix, :] = -1.0\n else:\n gt_overlaps[ix, cls] = 1.0\n entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)\n entry['segms'].extend(valid_segms)\n # To match the original implementation:\n # entry['boxes'] = np.append(\n # entry['boxes'], boxes.astype(np.int).astype(np.float), axis=0)\n entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)\n entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)\n entry['gt_overlaps'] = np.append(\n entry['gt_overlaps'].toarray(), gt_overlaps, axis=0\n )\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])\n entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)\n entry['box_to_gt_ind_map'] = np.append(\n entry['box_to_gt_ind_map'], box_to_gt_ind_map\n )\n if 
self.keypoints is not None:\n entry['gt_keypoints'] = np.append(\n entry['gt_keypoints'], gt_keypoints, axis=0\n )\n entry['has_visible_keypoints'] = im_has_visible_keypoints\n\n def _add_proposals_from_file(\n self, roidb, proposal_file, min_proposal_size, top_k, crowd_thresh\n ):\n \"\"\"Add proposals from a proposals file to an roidb.\"\"\"\n logger.info('Loading proposals from: {}'.format(proposal_file))\n with open(proposal_file, 'r') as f:\n proposals = pickle.load(f)\n id_field = 'indexes' if 'indexes' in proposals else 'ids' # compat fix\n _sort_proposals(proposals, id_field)\n box_list = []\n for i, entry in enumerate(roidb):\n if i % 2500 == 0:\n logger.info(' {:d}/{:d}'.format(i + 1, len(roidb)))\n boxes = proposals['boxes'][i]\n # Sanity check that these boxes are for the correct image id\n assert entry['id'] == proposals[id_field][i]\n # Remove duplicate boxes and very small boxes and then take top k\n boxes = box_utils.clip_boxes_to_image(\n boxes, entry['height'], entry['width']\n )\n keep = box_utils.unique_boxes(boxes)\n boxes = boxes[keep, :]\n keep = box_utils.filter_small_boxes(boxes, min_proposal_size)\n boxes = boxes[keep, :]\n if top_k > 0:\n boxes = boxes[:top_k, :]\n box_list.append(boxes)\n _merge_proposal_boxes_into_roidb(roidb, box_list)\n if crowd_thresh > 0:\n _filter_crowd_proposals(roidb, crowd_thresh)\n\n def _init_keypoints(self):\n \"\"\"Initialize COCO keypoint information.\"\"\"\n self.keypoints = None\n self.keypoint_flip_map = None\n self.keypoints_to_id_map = None\n self.num_keypoints = 0\n # Thus far only the 'person' category has keypoints\n if 'person' in self.category_to_id_map:\n cat_info = self.COCO.loadCats([self.category_to_id_map['person']])\n else:\n return\n\n # Check if the annotations contain keypoint data or not\n if 'keypoints' in cat_info[0]:\n keypoints = cat_info[0]['keypoints']\n self.keypoints_to_id_map = dict(\n zip(keypoints, range(len(keypoints))))\n self.keypoints = keypoints\n self.num_keypoints = len(keypoints)\n self.keypoint_flip_map = {\n 'left_eye': 'right_eye',\n 'left_ear': 'right_ear',\n 'left_shoulder': 'right_shoulder',\n 'left_elbow': 'right_elbow',\n 'left_wrist': 'right_wrist',\n 'left_hip': 'right_hip',\n 'left_knee': 'right_knee',\n 'left_ankle': 'right_ankle'}\n\n def _get_gt_keypoints(self, obj):\n \"\"\"Return ground truth keypoints.\"\"\"\n if 'keypoints' not in obj:\n return None\n kp = np.array(obj['keypoints'])\n x = kp[0::3] # 0-indexed x coordinates\n y = kp[1::3] # 0-indexed y coordinates\n # 0: not labeled; 1: labeled, not inside mask;\n # 2: labeled and inside mask\n v = kp[2::3]\n num_keypoints = len(obj['keypoints']) / 3\n assert num_keypoints == self.num_keypoints\n gt_kps = np.ones((3, self.num_keypoints), dtype=np.int32)\n for i in range(self.num_keypoints):\n gt_kps[0, i] = x[i]\n gt_kps[1, i] = y[i]\n gt_kps[2, i] = v[i]\n return gt_kps\n\n\ndef add_proposals(roidb, rois, scales, crowd_thresh):\n \"\"\"Add proposal boxes (rois) to an roidb that has ground-truth annotations\n but no proposals. If the proposals are not at the original image scale,\n specify the scale factor that separate them in scales.\n \"\"\"\n box_list = []\n for i in range(len(roidb)):\n inv_im_scale = 1. 
/ scales[i]\n idx = np.where(rois[:, 0] == i)[0]\n box_list.append(rois[idx, 1:] * inv_im_scale)\n _merge_proposal_boxes_into_roidb(roidb, box_list)\n if crowd_thresh > 0:\n _filter_crowd_proposals(roidb, crowd_thresh)\n _add_class_assignments(roidb)\n\n\ndef _merge_proposal_boxes_into_roidb(roidb, box_list):\n \"\"\"Add proposal boxes to each roidb entry.\"\"\"\n assert len(box_list) == len(roidb)\n for i, entry in enumerate(roidb):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n gt_overlaps = np.zeros(\n (num_boxes, entry['gt_overlaps'].shape[1]),\n dtype=entry['gt_overlaps'].dtype\n )\n box_to_gt_ind_map = -np.ones(\n (num_boxes), dtype=entry['box_to_gt_ind_map'].dtype\n )\n\n # Note: unlike in other places, here we intentionally include all gt\n # rois, even ones marked as crowd. Boxes that overlap with crowds will\n # be filtered out later (see: _filter_crowd_proposals).\n gt_inds = np.where(entry['gt_classes'] > 0)[0]\n if len(gt_inds) > 0:\n gt_boxes = entry['boxes'][gt_inds, :]\n gt_classes = entry['gt_classes'][gt_inds]\n proposal_to_gt_overlaps = box_utils.bbox_overlaps(\n boxes.astype(dtype=np.float32, copy=False),\n gt_boxes.astype(dtype=np.float32, copy=False)\n )\n # Gt box that overlaps each input box the most\n # (ties are broken arbitrarily by class order)\n argmaxes = proposal_to_gt_overlaps.argmax(axis=1)\n # Amount of that overlap\n maxes = proposal_to_gt_overlaps.max(axis=1)\n # Those boxes with non-zero overlap with gt boxes\n I = np.where(maxes > 0)[0]\n # Record max overlaps with the class of the appropriate gt box\n gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]\n entry['boxes'] = np.append(\n entry['boxes'],\n boxes.astype(entry['boxes'].dtype, copy=False),\n axis=0\n )\n entry['gt_classes'] = np.append(\n entry['gt_classes'],\n np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)\n )\n entry['seg_areas'] = np.append(\n entry['seg_areas'],\n np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)\n )\n entry['gt_overlaps'] = np.append(\n entry['gt_overlaps'].toarray(), gt_overlaps, axis=0\n )\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])\n entry['is_crowd'] = np.append(\n entry['is_crowd'],\n np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)\n )\n entry['box_to_gt_ind_map'] = np.append(\n entry['box_to_gt_ind_map'],\n box_to_gt_ind_map.astype(\n entry['box_to_gt_ind_map'].dtype, copy=False\n )\n )\n\n\ndef _filter_crowd_proposals(roidb, crowd_thresh):\n \"\"\"Finds proposals that are inside crowd regions and marks them as\n overlap = -1 with each ground-truth rois, which means they will be excluded\n from training.\n \"\"\"\n for entry in roidb:\n gt_overlaps = entry['gt_overlaps'].toarray()\n crowd_inds = np.where(entry['is_crowd'] == 1)[0]\n non_gt_inds = np.where(entry['gt_classes'] == 0)[0]\n if len(crowd_inds) == 0 or len(non_gt_inds) == 0:\n continue\n crowd_boxes = box_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :])\n non_gt_boxes = box_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :])\n iscrowd_flags = [int(True)] * len(crowd_inds)\n ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)\n bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]\n gt_overlaps[non_gt_inds[bad_inds], :] = -1\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)\n\n\ndef _add_class_assignments(roidb):\n \"\"\"Compute object category assignment for each box associated with each\n roidb entry.\n \"\"\"\n for entry in roidb:\n gt_overlaps = entry['gt_overlaps'].toarray()\n # max 
overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n entry['max_classes'] = max_classes\n entry['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\n\ndef _sort_proposals(proposals, id_field):\n \"\"\"Sort proposals by the specified id field.\"\"\"\n order = np.argsort(proposals[id_field])\n fields_to_sort = ['boxes', id_field, 'scores']\n for k in fields_to_sort:\n proposals[k] = [proposals[k][i] for i in order]\n\ndef _add_domain_assignments(roidb, is_source):\n \"\"\"Compute object category assignment for each box associated with each\n roidb entry.\n \"\"\"\n for entry in roidb:\n entry['is_source'] = np.append(entry['is_source'], is_source)\n" ]
[ [ "numpy.sum", "numpy.ones", "numpy.append", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where", "numpy.empty" ] ]
DongChengdongHangZhou/caffe-to-pytorch
[ "5e3104f3aa77d35bad5d2de235b067460c136fd5" ]
[ "caffe2pth/detection.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"2017.12.16 by xiaohang\nBorrow from: github: https://github.com/marvis/pytorch-caffe\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat([(boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2]], 1) # w, h\n\n\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [A,4].\n box_b: (tensor) bounding boxes, Shape: [B,4].\n Return:\n (tensor) intersection area, Shape: [A,B].\n \"\"\"\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. 
Here we operate on\n ground truth boxes and default boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2]-box_b[:, 0]) *\n (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\ndef match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n threshold: (float) The overlap threshold used when mathing boxes.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n variances: (tensor) Variances corresponding to each prior coord,\n Shape: [num_priors, 4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.\n idx: (int) current batch index\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n # jaccard index\n overlaps = jaccard(\n truths,\n point_form(priors)\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = overlaps.max(1)\n # [1,num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n\n #best_prior_idx.squeeze_(1)\n #best_prior_overlap.squeeze_(1)\n #best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.size(0)):\n best_truth_idx[best_prior_idx[j]] = j\n best_truth_overlap[best_prior_idx[j]] = best_prior_overlap[j]\n\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] # Shape: [num_priors]\n conf[best_truth_overlap < threshold] = 0 # label as background\n loc = encode(matches, priors, variances)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior\n\n\ndef encode(matched, priors, variances):\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n we have matched (based on jaccard overlap) with the prior boxes.\n Args:\n matched: (tensor) Coords of ground truth for each prior in point-form\n Shape: [num_priors, 4].\n priors: (tensor) Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n encoded boxes (tensor), Shape: [num_priors, 4]\n \"\"\"\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:])\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n g_wh = torch.log(g_wh) / variances[1]\n # return target for 
smooth_l1_loss\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.5, top_k=200):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n \"\"\"\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. 
after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count\n\n\ndef clip_boxes(boxes):\n boxes = torch.clamp(boxes, min = 0.0, max = 1.0)\n return boxes\n\n\nclass Detection(nn.Module):\n \"\"\"At test time, Detect is the final layer of SSD. Decode location preds,\n apply non-maximum suppression to location predictions based on conf\n scores and threshold to a top_k number of output predictions for both\n confidence score and locations.\n \"\"\"\n def __init__(self, num_classes, bkg_label, top_k, conf_thresh, nms_thresh, keep_top_k):\n super(Detection, self).__init__()\n self.num_classes = num_classes\n self.background_label = bkg_label\n self.top_k = top_k\n # Parameters used in nms.\n self.nms_thresh = nms_thresh\n if nms_thresh <= 0:\n raise ValueError('nms_threshold must be non negative.')\n self.conf_thresh = conf_thresh\n self.keep_top_k = keep_top_k\n self.variance = [0.1, 0.2]\n\n def forward(self, loc, conf, prior):\n \"\"\"\n Args:\n loc: (tensor) Loc preds from loc layers\n Shape: [batch,num_priors*4]\n conf: (tensor) Shape: Conf preds from conf layers\n Shape: [batch, num_priors*num_classes]\n prior: (tensor) Prior boxes and variances from priorbox layers\n Shape: [1,2, num_priors*4]\n \"\"\"\n num = loc.size(0)\n loc_data = loc.data\n conf_data = conf.data\n prior_data = prior.data\n\n num_classes = self.num_classes\n num_priors = prior_data.size(2)/4\n if num == 1:\n # size batch x num_classes x num_priors\n conf_preds = conf_data.view(num_priors, self.num_classes).t().contiguous().unsqueeze(0)\n else:\n conf_preds = conf_data.view(num, num_priors,\n self.num_classes).transpose(2, 1)\n\n # Decode predictions into bboxes.\n assert(num == 1)\n if num_classes == 2:\n loc_data = loc_data[0].view(-1, 4).clone()\n prior_data = center_size(prior_data[0][0].view(-1,4).clone())\n decoded_boxes = decode(loc_data, prior_data, self.variance)\n #decoded_boxes = clip_boxes(decoded_boxes)\n \n # For each class, perform nms\n conf_scores = conf_preds[0].clone()\n num_det = 0\n cl = 1\n c_mask = conf_scores[cl].gt(self.conf_thresh)\n if c_mask.sum() == 0:\n output = torch.Tensor([0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]).view(1,1,1,7)\n return Variable(conf.data.new().resize_(output.size()).copy_(output))\n scores = conf_scores[cl][c_mask]\n l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)\n boxes = decoded_boxes[l_mask].view(-1, 4)\n # idx of highest scoring and non-overlapping boxes per class\n ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)\n count = min(count, self.keep_top_k)\n extra_info = torch.FloatTensor([0.0, 1.0]).view(1,2).expand(num_priors,2)\n extra_info = conf.data.new().resize_(extra_info.size()).copy_(extra_info)\n output = torch.cat((extra_info[ids[:count]], scores[ids[:count]].unsqueeze(1),\n boxes[ids[:count]]), 1)\n \n #flt = self.output[:, :, :count, :].contiguous().view(-1, 5)\n return Variable(output.unsqueeze(0).unsqueeze(0))\n else:\n loc_data = loc_data[0].view(-1, 4).clone()\n prior_data = center_size(prior_data[0][0].view(-1,4).clone())\n decoded_boxes = decode(loc_data, prior_data, self.variance)\n #decoded_boxes = clip_boxes(decoded_boxes)\n \n # For each class, perform nms\n conf_scores = conf_preds[0].clone()\n num_det = 0\n 
cl = 1\n outputs = []\n for cl in range(1, num_classes):\n c_mask = conf_scores[cl].gt(self.conf_thresh)\n if c_mask.sum() == 0:\n continue\n scores = conf_scores[cl][c_mask]\n l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)\n boxes = decoded_boxes[l_mask].view(-1, 4)\n # idx of highest scoring and non-overlapping boxes per class\n ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)\n count = min(count, self.keep_top_k)\n extra_info = torch.FloatTensor([0.0, cl]).view(1,2).expand(count,2)\n extra_info = conf.data.new().resize_(extra_info.size()).copy_(extra_info)\n output = torch.cat((extra_info, scores[ids[:count]].unsqueeze(1),\n boxes[ids[:count]]), 1)\n outputs.append(output)\n \n outputs = torch.cat(outputs, 0)\n #flt = self.output[:, :, :count, :].contiguous().view(-1, 5)\n return Variable(outputs.unsqueeze(0).unsqueeze(0))\n\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + ¦Áloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by ¦Áwhich is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, use_gpu=True):\n super(MultiBoxLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = [0.1, 0.2]\n\n def forward(self, loc_data, conf_data, priors, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n ground_truth (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n #loc_data, conf_data, priors = predictions\n num = loc_data.size(0)\n num_priors = (loc_data.size(1)/4)\n num_classes = self.num_classes\n loc_data = loc_data.view(num, num_priors, 4)\n conf_data = conf_data.view(num, num_priors, num_classes)\n priors = priors[0][0].view(num_priors, 4)\n targets = targets.view(-1, 8)\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4)\n conf_t = torch.zeros(num, num_priors).long()\n for idx in range(num):\n sub_mask = (targets[:,0] == idx)\n if sub_mask.data.float().sum() == 0:\n continue\n sub_targets = targets[sub_mask.view(-1,1).expand_as(targets)].view(-1,8)\n truths = sub_targets[:, 3:7].data\n labels = sub_targets[:, 1].data\n defaults = priors.data\n defaults = center_size(defaults)\n 
match(self.threshold, truths, defaults, self.variance, labels,\n loc_t, conf_t, idx)\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n\n pos = conf_t > 0\n num_pos = pos.sum(keepdim=True)\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n\n loss_c = log_sum_exp(batch_conf).view(-1,1) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n loss_c[pos] = 0 # filter out pos boxes for now\n loss_c = loss_c.view(num, -1)\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = Variable(torch.clamp(self.negpos_ratio*num_pos.data.float(), max=pos.size(1)-1).long())\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + ¦Áloc(x,l,g)) / N\n\n N = num_pos.data.sum()\n loss_l /= N\n loss_c /= N\n return loss_l + loss_c\n\n" ]
[ [ "torch.Tensor", "torch.cat", "torch.zeros", "torch.nn.functional.cross_entropy", "torch.exp", "torch.mul", "torch.log", "torch.FloatTensor", "torch.nn.functional.smooth_l1_loss", "torch.clamp", "torch.index_select", "torch.autograd.Variable" ] ]
gregor-robinson/lolo
[ "54fd08c90b19b5849d1b2bbfbfea04a856935c84" ]
[ "python/lolopy/tests/test_metrics.py" ]
[ "from lolopy.metrics import (root_mean_squared_error, standard_confidence, standard_error, uncertainty_correlation)\nfrom numpy.random import multivariate_normal, uniform, normal, seed\nfrom unittest import TestCase\n\n\nclass TestMetrics(TestCase):\n\n def test_rmse(self):\n self.assertAlmostEqual(root_mean_squared_error([1, 2], [1, 2]), 0)\n self.assertAlmostEqual(root_mean_squared_error([4, 5], [1, 2]), 3)\n\n def test_standard_confidene(self):\n self.assertAlmostEqual(standard_confidence([1, 2], [2, 3], [1.5, 0.9]), 0.5)\n self.assertAlmostEqual(standard_confidence([1, 2], [2, 3], [1.5, 1.1]), 1)\n\n def test_standard_error(self):\n self.assertAlmostEqual(standard_error([1, 2], [1, 2], [1, 1]), 0)\n self.assertAlmostEqual(standard_error([4, 5], [1, 2], [3, 3]), 1)\n\n def test_uncertainty_correlation(self):\n seed(1)\n sample_size = 2 ** 15\n for expected in [0, 0.75]:\n # Make the error distribution\n y_true = uniform(0, 1, sample_size)\n\n # Make the errors and uncertainties\n draw = multivariate_normal([0, 0], [[1, expected], [expected, 1]], sample_size)\n\n # Add the errors, and separate out the standard deviations\n y_pred = y_true + [d[0] * normal(0, 1) for d in draw]\n y_std = [abs(d[1]) for d in draw]\n\n # Test with a very large tolerance for now\n measured_corr = uncertainty_correlation(y_true, y_pred, y_std)\n corr_error = abs(measured_corr - expected)\n self.assertLess(corr_error, 0.25, 'Error for {:.2f}: {:.2f}'.format(expected, corr_error))\n" ]
[ [ "numpy.random.uniform", "numpy.random.multivariate_normal", "numpy.random.normal", "numpy.random.seed" ] ]
kaldap/image-analogies
[ "0867aedfae7dfc0d27c42805a3d07f7b9eb7eaa2" ]
[ "image_analogy/losses/patches.py" ]
[ "import sys\nfrom itertools import product\n\nimport numpy as np\nimport torch\nfrom tensorflow.keras import backend as K\nfrom tensorflow import image as TFI\nimport tensorflow as tf\nfrom sklearn.feature_extraction.image import reconstruct_from_patches_2d\n\n\ndef make_patches(x, patch_size, patch_stride):\n '''Break image `x` up into a bunch of patches.'''\n # from theano.tensor.nnet.neighbours import images2neibs\n x = K.expand_dims(x, 0)\n x = K.permute_dimensions(x, (0, 2, 3, 1))\n # patches = images2neibs(x,\n # (patch_size, patch_size), (patch_stride, patch_stride),\n # mode='valid')\n\n # neibs are sorted per-channel\n patches = TFI.extract_patches(x, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1], [1, 1, 1, 1], 'VALID')\n patches = K.reshape(patches, (K.shape(patches)[1] * K.shape(patches)[2], patch_size, patch_size, K.shape(x)[3]))\n patches = K.permute_dimensions(patches, (0, 3, 1, 2)) # Nebo 0231\n # patches = K.reshape(patches, (K.shape(x)[1], K.shape(patches)[0] // K.shape(x)[1], patch_size, patch_size))\n # patches = K.permute_dimensions(patches, (1, 0, 2, 3))\n patches_norm = K.sqrt(K.sum(K.square(patches), axis=(1,2,3), keepdims=True))\n return patches, patches_norm\n\n\ndef reconstruct_from_patches_2d(patches, image_size):\n '''This is from scikit-learn. I thought it was a little overkill\n to require it just for this function.\n '''\n i_h, i_w = image_size[:2]\n p_h, p_w = patches.shape[1:3]\n img = np.zeros(image_size, dtype=np.float32)\n # compute the dimensions of the patches array\n n_h = i_h - p_h + 1\n n_w = i_w - p_w + 1\n for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):\n img[i:i + p_h, j:j + p_w] += p\n\n for i in range(i_h):\n for j in range(i_w):\n # divide by the amount of overlap\n # XXX: is this the most efficient way? memory-wise yes, cpu wise?\n img[i, j] /= float(min(i + 1, p_h, i_h - i) *\n min(j + 1, p_w, i_w - j))\n return img\n\n\ndef combine_patches(patches, out_shape):\n '''Reconstruct an image from these `patches`'''\n patches = patches.transpose(0, 2, 3, 1)\n recon = reconstruct_from_patches_2d(patches, out_shape)\n return recon.transpose(2, 0, 1).astype(np.float32)\n\n\ndef find_patch_matches(a, a_norm, b):\n '''For each patch in A, find the best matching patch in B'''\n b = b[:, :, ::-1, ::-1]\n #convs = K.reshape(K.batch_dot(\n # K.reshape(a, (K.shape(a)[0] * K.shape(a)[1], K.shape(a)[2] * K.shape(a)[3])),\n # K.reshape(b, (K.shape(b)[0] * K.shape(b)[1], K.shape(b)[2] * K.shape(b)[3])),\n # axes=1\n #), (K.shape(a)[0], K.shape(a)[1], 1, 1))\n\n a = K.permute_dimensions(a, (0, 2, 3, 1))\n b = K.permute_dimensions(b, (2, 3, 1, 0))\n convs = K.conv2d(a, b, padding='valid', data_format='channels_last')\n convs = K.reshape(convs, (K.shape(convs)[0], K.shape(convs)[3]))\n a_norm = K.reshape(a_norm, (K.shape(a_norm)[0], 1))\n argmax = K.reshape(K.argmax(convs / a_norm, axis=1), (K.shape(a_norm)[0], 1))\n return argmax\n" ]
[ [ "tensorflow.keras.backend.conv2d", "tensorflow.keras.backend.permute_dimensions", "sklearn.feature_extraction.image.reconstruct_from_patches_2d", "tensorflow.keras.backend.square", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.backend.argmax", "tensorflow.image.extract_patches", "numpy.zeros" ] ]
azmikamis/tf-transform-examples
[ "2ad36823a7f202312ca2e5b665485aa42bcaa4e2" ]
[ "03_simple_example_readtransform.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pprint\nimport tempfile\n\nimport apache_beam as beam\nimport tensorflow as tf\nimport tensorflow_transform as tft\nimport tensorflow_transform.beam.impl as tft_beam\nfrom tensorflow_transform.beam.tft_beam_io import transform_fn_io\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import dataset_schema\n\n\ndef main():\n def preprocessing_fn(inputs):\n x = inputs['x']\n y = inputs['y']\n s = inputs['s']\n x_centered = x - tft.mean(x)\n y_normalized = tft.scale_to_0_1(y)\n s_integerized = tft.compute_and_apply_vocabulary(s)\n x_centered_times_y_normalized = (x_centered * y_normalized)\n return {\n 'x_centered': x_centered,\n 'y_normalized': y_normalized,\n 'x_centered_times_y_normalized': x_centered_times_y_normalized,\n 's_integerized': s_integerized\n }\n\n raw_data = [\n {'x': 1, 'y': 1, 's': 'hello'},\n {'x': 2, 'y': 2, 's': 'world'},\n {'x': 3, 'y': 3, 's': 'hello'}\n ]\n\n raw_data_metadata = dataset_metadata.DatasetMetadata(\n dataset_schema.from_feature_spec({\n 's': tf.FixedLenFeature([], tf.string),\n 'y': tf.FixedLenFeature([], tf.float32),\n 'x': tf.FixedLenFeature([], tf.float32),\n }))\n\n with beam.Pipeline() as pipeline:\n with tft_beam.Context(temp_dir=tempfile.mkdtemp()):\n transform_fn = pipeline | transform_fn_io.ReadTransformFn('./transform_output')\n transformed_dataset = (\n ((raw_data, raw_data_metadata), transform_fn)\n | tft_beam.TransformDataset())\n \n transformed_data, transformed_metadata = transformed_dataset\n \n _ = (\n transformed_data\n | beam.ParDo(lambda x: pprint.pprint(x)))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.FixedLenFeature" ] ]
smsegal/PerceptualSimilarity
[ "f685b3c346dd7c6232a5cacfdbfd5cc95b145403", "f685b3c346dd7c6232a5cacfdbfd5cc95b145403" ]
[ "perceptual_similarity/models/networks_basic.py", "compute_dists_pair.py" ]
[ "\nfrom __future__ import absolute_import\n\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.autograd import Variable\nimport numpy as np\nfrom pdb import set_trace as st\nfrom skimage import color\nfrom IPython import embed\n\nfrom .pretrained_networks import vgg16, alexnet, squeezenet\nfrom ..util.util import l2, normalize_tensor, tensor2np, tensor2tensorlab, \\\n dssim, tensor2im\n\ndef spatial_average(in_tens, keepdim=True):\n return in_tens.mean([2,3],keepdim=keepdim)\n\ndef upsample(in_tens, out_H=64): # assumes scale factor is same for H and W\n in_H = in_tens.shape[2]\n scale_factor = 1.*out_H/in_H\n\n return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens)\n\n# Learned perceptual metric\nclass PNetLin(nn.Module):\n def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True):\n super(PNetLin, self).__init__()\n\n self.pnet_type = pnet_type\n self.pnet_tune = pnet_tune\n self.pnet_rand = pnet_rand\n self.spatial = spatial\n self.lpips = lpips\n self.version = version\n self.scaling_layer = ScalingLayer()\n\n if(self.pnet_type in ['vgg','vgg16']):\n net_type = vgg16\n self.chns = [64,128,256,512,512]\n elif(self.pnet_type=='alex'):\n net_type = alexnet\n self.chns = [64,192,384,256,256]\n elif(self.pnet_type=='squeeze'):\n net_type = squeezenet\n self.chns = [64,128,256,384,384,512,512]\n self.L = len(self.chns)\n\n self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)\n\n if(lpips):\n self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)\n self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)\n self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)\n self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)\n self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)\n self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]\n if(self.pnet_type=='squeeze'): # 7 layers for squeezenet\n self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)\n self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)\n self.lins+=[self.lin5,self.lin6]\n\n def forward(self, in0, in1, retPerLayer=False):\n # v0.0 - original release had a bug, where input was not scaled\n in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1)\n outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)\n feats0, feats1, diffs = {}, {}, {}\n\n for kk in range(self.L):\n feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])\n diffs[kk] = (feats0[kk]-feats1[kk])**2\n\n if(self.lpips):\n if(self.spatial):\n res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]\n else:\n res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]\n else:\n if(self.spatial):\n res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]\n else:\n res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)]\n\n val = res[0]\n for l in range(1,self.L):\n val += res[l]\n \n if(retPerLayer):\n return (val, res)\n else:\n return val\n\nclass ScalingLayer(nn.Module):\n def __init__(self):\n super(ScalingLayer, self).__init__()\n self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None])\n self.register_buffer('scale', 
torch.Tensor([.458,.448,.450])[None,:,None,None])\n\n def forward(self, inp):\n return (inp - self.shift) / self.scale\n\n\nclass NetLinLayer(nn.Module):\n ''' A single linear layer which does a 1x1 conv '''\n def __init__(self, chn_in, chn_out=1, use_dropout=False):\n super(NetLinLayer, self).__init__()\n\n layers = [nn.Dropout(),] if(use_dropout) else []\n layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]\n self.model = nn.Sequential(*layers)\n\n\nclass Dist2LogitLayer(nn.Module):\n ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''\n def __init__(self, chn_mid=32, use_sigmoid=True):\n super(Dist2LogitLayer, self).__init__()\n\n layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),]\n layers += [nn.LeakyReLU(0.2,True),]\n layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),]\n layers += [nn.LeakyReLU(0.2,True),]\n layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),]\n if(use_sigmoid):\n layers += [nn.Sigmoid(),]\n self.model = nn.Sequential(*layers)\n\n def forward(self,d0,d1,eps=0.1):\n return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1))\n\nclass BCERankingLoss(nn.Module):\n def __init__(self, chn_mid=32):\n super(BCERankingLoss, self).__init__()\n self.net = Dist2LogitLayer(chn_mid=chn_mid)\n # self.parameters = list(self.net.parameters())\n self.loss = torch.nn.BCELoss()\n\n def forward(self, d0, d1, judge):\n per = (judge+1.)/2.\n self.logit = self.net.forward(d0,d1)\n return self.loss(self.logit, per)\n\n# L2, DSSIM metrics\nclass FakeNet(nn.Module):\n def __init__(self, use_gpu=True, colorspace='Lab'):\n super(FakeNet, self).__init__()\n self.use_gpu = use_gpu\n self.colorspace=colorspace\n\nclass L2(FakeNet):\n\n def forward(self, in0, in1, retPerLayer=None):\n assert(in0.size()[0]==1) # currently only supports batchSize 1\n\n if(self.colorspace=='RGB'):\n (N,C,X,Y) = in0.size()\n value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)\n return value\n elif(self.colorspace=='Lab'):\n value = l2(tensor2np(tensor2tensorlab(in0.data,to_norm=False)), \n tensor2np(tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')\n ret_var = Variable( torch.Tensor((value,) ) )\n if(self.use_gpu):\n ret_var = ret_var.cuda()\n return ret_var\n\nclass DSSIM(FakeNet):\n\n def forward(self, in0, in1, retPerLayer=None):\n assert(in0.size()[0]==1) # currently only supports batchSize 1\n\n if(self.colorspace=='RGB'):\n value = dssim(1.*tensor2im(in0.data), 1.*tensor2im(in1.data), range=255.).astype('float')\n elif(self.colorspace=='Lab'):\n value = dssim(tensor2np(tensor2tensorlab(in0.data,to_norm=False)), \n tensor2np(tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')\n ret_var = Variable( torch.Tensor((value,) ) )\n if(self.use_gpu):\n ret_var = ret_var.cuda()\n return ret_var\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print('Network',net)\n print('Total number of parameters: %d' % num_params)\n", "import argparse\nimport os\nfrom perceptual_similarity import models\nimport numpy as np\nfrom perceptual_similarity.util import util\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-d','--dir', type=str, default='./imgs/ex_dir_pair')\nparser.add_argument('-o','--out', type=str, 
default='./imgs/example_dists.txt')\nparser.add_argument('--use_gpu', action='store_true', help='turn on flag to use GPU')\n\nopt = parser.parse_args()\n\n## Initializing the model\nmodel = models.PerceptualLoss(model='net-lin',net='alex',use_gpu=opt.use_gpu)\n\n# crawl directories\nf = open(opt.out,'w')\nfiles = os.listdir(opt.dir)\n\ndists = []\nfor (ff,file0) in enumerate(files[:-1]):\n\timg0 = util.im2tensor(util.load_image(os.path.join(opt.dir,file0))) # RGB image from [-1,1]\n\tif(opt.use_gpu):\n\t\timg0 = img0.cuda()\n\n\tfor (gg,file1) in enumerate(files[ff+1:]):\n\t\timg1 = util.im2tensor(util.load_image(os.path.join(opt.dir,file1)))\n\t\tif(opt.use_gpu):\n\t\t\timg1 = img1.cuda()\n\n\t\t# Compute distance\n\t\tdist01 = model.forward(img0,img1).item()\n\t\tdists.append(dist01)\n\t\tprint('(%s, %s): %.3f'%(file0,file1,dist01))\n\t\tf.write('(%s, %s): %.3f\\n'%(file0,file1,dist01)) # newline so each pair lands on its own line\n\ndist_mean = np.mean(np.array(dists))\nprint('Mean: %.3f'%dist_mean)\nf.write('Mean: %.3f\\n'%dist_mean)\n\nf.close()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.mean", "torch.Tensor", "torch.cat", "torch.nn.Conv2d", "torch.nn.BCELoss", "torch.nn.Sigmoid", "torch.nn.Upsample", "torch.nn.LeakyReLU" ], [ "numpy.array" ] ]
dmgav/ptycho_gui
[ "4474008f85b0aad4519fb2236be8b81c8c6e818f" ]
[ "nsls2ptycho/core/widgets/mplcanvas.py" ]
[ "import os\nfrom PyQt5 import QtCore, QtWidgets, QtGui\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.pyplot import Axes\nimport matplotlib.cm as cm\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable\n\nimport csv\nimport numpy as np\nfrom PIL import Image\n\nfrom nsls2ptycho.core.ptycho.utils import split\n\n\ndef load_image_pil(path):\n \"\"\"\n Read images using the PIL lib\n \"\"\"\n file = Image.open(str(path)) # 'I;16B'\n return np.array(file.getdata()).reshape(file.size[::-1])\n\n\ndef load_image_ascii(path):\n \"\"\"\n Read ASCII images using the csv lib\n \"\"\"\n delimiter = '\\t'\n data = []\n for row in csv.reader(open(path), delimiter=delimiter):\n data.append(row[:-1])\n img = np.array(data).astype(np.double)\n return img\n\n\ndef brush_to_color_tuple(brush):\n r, g, b, a = brush.color().getRgbF()\n return r, g, b\n\n\nclass MplCanvas(FigureCanvas):\n \"\"\"Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).\"\"\"\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n\n ax = Axes(fig, [0., 0., 1., 1.])\n fig.add_axes(ax)\n self.axes = ax\n self.fig = fig\n\n self.canvas = FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n #self.figure.subplots_adjust(left= 0.15, bottom=0.15)\n window_brush = self.window().palette().window()\n fig.set_facecolor(brush_to_color_tuple(window_brush))\n fig.set_facecolor(brush_to_color_tuple(window_brush))\n\n self.reset()\n\n def reset(self):\n self.image_handlers = None\n self.line_handlers = []\n self.axes.clear()\n self.axes.set_axis_off() # must be after clear()\n self.draw() # test \n\n def compute_initial_figure(self):\n pass\n\n def axis_on(self):\n self.axes.set_axis_on()\n make_axes_area_auto_adjustable(self.axes)\n\n def update_image(self, image):\n if self.image_handlers is None:\n self.image_handlers = self.axes.imshow(image)\n else:\n self.image_handlers.set_data(image)\n # amplitude and phase have dramatically different ranges, so rescaling is necessary\n self.image_handlers.autoscale()\n self.draw()\n\n def update_plot(self, xValues, yValues):\n\n num_plots = yValues.shape[1]\n if len(self.line_handlers) == 0:\n for i in range(num_plots):\n h = self.axes.semilogy(xValues, yValues[:,i])\n self.line_handlers.append(h[0])\n else:\n for hidx, h in enumerate(self.line_handlers):\n h.set_data(xValues, yValues[:,hidx])\n self.axes.tick_params(axis='both', labelsize=8)\n self.axes.set_yscale('log')\n self.axes.relim(visible_only=True)\n #self.axes.autoscale(tight=True)\n #self.axes.autoscale_view(tight=True)\n self.axes.autoscale_view()\n\n self.draw()\n\n def update_scatter(self, pts, mpi_size, colormap=cm.jet):\n '''\n Plot N scanning points in mpi_size different colors\n\n Parameters:\n - pts: np.array([[x0, x1, ..., xN], [y0, y1, ..., yN]])\n - mpi_size: number of MPI processes\n - colormap \n '''\n # only show up to 15 items in the legend to fit in the window\n label_set = set([i for i in range(9)] + [i for i in range(mpi_size, mpi_size-6, -1)])\n #labels = []\n too_long = False\n a = split(pts.shape[1], mpi_size)\n if len(self.line_handlers) == 0:\n colors = colormap(np.linspace(0, 1, len(a)))\n for i in range(mpi_size):\n if mpi_size <=15 or i in label_set:\n label = 'Process %i'%i\n 
#labels.append(label)\n elif i==mpi_size-6 and i not in label_set:\n label = r' $\\vdots$'\n #labels.append(label)\n too_long = True\n else:\n label = '_nolegend_' # matplotlib undocumented secret...\n h = self.axes.scatter(pts[0, a[i][0]:a[i][1]], pts[1, a[i][0]:a[i][1]], c=[colors[i]], label=label)\n self.line_handlers.append(h)\n else: # assuming mpi_size is unchanged\n for i, h in enumerate(self.line_handlers):\n h.set_offsets(np.array([pts[0, a[i][0]:a[i][1]], pts[1, a[i][0]:a[i][1]]]).transpose())\n # TODO: handle plot limits?\n ##self.axes.tick_params(axis='both', labelsize=8)\n #self.axes.relim(visible_only=True)\n ##self.axes.autoscale(tight=True)\n ##self.axes.autoscale_view(tight=True)\n #self.axes.autoscale_view()\n\n # we have a rectangular window, make the plot align to its center left\n self.axes.set_aspect(aspect='equal', anchor='W')\n legend = self.axes.legend(bbox_to_anchor=(0.98, 1.0), fancybox=True)\n\n # for the label \\vdots, remove its marker\n if too_long:\n legend.legendHandles[9].set_sizes([0])\n #self.axes.legend(legend.legendHandles, labels, bbox_to_anchor=(0.98, 1.0), fancybox=True)\n\n self.draw()\n" ]
[ [ "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__", "matplotlib.figure.Figure", "matplotlib.pyplot.Axes", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry", "numpy.array", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy" ] ]
mldiego/sonode
[ "19726b437d3aba94c9f621afa519b0d3a71bbfea" ]
[ "experiments/function_fitting/double_function/plot_triple_func.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport seaborn as sns\n\n\nfig = plt.figure(figsize=[10, 4])\nfig.subplots_adjust(hspace=0., wspace=0)\n\n\nsns.set_style('dark')\nrc('font', family='serif')\nrc('text', usetex=True)\nax1 = plt.subplot(1, 2, 1)\nfilename = 'results./double_func./mixed./'\nlearnt_f1 = np.load(filename+'learnt_f1.npy')\nlearnt_f2 = np.load(filename+'learnt_f2.npy')\nsamp_ts = np.load(filename+'ts.npy')\nreal_f1 = np.load(filename+'real_f1.npy')\nreal_f2 = np.load(filename+'real_f2.npy')\nlearnt_a1 = np.load(filename+'learnt_a1.npy')\nlearnt_a2 = np.load(filename+'learnt_a2.npy')\n\nplt.plot(samp_ts, learnt_f1, color='#004488', label='Learnt $x_{1}$')\nplt.plot(samp_ts, learnt_f2, color='#BB5566', label='Learnt $x_{2}$')\nplt.scatter(samp_ts, real_f1, color='#004488', s=20)#, label='True x_1')\nplt.scatter(samp_ts, real_f2, color='#BB5566', s=20)#, label='True x_2')\nplt.plot(samp_ts, learnt_a1, color = '#004488', label='$a_{1}$', linestyle='--')\nplt.plot(samp_ts, learnt_a2, color='#BB5566', label='$a_{2}$', linestyle='--')\n #plt.legend()\nplt.xlabel('t', fontsize=18)\nplt.ylabel('$x_{1}, x_{2}, a_{1}, a_{2}$', fontsize=18)\nplt.title('ANODE(1) Double Function', fontsize=20)\n#sns.set_style('white')\nplt.legend(loc='upper right', ncol=2, fontsize=13)\n\n\n\nsns.set_style('dark')\nax2 = plt.subplot(1, 2, 2)\n\nfilename = 'results./triple_func./'\nlearnt_f1 = np.load(filename+'learnt_f1.npy')\nlearnt_f2 = np.load(filename+'learnt_f2.npy')\nlearnt_f3 = np.load(filename+'learnt_f3.npy')\nsamp_ts = np.load(filename+'ts.npy')\nreal_f1 = np.load(filename+'real_f1.npy')\nreal_f2 = np.load(filename+'real_f2.npy')\nreal_f3 = np.load(filename+'real_f3.npy')\nlearnt_a1 = np.load(filename+'learnt_a1.npy')\nlearnt_a2 = np.load(filename+'learnt_a2.npy')\nlearnt_a3 = np.load(filename+'learnt_a3.npy')\n\nrc('font', family='serif')\nrc('text', usetex=True)\nplt.plot(samp_ts, learnt_f1, color='#004488', label='Learnt $x_{1}$')\nplt.plot(samp_ts, learnt_f2, color='#BB5566', label='Learnt $x_{2}$')\nplt.plot(samp_ts, learnt_f3, color='#DDAA33', label='Learnt $x_{3}$')\nplt.scatter(samp_ts, real_f1, color='#004488', s=20)#, label='True x_1')\nplt.scatter(samp_ts, real_f2, color='#BB5566', s=20)#, label='True x_2')\nplt.scatter(samp_ts, real_f3, color='#DDAA33', s=20)#, label='True x_3')\nplt.plot(samp_ts, learnt_a1, color = '#004488', label='$a_{1}$', linestyle='--')\nplt.plot(samp_ts, learnt_a2, color='#BB5566', label='$a_{2}$', linestyle='--')\nplt.plot(samp_ts, learnt_a3, color='#DDAA33', label='$a_{3}$', linestyle='--')\n #plt.legend()\nplt.xlabel('t', fontsize=18)\nplt.ylabel('$x_{1}, x_{2}, x_{3}, a_{1}, a_{2}, a_{3}$', fontsize=18)\nplt.title('ANODE(1) Triple Function', fontsize=20)\n#sns.set_style('white')\nplt.legend(ncol=2, fontsize=13)\n\n\nplt.tight_layout()\nplt.savefig('triple_func.png', bbox_inches='tight')" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "numpy.load", "matplotlib.rc", "matplotlib.pyplot.figure" ] ]
mbrukman/cloudml-samples
[ "6002cf7b57112f98b20ab11a37c1b17f7337e8cf" ]
[ "cloudml-template/template/trainer/task.py" ]
[ "#!/usr/bin/env python\n\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport argparse\nfrom datetime import datetime\n\nimport tensorflow as tf\n\nimport metadata\nimport input\nimport model\n\n\n# ******************************************************************************\n# YOU MAY MODIFY THIS FUNCTION TO ADD/REMOVE PARAMS OR CHANGE THE DEFAULT VALUES\n# ******************************************************************************\n\n\ndef initialise_hyper_params(args_parser):\n \"\"\"\n Define the arguments with the default values,\n parses the arguments passed to the task,\n and set the HYPER_PARAMS global variable\n\n Args:\n args_parser\n \"\"\"\n\n # Data files arguments\n args_parser.add_argument(\n '--train-files',\n help='GCS or local paths to training data',\n nargs='+',\n required=True\n )\n args_parser.add_argument(\n '--eval-files',\n help='GCS or local paths to evaluation data',\n nargs='+',\n required=True\n )\n args_parser.add_argument(\n '--feature-stats-file',\n help='GCS or local paths to feature statistics json file',\n nargs='+',\n default=None\n )\n ###########################################\n\n # Experiment arguments - training\n args_parser.add_argument(\n '--train-steps',\n help=\"\"\"\n Steps to run the training job for. If --num-epochs and --train-size are not specified,\n this must be. 
Otherwise the training job will run indefinitely.\n if --num-epochs and --train-size are specified, then --train-steps will be:\n (train-size/train-batch-size) * num-epochs\\\n \"\"\",\n default=1000,\n type=int\n )\n args_parser.add_argument(\n '--train-batch-size',\n help='Batch size for each training step',\n type=int,\n default=200\n )\n args_parser.add_argument(\n '--train-size',\n help='Size of training set (instance count)',\n type=int,\n default=None\n )\n args_parser.add_argument(\n '--num-epochs',\n help=\"\"\"\\\n Maximum number of training data epochs on which to train.\n If both --train-size and --num-epochs are specified,\n --train-steps will be: (train-size/train-batch-size) * num-epochs.\\\n \"\"\",\n default=None,\n type=int,\n )\n ###########################################\n\n # Experiment arguments - evaluation\n args_parser.add_argument(\n '--eval-every-secs',\n help='How long to wait before running the next evaluation',\n default=120,\n type=int\n )\n args_parser.add_argument(\n '--eval-steps',\n help=\"\"\"\\\n Number of steps to run evaluation for at each checkpoint',\n Set to None to evaluate on the whole evaluation data\n \"\"\",\n default=None,\n type=int\n )\n args_parser.add_argument(\n '--eval-batch-size',\n help='Batch size for evaluation steps',\n type=int,\n default=200\n )\n ###########################################\n\n # Features processing arguments\n args_parser.add_argument(\n '--num-buckets',\n help='Number of buckets into which to discretize numeric columns',\n default=10,\n type=int\n )\n args_parser.add_argument(\n '--embedding-size',\n help='Number of embedding dimensions for categorical columns. value of 0 means no embedding',\n default=4,\n type=int\n )\n ###########################################\n\n # Estimator arguments\n args_parser.add_argument(\n '--learning-rate',\n help=\"Learning rate value for the optimizers\",\n default=0.1,\n type=float\n )\n args_parser.add_argument(\n '--learning-rate-decay-factor',\n help=\"\"\"\\\n **VALID FOR CUSTOM MODELS**\n The factor by which the learning rate should decay by the end of the training.\n decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)\n If set to 1.0 (default), then no decay will occur\n If set to 0.5, then the learning rate should reach 0.5 of its original value at the end of the training. \n Note that, decay_steps is set to train_steps\\\n \"\"\",\n default=1.0,\n type=float\n )\n args_parser.add_argument(\n '--hidden-units',\n help=\"\"\"\\\n Hidden layer sizes to use for DNN feature columns, provided in comma-separated layers. \n If --scale-factor > 0, then only the size of the first layer will be used to compute \n the sizes of subsequent layers \\\n \"\"\",\n default='30,30,30'\n )\n args_parser.add_argument(\n '--layer-sizes-scale-factor',\n help=\"\"\"\\\n Determine how the size of the layers in the DNN decays. \n If value = 0 then the provided --hidden-units will be taken as is\\\n \"\"\",\n default=0.7,\n type=float\n )\n args_parser.add_argument(\n '--num-layers',\n help='Number of layers in the DNN. 
If --scale-factor > 0, then this parameter is ignored',\n default=4,\n type=int\n )\n args_parser.add_argument(\n '--dropout-prob',\n help=\"The probability we will drop out a given coordinate\",\n default=None\n )\n args_parser.add_argument(\n '--encode-one-hot',\n help=\"\"\"\\\n If set to True, the categorical columns will be encoded as One-Hot indicators in the deep part of the DNN model.\n Otherwise, the categorical columns will only be used in the wide part of the DNN model\n \"\"\",\n action='store_true',\n default=True,\n )\n args_parser.add_argument(\n '--as-wide-columns',\n help=\"\"\"\\\n If set to True, the categorical columns will be used in the wide part of the DNN model\n \"\"\",\n action='store_true',\n default=True,\n )\n ###########################################\n\n # Saved model arguments\n args_parser.add_argument(\n '--job-dir',\n help='GCS location to write checkpoints and export models',\n required=True\n )\n args_parser.add_argument(\n '--reuse-job-dir',\n action='store_true',\n default=False,\n help=\"\"\"\\\n Flag to decide if the model checkpoint should\n be re-used from the job-dir. If False then the\n job-dir will be deleted\"\"\"\n )\n args_parser.add_argument(\n '--export-format',\n help='The input format of the exported SavedModel binary',\n choices=['JSON', 'CSV', 'EXAMPLE'],\n default='JSON'\n )\n ###########################################\n\n # Argument to turn on all logging\n args_parser.add_argument(\n '--verbosity',\n choices=[\n 'DEBUG',\n 'ERROR',\n 'FATAL',\n 'INFO',\n 'WARN'\n ],\n default='INFO',\n )\n\n return args_parser.parse_args()\n\n\n# ******************************************************************************\n# YOU NEED NOT TO CHANGE THE FUNCTION TO RUN THE EXPERIMENT\n# ******************************************************************************\n\n\ndef run_experiment(run_config):\n \"\"\"Train, evaluate, and export the model using tf.estimator.train_and_evaluate API\"\"\"\n\n train_input_fn = input.generate_input_fn(\n file_names_pattern=HYPER_PARAMS.train_files,\n mode=tf.estimator.ModeKeys.TRAIN,\n num_epochs=HYPER_PARAMS.num_epochs,\n batch_size=HYPER_PARAMS.train_batch_size\n )\n\n eval_input_fn = input.generate_input_fn(\n file_names_pattern=HYPER_PARAMS.eval_files,\n mode=tf.estimator.ModeKeys.EVAL,\n batch_size=HYPER_PARAMS.eval_batch_size\n )\n\n exporter = tf.estimator.FinalExporter(\n 'estimator',\n input.SERVING_FUNCTIONS[HYPER_PARAMS.export_format],\n as_text=False # change to true if you want to export the model as readable text\n )\n\n # compute the number of training steps based on num_epoch, train_size, and train_batch_size\n if HYPER_PARAMS.train_size is not None and HYPER_PARAMS.num_epochs is not None:\n train_steps = (HYPER_PARAMS.train_size / HYPER_PARAMS.train_batch_size) * \\\n HYPER_PARAMS.num_epochs\n else:\n train_steps = HYPER_PARAMS.train_steps\n\n train_spec = tf.estimator.TrainSpec(\n train_input_fn,\n max_steps=int(train_steps)\n )\n\n eval_spec = tf.estimator.EvalSpec(\n eval_input_fn,\n steps=HYPER_PARAMS.eval_steps,\n exporters=[exporter],\n throttle_secs=HYPER_PARAMS.eval_every_secs,\n )\n\n print(\"* experiment configurations\")\n print(\"===========================\")\n print(\"Train size: {}\".format(HYPER_PARAMS.train_size))\n print(\"Epoch count: {}\".format(HYPER_PARAMS.num_epochs))\n print(\"Train batch size: {}\".format(HYPER_PARAMS.train_batch_size))\n print(\"Training steps: {} ({})\".format(int(train_steps),\n \"supplied\" if HYPER_PARAMS.train_size is None else \"computed\"))\n 
print(\"Evaluate every {} seconds\".format(HYPER_PARAMS.eval_every_secs))\n print(\"===========================\")\n\n if metadata.TASK_TYPE == \"classification\":\n estimator = model.create_classifier(\n config=run_config\n )\n elif metadata.TASK_TYPE == \"regression\":\n estimator = model.create_regressor(\n config=run_config\n )\n else:\n estimator = model.create_estimator(\n config=run_config\n )\n\n # train and evaluate\n tf.estimator.train_and_evaluate(\n estimator,\n train_spec,\n eval_spec\n )\n\n\n# ******************************************************************************\n# THIS IS ENTRY POINT FOR THE TRAINER TASK\n# ******************************************************************************\n\n\ndef main():\n\n print('')\n print('Hyper-parameters:')\n print(HYPER_PARAMS)\n print('')\n\n # Set python level verbosity\n tf.logging.set_verbosity(HYPER_PARAMS.verbosity)\n\n # Set C++ Graph Execution level verbosity\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf.logging.__dict__[HYPER_PARAMS.verbosity] / 10)\n\n # Directory to store output model and checkpoints\n model_dir = HYPER_PARAMS.job_dir\n\n # If job_dir_reuse is False then remove the job_dir if it exists\n print(\"Resume training:\", HYPER_PARAMS.reuse_job_dir)\n if not HYPER_PARAMS.reuse_job_dir:\n if tf.gfile.Exists(model_dir):\n tf.gfile.DeleteRecursively(model_dir)\n print(\"Deleted job_dir {} to avoid re-use\".format(model_dir))\n else:\n print(\"No job_dir available to delete\")\n else:\n print(\"Reusing job_dir {} if it exists\".format(model_dir))\n\n run_config = tf.estimator.RunConfig(\n tf_random_seed=19830610,\n log_step_count_steps=1000,\n save_checkpoints_secs=120, # change if you want to change frequency of saving checkpoints\n keep_checkpoint_max=3,\n model_dir=model_dir\n )\n\n run_config = run_config.replace(model_dir=model_dir)\n print(\"Model Directory:\", run_config.model_dir)\n\n # Run the train and evaluate experiment\n time_start = datetime.utcnow()\n print(\"\")\n print(\"Experiment started at {}\".format(time_start.strftime(\"%H:%M:%S\")))\n print(\".......................................\")\n\n run_experiment(run_config)\n\n time_end = datetime.utcnow()\n print(\".......................................\")\n print(\"Experiment finished at {}\".format(time_end.strftime(\"%H:%M:%S\")))\n print(\"\")\n time_elapsed = time_end - time_start\n print(\"Experiment elapsed time: {} seconds\".format(time_elapsed.total_seconds()))\n print(\"\")\n\n\nargs_parser = argparse.ArgumentParser()\nHYPER_PARAMS = initialise_hyper_params(args_parser)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.gfile.DeleteRecursively", "tensorflow.gfile.Exists", "tensorflow.logging.set_verbosity", "tensorflow.estimator.EvalSpec", "tensorflow.estimator.RunConfig", "tensorflow.estimator.train_and_evaluate", "tensorflow.estimator.FinalExporter" ] ]
Sara-X/text-to-text-transfer-transformer
[ "2f3c40073cc45e11d5fe222a5c6088bb7d95071c" ]
[ "t5/models/hf_model.py" ]
[ "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Hugging Face Transformers T5 Model.\n\nThis model API is fully functional but should be treated as experimental and\nsubject to change. Due to implementation details, if you are interested in\nexactly replicating the results in ``Exploring the Limits of Transfer Learning\nwith a Unified Text-to-Text Transformer'' you should use the MtfModel API\ninstead.\n\nUsage example for fine-tuning and evaluating on CoLA:\n\n```Python\nimport functools\n\nimport t5\nimport torch\nimport transformers\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\n\nmodel = t5.models.HfPyTorchModel(\"t5-base\", \"/tmp/hft5/\", device)\n\n# Evaluate the pre-trained checkpoint, before further fine-tuning\nmodel.eval(\n \"glue_cola_v002\",\n sequence_length={\"inputs\": 64, \"targets\": 4},\n batch_size=128,\n)\n\n# Run 1000 steps of fine-tuning\nmodel.train(\n mixture_or_task_name=\"glue_cola_v002\",\n steps=1000,\n save_steps=100,\n sequence_length={\"inputs\": 64, \"targets\": 4},\n split=\"train\",\n batch_size=32,\n optimizer=functools.partial(transformers.AdamW, lr=1e-4),\n)\n\n# Evaluate after fine-tuning\nmodel.eval(\n \"glue_cola_v002\",\n checkpoint_steps=\"all\",\n sequence_length={\"inputs\": 64, \"targets\": 4},\n batch_size=128,\n)\n\n# Generate some predictions\ninputs = [\n \"cola sentence: This is a totally valid sentence.\",\n \"cola sentence: A doggy detail was walking famously.\",\n]\nmodel.predict(\n inputs,\n sequence_length={\"inputs\": 32},\n batch_size=2,\n output_file=\"/tmp/hft5/example_predictions.txt\",\n)\n```\n\n\"\"\"\n\nimport functools\nimport itertools\nimport os\nimport re\nimport time\n\nfrom absl import logging\nimport mesh_tensorflow.transformer.dataset as transformer_dataset\nimport t5.data\nfrom t5.models.t5_model import T5Model\nimport tensorflow.compat.v1 as tf\nimport tensorflow_datasets as tfds\nimport torch\nimport torch.utils.tensorboard\n\nCHECKPOINT_FILE_FORMAT = \"model-{}.checkpoint\"\n\n\ndef tokens_to_batches(dataset, sequence_length, batch_size, output_features):\n \"\"\"Convert a dataset of token sequences to batches of padded/masked examples.\n\n Args:\n dataset: tf.data.Dataset containing examples with token sequences.\n sequence_length: dict of int, a dict mapping feature name to length.\n batch_size: int, the number of padded sequences in each batch.\n output_features: list of str, features to include in the dataset.\n\n Returns:\n A generator that produces batches of numpy examples.\n \"\"\"\n dataset = transformer_dataset.pack_or_pad(\n dataset,\n sequence_length,\n pack=False,\n feature_keys=output_features,\n ensure_eos=True,\n )\n\n def _map_fn(ex):\n for key in output_features:\n tensor = ex[key]\n mask = tf.cast(tf.greater(tensor, 0), tensor.dtype)\n ex[key + \"_mask\"] = mask\n return ex\n\n dataset = dataset.map(\n _map_fn,\n 
num_parallel_calls=t5.data.preprocessors.num_parallel_calls()\n )\n\n dataset = dataset.batch(batch_size, drop_remainder=False)\n return tfds.as_numpy(dataset)\n\n\ndef get_dataset(mixture_or_task_name, sequence_length, split, batch_size):\n \"\"\"Get a generator of numpy examples for a given Task or Mixture.\n\n Args:\n mixture_or_task_name: str, the name of the Mixture or Task to train on.\n Must be pre-registered in the global `t5.data.TaskRegistry` or\n `t5.data.MixtureRegistry.`\n sequence_length: dict of int, a dict mapping feature name to length.\n split: str or `tensorflow_datasets.Split`, the data split to load.\n batch_size: int, the number of padded sequences in each batch.\n\n Returns:\n A generator that produces batches of numpy examples.\n \"\"\"\n task = t5.data.get_mixture_or_task(mixture_or_task_name)\n ds = task.get_dataset(sequence_length, split)\n return tokens_to_batches(\n ds, sequence_length, batch_size, tuple(task.output_features)\n )\n\n\ndef write_lines_to_file(lines, filename):\n \"\"\"Write each line to filename, replacing the file if it exists.\"\"\"\n if tf.io.gfile.exists(filename):\n tf.io.gfile.remove(filename)\n with tf.io.gfile.GFile(filename, \"w\") as output_file:\n output_file.write(\"\\n\".join([str(l) for l in lines]))\n\n\nclass HfPyTorchModel(T5Model):\n \"\"\"Wrapper class for Hugging Face Transformers PyTorch T5 model.\"\"\"\n\n def __init__(self, model_spec, model_dir, device):\n \"\"\"Constructor for HfModel class.\n\n Args:\n model_spec: A str to pass into the `pretrained_model_name_or_path`\n argument of `transformers.T5ForConditionalGeneration.from_pretrained`\n (e.g. `\"t5-base\"` or a path to a previously trained model) or an\n instance of the `transformers.configuration_t5.T5Config` class to use\n to directly construct the `transformers.T5ForConditionalGeneration`\n object.\n model_dir: str, directory to save and load model checkpoints.\n device: `torch.device` on which the model should be run.\n \"\"\"\n # We have to import transformers here because it has a side effect of\n # creating a TensorFlow graph, which prevents eager execution from being\n # enabled in files that import hf_model.py\n import transformers # pylint: disable=import-outside-toplevel,g-import-not-at-top\n if isinstance(model_spec, str):\n self._model = transformers.T5ForConditionalGeneration.from_pretrained(\n model_spec\n )\n elif isinstance(model_spec, transformers.T5Config):\n self._model = transformers.T5ForConditionalGeneration(model_spec)\n else:\n raise ValueError(\"model_spec should be a string or T5Config.\")\n\n tf.io.gfile.makedirs(model_dir)\n self._writer = torch.utils.tensorboard.writer.SummaryWriter(model_dir)\n self._model_dir = model_dir\n self._device = device\n if self._device.type == \"cuda\":\n self._model.cuda()\n self._step = 0\n self.load_latest_checkpoint()\n self.to_tensor = functools.partial(torch.as_tensor, device=self._device)\n\n @property\n def model(self):\n return self._model\n\n @property\n def step(self):\n return self._step\n\n def save_checkpoint(self, step):\n \"\"\"Save the current model parameters to the `model_dir`.\n\n Args:\n step: int, the current training step.\n \"\"\"\n path = os.path.join(self._model_dir, CHECKPOINT_FILE_FORMAT.format(step))\n torch.save(self._model.state_dict(), path)\n\n def load_checkpoint(self, step, model_dir=None):\n \"\"\"Load the model parameters from a checkpoint at a given step.\n\n Args:\n step: int, load the checkpoint from this training step.\n model_dir: str, the directory of the checkpoint 
to load or None to use\n this model's directory.\n \"\"\"\n model_dir = model_dir or self._model_dir\n path = os.path.join(model_dir, CHECKPOINT_FILE_FORMAT.format(step))\n logging.info(\"Loading from %s\", path)\n self._model.load_state_dict(torch.load(path))\n self._step = step\n\n def get_all_checkpoint_steps(self, model_dir=None):\n \"\"\"Retrieve the steps corresponding to all checkpoints in `model_dir`.\n\n Args:\n model_dir: str, the directory of the checkpoints or None to use this\n model's directory.\n\n Returns:\n A list of ints corresponding to all checkpoint steps, or None if there\n are no checkpoints in the model directory.\n \"\"\"\n model_dir = model_dir or self._model_dir\n checkpoint_files = tf.io.gfile.glob(\n os.path.join(model_dir, CHECKPOINT_FILE_FORMAT.format(\"*\"))\n )\n if not checkpoint_files:\n return\n step_regex = re.compile(\".*\" + CHECKPOINT_FILE_FORMAT.format(r\"(\\d+)\"))\n steps = [int(step_regex.match(path).group(1)) for path in checkpoint_files]\n return sorted(steps)\n\n def get_latest_checkpoint_step(self, model_dir=None):\n \"\"\"Retrieve the step corresponding to the most recent checkpoint.\n\n Args:\n model_dir: str, the directory of the checkpoints or None to use this\n model's directory.\n\n Returns:\n An integer corresponding to the most recent step, or None if there are no\n checkpoints in the model directory.\n \"\"\"\n steps = self.get_all_checkpoint_steps(model_dir)\n if steps is not None:\n return max(steps)\n\n def load_latest_checkpoint(self):\n \"\"\"Load the most recent checkpoint and update the model's current step.\"\"\"\n latest_step = self.get_latest_checkpoint_step()\n if latest_step is not None:\n self.load_checkpoint(latest_step)\n\n def train(\n self,\n mixture_or_task_name,\n steps,\n save_steps,\n sequence_length,\n split,\n batch_size,\n optimizer,\n learning_rate_scheduler=None,\n ):\n \"\"\"Train the model on the given Mixture or Task.\n\n Args:\n mixture_or_task_name: str, the name of the Mixture or Task to train on.\n Must be pre-registered in the global `t5.data.TaskRegistry` or\n `t5.data.MixtureRegistry.`\n steps: int, the total number of steps to train for.\n save_steps: int, the number of steps between checkpoint saves.\n sequence_length: dict of int, a dict mapping feature name to length.\n split: str or `tensorflow_datasets.Split`, the data split to load.\n batch_size: int, the number of padded sequences in each batch.\n optimizer: function that takes the model parameters as its sole argument.\n For example, to use an AdamW optimizer with a learning rate of 1e-4,\n you could pass in `functools.partial(transformers.AdamW, lr=1e-4)`.\n learning_rate_scheduler: optional function that takes in an optimizer as\n its sole argument. 
For example, to use a schedule that warms up the\n optimizer's learning rate after 100 steps, you could pass in\n `functools.partial(transformers.get_constant_schedule_with_warmup,\n num_warmup_steps=100)`.\n \"\"\"\n self._model.train()\n ds = get_dataset(mixture_or_task_name, sequence_length, split, batch_size)\n # Repeat dataset forever\n ds = itertools.cycle(ds)\n optimizer = optimizer(self._model.parameters())\n if learning_rate_scheduler:\n learning_rate_scheduler = learning_rate_scheduler(optimizer)\n\n now = time.time()\n for train_step, batch in enumerate(itertools.islice(ds, steps)):\n\n if not train_step % save_steps:\n # TODO(craffel): Consider saving optimizer and scheduler state.\n logging.info(\"Saving checkpoint for step %s\", self._step)\n self.save_checkpoint(self._step)\n\n self._model.zero_grad()\n loss, _, _ = self._model(\n input_ids=self.to_tensor(batch[\"inputs\"]),\n attention_mask=self.to_tensor(batch[\"inputs_mask\"]),\n decoder_attention_mask=self.to_tensor(batch[\"targets_mask\"]),\n lm_labels=self.to_tensor(batch[\"targets\"]),\n )\n loss.backward()\n optimizer.step()\n if learning_rate_scheduler:\n learning_rate_scheduler.step()\n\n self._writer.add_scalar(\n \"loss\", loss.detach().cpu().numpy(), self._step\n )\n self._writer.add_scalar(\"step/s\", 1 / (time.time() - now), self._step)\n now = time.time()\n self._step += 1\n\n logging.info(\"Saving final checkpoint for step %s\", self._step)\n self.save_checkpoint(self._step)\n\n def eval(\n self,\n mixture_or_task_name,\n sequence_length,\n batch_size,\n checkpoint_steps=None,\n summary_dir=None,\n split=\"validation\",\n **generate_kwargs,\n ):\n \"\"\"Evaluate the model on the given Mixture or Task.\n\n *Note*: If a checkpoint step is provided (i.e. `checkpoint_steps is not\n None`), the model's state will be replaced by the state in those\n checkpoints. If you have not saved your model before calling `eval`, you\n should call `save_checkpoint` before `eval` to avoid losing its parameter\n values and state.\n\n Args:\n mixture_or_task_name: str, the name of the Mixture or Task to evaluate\n on. Must be pre-registered in the global `t5.data.TaskRegistry` or\n `t5.data.MixtureRegistry.`\n sequence_length: dict of int, a dict mapping feature name to length.\n batch_size: int, the number of padded sequences in each batch.\n checkpoint_steps: int, list of ints, \"all\", or None. If None, eval in the\n model in its current state without loading any checkpoints. If an int\n or list of ints, evaluation will be run on the checkpoint files in\n `model_dir` whose global steps are those provided. If -1, eval on the\n latest checkpoint from the model directory. If \"all\", evaluate all\n checkpoints in the model directory.\n summary_dir: str, path to write TensorBoard events file summaries for\n eval. If None, use model_dir/{split}_eval.\n split: str, the mixture/task split to evaluate on.\n **generate_kwargs: Additional keyword arguments to pass to\n `transformers.PretrainedModel.generate()`, for example to change the\n decoding strategy. 
See the documentation for\n `transformers.PretrainedModel.generate()` for options.\n \"\"\"\n mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)\n vocab = t5.data.sentencepiece_vocabulary.SentencePieceVocabulary(\n mixture_or_task.sentencepiece_model_path\n )\n\n if isinstance(mixture_or_task, t5.data.Mixture):\n tasks = mixture_or_task.tasks\n elif isinstance(mixture_or_task, t5.data.Task):\n tasks = [mixture_or_task]\n\n for task in tasks:\n if split not in task.splits:\n logging.info(\n \"Task %s has no '%s' split; skipping eval.\", task.name, split\n )\n tasks = [task for task in tasks if split in task.splits]\n\n summary_dir = summary_dir or os.path.join(self._model_dir, f\"{split}_eval\")\n tf.io.gfile.makedirs(summary_dir)\n\n def _unbatch(batch):\n \"\"\"Converts a dict of lists to a list of dicts of singletons.\"\"\"\n return [dict(zip(batch, t)) for t in zip(*batch.values())]\n\n # Pre-load in all of the targets once before doing eval\n cached_targets = {}\n cached_examples = {}\n for task in tasks:\n if task.metric_fns:\n ds = get_dataset(task.name, sequence_length, split, batch_size)\n # Create list of postprocessed text targets\n batches = list(ds)\n if not batches:\n raise ValueError(f\"The '{split}' split of {task.name} is empty.\")\n # \"Unbatch\" the dataset\n examples = [ex for b in batches for ex in _unbatch(b)] # pylint:disable=g-complex-comprehension\n targets = [\n task.postprocess_fn( # pylint:disable=g-complex-comprehension\n tf.compat.as_text(ex[\"targets_plaintext\"]),\n example=ex,\n is_target=True\n ) for ex in examples\n ]\n targets_filename = os.path.join(summary_dir, f\"{task.name}_targets\")\n write_lines_to_file(targets, targets_filename)\n\n inputs_filename = os.path.join(summary_dir, f\"{task.name}_inputs\")\n inputs = [ex[\"inputs_plaintext\"] for ex in examples]\n write_lines_to_file(inputs, inputs_filename)\n\n cached_targets[task.name] = targets\n cached_examples[task.name] = batches\n\n def _eval_current_model():\n self._model.eval()\n for task in tasks:\n ds = cached_examples[task.name]\n targets = cached_targets[task.name]\n predictions = []\n for batch in ds:\n predicted_tokens = self._model.generate(\n input_ids=self.to_tensor(batch[\"inputs\"]), **generate_kwargs\n )\n predicted_tokens = predicted_tokens.cpu().numpy().tolist()\n predictions.extend(\n [\n task.postprocess_fn(vocab.decode(p), example=ex)\n for p, ex in zip(predicted_tokens, _unbatch(batch))\n ]\n )\n\n if len(targets) != len(predictions):\n raise ValueError(\n f\"#targets ({len(targets)}) != #predictions ({len(predictions)})\"\n )\n\n predictions_file = os.path.join(\n summary_dir, f\"{task.name}_{self._step}_predictions\"\n )\n write_lines_to_file(predictions, predictions_file)\n\n for metric_fn in task.metric_fns:\n scores = metric_fn(targets, predictions)\n for metric_name, metric_value in scores.items():\n tag = f\"eval/{task.name}/{metric_name}\"\n self._writer.add_scalar(tag, metric_value, self._step)\n logging.info(\n \"%s at step %d: %.3f\", tag, self._step, metric_value\n )\n\n self._writer.flush()\n\n if checkpoint_steps is None:\n _eval_current_model()\n return\n elif isinstance(checkpoint_steps, int):\n checkpoint_steps = [checkpoint_steps]\n elif checkpoint_steps == \"all\":\n checkpoint_steps = self.get_all_checkpoint_steps()\n elif not isinstance(checkpoint_steps, (list, tuple)):\n raise ValueError(\n f\"checkpoint_steps must be None, int or list; got {checkpoint_steps}\"\n )\n for checkpoint_step in checkpoint_steps:\n 
self.load_checkpoint(checkpoint_step)\n _eval_current_model()\n\n def predict(\n self,\n inputs,\n sequence_length,\n batch_size,\n output_file=None,\n sentencepiece_model_path=None,\n **generate_kwargs,\n ):\n \"\"\"Generate predictions for the given inputs using the model's current state.\n\n Args:\n inputs: list of str or str, either a list of inputs to feed into the\n model or the path to a text file that contains a single input on each\n line.\n sequence_length: dict of int, a dict mapping feature name to length.\n batch_size: int, the number of padded sequences in each batch.\n output_file: str or None, path to write out predictions or None to skip\n writing.\n sentencepiece_model_path: str or None, path to a SentencePiece model file\n or None to use `t5.data.DEFAULT_SPM_PATH` (the model used for all\n pre-trained T5 models.)\n **generate_kwargs: Additional keyword arguments to pass to\n `transformers.PretrainedModel.generate()`, for example to change the\n decoding strategy. See the documentation for\n `transformers.PretrainedModel.generate()` for options.\n \"\"\"\n if isinstance(inputs, str):\n if not tf.io.gfile.exists(inputs):\n raise ValueError(\n f\"A str was provided for `inputs`, but the path {inputs} does not \"\n f\"exist. If you want the model's output for {inputs}, you should \"\n f\"feed in inputs=['{inputs}']\"\n )\n with tf.io.gfile.GFile(inputs) as f:\n inputs = [l.strip() for l in f]\n\n if sentencepiece_model_path is None:\n sentencepiece_model_path = t5.data.DEFAULT_SPM_PATH\n vocab = t5.data.sentencepiece_vocabulary.SentencePieceVocabulary(\n sentencepiece_model_path\n )\n\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n dataset = dataset.map(\n lambda ex: {\"inputs\": tf.cast(vocab.encode_tf(ex), tf.int64)},\n num_parallel_calls=t5.data.preprocessors.num_parallel_calls()\n )\n dataset = tokens_to_batches(\n dataset, sequence_length, batch_size, [\"inputs\"]\n )\n\n predictions = []\n for batch in dataset:\n predicted_tokens = self._model.generate(\n input_ids=self.to_tensor(batch[\"inputs\"]), **generate_kwargs\n )\n predicted_tokens = predicted_tokens.cpu().numpy().tolist()\n predictions.extend([vocab.decode(p) for p in predicted_tokens])\n\n for inp, pred in zip(inputs, predictions):\n logging.info(\"%s\\n -> %s\", inp, pred)\n\n if output_file is not None:\n write_lines_to_file(predictions, output_file)\n\n def finetune(\n self,\n mixture_or_task_name,\n finetune_steps,\n pretrained_model_dir,\n pretrained_checkpoint_step=-1,\n **train_kwargs,\n ):\n \"\"\"Trains model after loading from any existing checkpoint.\n\n Note that if you have initialized the model using a pre-trained model\n specification (e.g. by passing \"t5-base\" for `model_spec`) then you can\n just call `train` directly. This function is only provided for convenience\n for loading a pre-trained model checkpoint from an arbitrary model\n directory before calling `train`.\n\n Args:\n mixture_or_task_name: str, the name of the Mixture or Task to train\n on. 
Must be pre-registered in the global `t5.data.TaskRegistry` or\n `t5.data.MixtureRegistry.`\n finetune_steps: int, the number of additional steps to train for.\n pretrained_model_dir: str, directory with pretrained model checkpoints.\n pretrained_checkpoint_step: int, checkpoint to initialize weights from.\n If -1 (default), use the latest checkpoint from the pretrained model\n directory.\n **train_kwargs: Additional keyword arguments to pass to `train`. See the\n docstring for `train` for more details.\n \"\"\"\n if pretrained_checkpoint_step == -1:\n pretrained_checkpoint_step = self.get_latest_checkpoint_step(\n pretrained_model_dir\n )\n self.load_checkpoint(pretrained_checkpoint_step, pretrained_model_dir)\n self.train(mixture_or_task_name, finetune_steps, **train_kwargs)\n" ]
[ [ "tensorflow.compat.v1.io.gfile.exists", "tensorflow.compat.v1.io.gfile.makedirs", "tensorflow.compat.v1.io.gfile.remove", "torch.load", "tensorflow.compat.v1.io.gfile.GFile", "torch.utils.tensorboard.writer.SummaryWriter", "tensorflow.compat.v1.data.Dataset.from_tensor_slices", "tensorflow.compat.v1.compat.as_text", "tensorflow.compat.v1.greater" ] ]
hardikkarena/DCASE2019_task3
[ "cee08dec6fa9f508be4b4de85adeadefddec4793" ]
[ "classif/utils_classif.py" ]
[ "\nimport numpy as np\nimport os, re\n\n\n#########################################################################\n# Some of these functions have been inspired on a framework by Marius Miron developed for a pydata workshop\n# https://github.com/nkundiushuti/pydata2017bcn/blob/master/util.py\n#########################################################################\n\n\ndef save_tensor(var, out_path=None, suffix='_mel'):\n \"\"\"\n Saves a numpy array as a binary file\n -review the shape saving when it is a label\n \"\"\"\n assert os.path.isdir(os.path.dirname(out_path)), \"path to save tensor does not exist\"\n var.tofile(out_path.replace('.data', suffix + '.data'))\n save_shape(out_path.replace('.data', suffix + '.shape'), var.shape)\n\n\ndef load_tensor(in_path, suffix=''):\n \"\"\"\n Loads a binary .data file\n \"\"\"\n assert os.path.isdir(os.path.dirname(in_path)), \"path to load tensor does not exist\"\n f_in = np.fromfile(in_path.replace('.data', suffix + '.data'))\n shape = get_shape(in_path.replace('.data', suffix + '.shape'))\n f_in = f_in.reshape(shape)\n return f_in\n\n\ndef save_shape(shape_file, shape):\n \"\"\"\n Saves the shape of a numpy array\n \"\"\"\n with open(shape_file, 'w') as fout:\n fout.write(u'#'+'\\t'.join(str(e) for e in shape)+'\\n')\n\n\ndef get_shape(shape_file):\n \"\"\"\n Reads a .shape file\n \"\"\"\n with open(shape_file, 'rb') as f:\n line=f.readline().decode('ascii')\n if line.startswith('#'):\n shape=tuple(map(int, re.findall(r'(\\d+)', line)))\n return shape\n else:\n raise IOError('Failed to find shape in file')\n\n\ndef get_num_instances_per_file(f_name, patch_len=25, patch_hop=12):\n \"\"\"\n Return the number of context_windows or instances generated out of a given file\n \"\"\"\n shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))\n file_frames = float(shape[0])\n return np.maximum(1, int(np.ceil((file_frames-patch_len)/patch_hop)))\n\n\ndef get_feature_size_per_file(f_name):\n \"\"\"\n Return the dimensionality of the features in a given file.\n Typically, this will be the number of bins in a T-F representation\n \"\"\"\n shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))\n return shape[1]\n\n\ndef make_sure_isdir(pre_path, _out_file):\n \"\"\"\n make sure the a directory at the end of pre_path exists. Else create it\n :param pre_path:\n :param args:\n :return:\n \"\"\"\n full_path = os.path.join(pre_path, _out_file)\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n return full_path\n" ]
[ [ "numpy.ceil" ] ]
shoarora/polytune
[ "86f31ba3f41ea47edcfa0442a29a79a2a46deaeb" ]
[ "lmtuners/lightning_modules/lm.py" ]
[ "\"\"\"Pytorch lightning module for language modelling.\"\"\"\nimport logging\nimport os\nfrom argparse import Namespace\n\nimport pytorch_lightning as pl\nimport torch\nfrom pytorch_lamb import Lamb\nfrom transformers import get_linear_schedule_with_warmup\n\nlogger = logging.getLogger(__name__)\n\n\nclass LMTrainingModuleConfig(Namespace):\n \"\"\"Config class LMTrainingModule.\"\"\"\n def __init__(\n self,\n num_steps,\n mlm=True,\n save_path=None,\n weight_decay=0.0,\n learning_rate=5e-5,\n epsilon=1e-8,\n warmup_steps=0,\n save_on_val=False,\n ):\n super().__init__(num_steps=num_steps,\n mlm=mlm,\n save_path=save_path,\n weight_decay=weight_decay,\n learning_rate=learning_rate,\n epsilon=epsilon,\n warmup_steps=warmup_steps,\n save_on_val=save_on_val)\n\n\nclass LMTrainingModule(pl.LightningModule):\n def __init__(self, model, config, checkpoint_fn=None):\n super().__init__()\n self.config = config\n self.hparams = config\n self.checkpoint_fn = checkpoint_fn\n\n self.vocab_size = model.config.vocab_size\n\n self.model = model\n\n def forward(self, inputs, labels, attention_mask, token_type_ids):\n if self.config.mlm:\n outputs = self.model(inputs,\n masked_lm_labels=labels,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids)\n else:\n outputs = self.model(inputs,\n labels=labels,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids)\n return outputs\n\n def training_step(self, batch, batch_idx):\n inputs, labels, attention_mask, token_type_ids = batch\n outputs = self.forward(inputs, labels, attention_mask, token_type_ids)\n loss = outputs[0]\n perplexity = torch.exp(loss)\n\n preds = torch.argmax(outputs[1], dim=-1)\n correct_preds = (preds == labels)[labels.ne(-100)]\n acc = torch.sum(correct_preds).float() / correct_preds.numel()\n\n self._log_lr()\n tensorboard_logs = {\n 'train/loss': loss,\n 'train/perplexity': perplexity,\n 'train/acc': acc\n }\n return {'loss': loss, 'log': tensorboard_logs}\n\n def validation_step(self, batch, batch_idx):\n inputs, labels, attention_mask, token_type_ids = batch\n outputs = self.forward(inputs, labels, attention_mask, token_type_ids)\n loss = outputs[0]\n\n preds = torch.argmax(outputs[1], dim=-1)\n correct_preds = (preds == labels)[labels.ne(-100)]\n acc = torch.sum(correct_preds).float() / correct_preds.numel()\n\n return {'val_loss': loss, 'val_acc': acc}\n\n def validation_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()\n\n perplexity = torch.exp(avg_loss)\n\n if self.trainer.proc_rank == 0:\n if self.config.save_on_val:\n output_dir = os.path.join(\n self.config.save_path,\n f\"{self.current_epoch}-{self.global_step}\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (self.model.module if hasattr(\n self.model, \"module\") else self.model)\n model_to_save.base_model.save_pretrained(output_dir)\n\n if self.checkpoint_fn:\n self.checkpoint_fn(self)\n\n tensorboard_logs = {\n 'val_loss': avg_loss,\n 'val/loss': avg_loss,\n 'val/acc': avg_acc,\n 'val/perplexity': perplexity\n }\n return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}\n\n def configure_optimizers(self):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in self.model.named_parameters()\n if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\":\n self.config.weight_decay,\n },\n {\n \"params\": [\n p for n, p in self.model.named_parameters()\n 
if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\":\n 0.0\n },\n ]\n\n t_total = self.config.num_steps\n\n optimizer = Lamb(optimizer_grouped_parameters,\n lr=self.config.learning_rate,\n eps=self.config.epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.config.warmup_steps,\n num_training_steps=t_total)\n\n scheduler_config = {'scheduler': scheduler, 'interval': 'step'}\n\n return [optimizer], [scheduler_config]\n\n def _log_lr(self):\n \"\"\"Logs learning rate to tensorboard.\n \"\"\"\n # get LR schedulers from the pytorch-lightning trainer object.\n scheduler = self.trainer.lr_schedulers[0]['scheduler']\n\n # tie LR stepping to global step.\n for i, lr in enumerate(scheduler.get_lr()):\n # add the scalar to the Experiment object.\n self.logger.experiment.add_scalar(f'lr_{i}', lr, self.global_step)\n" ]
[ [ "torch.exp", "torch.sum", "torch.stack", "torch.argmax" ] ]