repo_name: string, length 6 to 130
hexsha: list
file_path: list
code: list
apis: list
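A minimal sketch of how one row of this dump could be inspected, assuming the rows have been exported as JSON Lines with the columns above (the filename rows.jsonl and the per-row dict layout are assumptions for illustration, not part of this dump):

import json

with open("rows.jsonl") as f:          # hypothetical JSON Lines export, one object per row
    row = json.loads(next(f))
    repo = row["repo_name"]            # e.g. "AbderrahimRezki/prep_unit_project"
    sha = row["hexsha"][0]             # commit SHA the file snapshot was taken from
    path = row["file_path"][0]         # path of the source file inside the repo
    source = row["code"][0]            # full file contents as one string
    apis = row["apis"][0]              # fully qualified API calls detected in the file
    print(repo, path, sha)
    print(apis)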
AbderrahimRezki/prep_unit_project
[ "ee389db4500ea65a3d15c265327dbbecc58a12ae" ]
[ "linear_algebra/curve_fitting.py" ]
[ "from scipy.optimize import curve_fit\n\ndef set_objective(x, a, b):\n \"\"\" return the objective function \"\"\"\n #To-Do set the objective equation\n return a * x + b\n\ndef get_result( x, y):\n \"\"\"Return optimal values for a and b for the equation y = a*x+b \"\"\"\n\n # curve fit\n estimations, _ = curve_fit(set_objective, x, y)\n # summarize the parameter values\n a, b = estimations\n print('y = %.5f * x + %.5f' % (a, b))\n return a,b \n" ]
[ [ "scipy.optimize.curve_fit" ] ]
RafLaf/Adversarial_classes
[ "7f49d5acd39a899e5ce88be8cd649a109c2801e8" ]
[ "s2m2.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom args import *\nfrom torch.nn.utils.weight_norm import WeightNorm\n\n\nimport random\n\nclass BasicBlockWRN(nn.Module):\n def __init__(self, in_planes, out_planes, stride, drop_rate):\n super(BasicBlockWRN, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.droprate = drop_rate\n self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False) or None\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, drop_rate):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, drop_rate)\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, drop_rate):\n layers = []\n for i in range(int(nb_layers)):\n layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, drop_rate))\n return nn.Sequential(*layers)\n def forward(self, x):\n return self.layer(x)\n\nclass distLinear(nn.Module):\n def __init__(self, indim, outdim):\n super(distLinear, self).__init__()\n self.L = nn.Linear( indim, outdim, bias = False)\n self.class_wise_learnable_norm = True #See the issue#4&8 in the github \n if self.class_wise_learnable_norm: \n WeightNorm.apply(self.L, 'weight', dim=0) #split the weight update component to direction and norm \n\n if outdim <=200:\n self.scale_factor = 2; #a fixed scale factor to scale the output of cos value into a reasonably large input for softmax\n else:\n self.scale_factor = 10; #in omniglot, a larger scale factor is required to handle >1000 output classes.\n\n def forward(self, x):\n x_norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)\n x_normalized = x.div(x_norm+ 0.00001)\n if not self.class_wise_learnable_norm:\n L_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)\n self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)\n cos_dist = self.L(x_normalized) #matrix product by forward function, but when using WeightNorm, this also multiply the cosine distance by a class-wise learnable norm, see the issue#4&8 in the github\n scores = self.scale_factor* (cos_dist) \n return scores\n\nclass S2M2R(nn.Module):\n def __init__(self, feature_maps, input_shape, rotations, depth = 28, widen_factor = 10, num_classes = 64, drop_rate = 0.5):\n super(S2M2R, self).__init__()\n nChannels = [feature_maps, feature_maps*widen_factor, 2 * feature_maps*widen_factor, 4 * feature_maps*widen_factor]\n n = (depth - 4) / 6\n self.conv1 = nn.Conv2d(input_shape[0], nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)\n \n self.blocks = torch.nn.ModuleList()\n self.blocks.append(NetworkBlock(n, nChannels[0], 
nChannels[1], BasicBlockWRN, 1, drop_rate))\n self.blocks.append(NetworkBlock(n, nChannels[1], nChannels[2], BasicBlockWRN, 2, drop_rate))\n self.blocks.append(NetworkBlock(n, nChannels[2], nChannels[3], BasicBlockWRN, 2, drop_rate))\n self.bn = nn.BatchNorm2d(nChannels[3])\n self.linear = distLinear(nChannels[3], int(num_classes))\n self.rotations = rotations\n self.rotationLinear = nn.Linear(nChannels[3], 4)\n\n def forward(self, x, index_mixup = None, lam = -1):\n if lam != -1:\n mixup_layer = random.randint(0, 3)\n else:\n mixup_layer = -1\n out = x\n if mixup_layer == 0:\n out = lam * out + (1 - lam) * out[index_mixup]\n \n out = self.conv1(out)\n for i in range(len(self.blocks)):\n out = self.blocks[i](out)\n if mixup_layer == i + 1:\n out = lam * out + (1 - lam) * out[index_mixup]\n out = torch.relu(self.bn(out))\n out = F.avg_pool2d(out, out.size()[2:])\n out = out.view(out.size(0), -1)\n features = out\n out = self.linear(features)\n if self.rotations:\n out_rotation = self.rotationLinear(features)\n return (out, out_rotation), features\n return out, features\n" ]
[ [ "torch.nn.Sequential", "torch.norm", "torch.nn.functional.dropout", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.utils.weight_norm.WeightNorm.apply" ] ]
stdacore/SinglePathOneShot_MAML
[ "31e428fff16d1b2af30677af353442d2c62f3e71" ]
[ "src/Search/tester.py" ]
[ "import torch\n\nfrom imagenet_dataset import get_train_dataprovider, get_val_dataprovider\nimport tqdm\nfrom utils import get_parameters, CrossEntropyLabelSmooth\nfrom network import ShuffleNetV2_OneShot\nimport torch.nn as nn\nassert torch.cuda.is_available()\n\ntrain_dataprovider, val_dataprovider = None, None\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef no_grad_wrapper(func):\n def new_func(*args, **kwargs):\n with torch.no_grad():\n return func(*args, **kwargs)\n return new_func\n\n\n# @no_grad_wrapper\ndef get_cand_err(model, cand, args):\n global train_dataprovider, val_dataprovider\n\n if train_dataprovider is None:\n use_gpu = False\n train_dataprovider = get_train_dataprovider(\n args.train_batch_size, use_gpu=True, num_workers=32)\n val_dataprovider = get_val_dataprovider(\n args.test_batch_size, use_gpu=True, num_workers=32)\n\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n max_train_iters = args.max_train_iters\n max_test_iters = args.max_test_iters\n\n print('clear bn statics....')\n # for m in model.modules():\n # if isinstance(m, torch.nn.BatchNorm2d):\n # m.running_mean = torch.zeros_like(m.running_mean)\n # m.running_var = torch.ones_like(m.running_var)\n\n print('train bn with training set (BN sanitize) ....')\n # meta_model = ShuffleNetV2_OneShot()\n # meta_model = nn.DataParallel(meta_model)\n # meta_model = meta_model.to(device)\n # for p, q in zip(model.parameters(), meta_model.parameters()):\n # if p is not None:\n # q = p.clone()\n\n optimizer = torch.optim.SGD(get_parameters(model), lr=0.001)\n criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)\n loss_function = criterion_smooth.cuda()\n model.train()\n\n for step in tqdm.tqdm(range(max_train_iters)):\n # print('train step: {} total: {}'.format(step,max_train_iters))\n data, target = train_dataprovider.next()\n # print('get data',data.shape)\n\n target = target.type(torch.LongTensor)\n\n data, target = data.to(device), target.to(device)\n \n # print(type(data))\n # data = data.requires_grad_()\n # data = torch.tensor(data.data, requires_grad=True)\n # data = data.cuda()\n # # target.requires_grad=True\n output = model(data, cand)\n\n # if step<10:\n # loss = loss_function(output, target)\n # optimizer.zero_grad()\n # loss.backward()\n # optimizer.step()\n\n del data, target, output\n\n top1 = 0\n top5 = 0\n total = 0\n\n print('starting test....')\n model.eval()\n\n for step in tqdm.tqdm(range(max_test_iters)):\n # print('test step: {} total: {}'.format(step,max_test_iters))\n data, target = val_dataprovider.next()\n batchsize = data.shape[0]\n # print('get data',data.shape)\n target = target.type(torch.LongTensor)\n data, target = data.to(device), target.to(device)\n\n logits = model(data, cand)\n\n prec1, prec5 = accuracy(logits, target, topk=(1, 5))\n\n # print(prec1.item(),prec5.item())\n\n top1 += prec1.item() * batchsize\n top5 += prec5.item() * batchsize\n total += batchsize\n\n del data, target, logits, prec1, prec5\n\n top1, top5 = top1 / total, top5 / total\n\n top1, top5 = 1 - top1 / 100, 1 - top5 / 100\n\n print('top1: {:.2f} top5: {:.2f}'.format(top1 * 100, top5 * 100))\n\n return top1, top5\n\n\ndef main():\n pass\n" ]
[ [ "torch.device", "torch.no_grad", "torch.cuda.is_available" ] ]
lawRathod/Deep-Q-Learning-Agent
[ "bdb5d7965f3a862b3cc47ba9b8ad38ee01718209" ]
[ "train_worker.py" ]
[ "from envs.rover_lander_1 import rover_lander_1\nfrom envs.rover_lander_2 import rover_lander_2\nimport os\nimport random\nimport datetime\nimport requests\nfrom collections import deque\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential, layers, optimizers, activations\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--master-endpoint\", help=\"Endpoint for train_master\")\nparser.add_argument(\"--worker-name\", help=\"Worker name\")\nparser.add_argument(\"--env\", help=\"Env name\")\nparser.add_argument(\"--show-preview\", help=\"Show preview\", action='store_true', default=False)\nparser.add_argument(\"--mem-size\", help=\"Replay memory size\", type=int, default=1000000)\nparser.add_argument(\"--max-ep\", help=\"Maximum episodes\", type=int, default=5000)\nparser.add_argument(\"--lr\", help=\"Learning rate\", type=float, default=0.001)\nparser.add_argument(\"--loss\", help=\"Loss function\", default=\"mse\")\nargs = parser.parse_args()\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\nMASTER_ENDPOINT = args.master_endpoint\nWORKER_NAME = args.worker_name\nMAX_EPISODES = args.max_ep\nSHOW_PREVIEW = args.show_preview\nREPLAY_MEMORY_SIZE = args.mem_size\n\nRENDER_EVERY = 10\nSAVE_MODEL_EVERY = 500\nSAVE_MODEL_LOCAL_EVERY = 20\n\nTRAIN_PARAMS = {'learning_rate': args.lr,\n 'loss': args.loss,\n 'batch_size': 64\n }\n\nQ_PARAMS = {'epsilon': 1.0,\n 'gamma': 0.99,\n 'epsilon_min': 0.01,\n 'epsilon_decay': 0.996}\n\n\nid = None\n\ndef connect():\n global id\n id = int(requests.get(MASTER_ENDPOINT + f\"/master/connect?worker_name={WORKER_NAME}&env={args.env}&max_episodes={MAX_EPISODES}&current_episode=0\").text)\n \ndef update(properties=[]):\n global id\n requests.get(MASTER_ENDPOINT + f\"/master/update?id={str(id)}&\" + \"&\".join([str(p[0]) + \"=\" + str(p[1]) for p in properties]))\n \ndef send_model(model_path):\n global id\n files = {'model': open(model_path,'rb')}\n r = requests.post(MASTER_ENDPOINT + f\"/master/send_model?id={str(id)}\", files=files)\n \nclass customCallback(tf.keras.callbacks.Callback): \n def on_epoch_end(self, epoch, logs={}): \n update([(\"acc\", logs.get('acc')),\n (\"loss\", logs.get('loss')),\n (\"mse\", logs.get('mse'))])\n \n # def on_train_end(self, logs=None):\n # update([(\"last_trained\", \"curr_time\")])\n \nclass Agent:\n def __init__(self):\n self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)\n self.model = self.create_model()\n self.model_v = 0\n def create_model(self):\n model = Sequential([layers.Dense(150, input_dim=rover_lander_1.observation_space[0] * rover_lander_1.observation_space[1] * 3, activation=activations.relu),\n layers.Dense(120, activation=activations.relu),\n layers.Dense(rover_lander_1.action_space, activation=activations.linear)])\n model.compile(loss=TRAIN_PARAMS['loss'], optimizer=optimizers.Adam(lr=TRAIN_PARAMS['learning_rate']), metrics=['mse', 'acc'])\n return model\n \n def load_model(self, model_path):\n self.model = tf.keras.models.load_model(model_path)\n \n def save_model(self, local_only=True):\n self.model.save(f\"models/{WORKER_NAME}_v{self.model_v}.h5\")\n self.model_v = self.model_v + 1\n if not local_only:\n send_model(f\"models/{WORKER_NAME}.h5\")\n \n def update_replay_memory(self, state, action, reward, next_state, done):\n self.replay_memory.append((state, action, reward, next_state, done))\n \n def replay(self):\n if len(self.replay_memory) < TRAIN_PARAMS['batch_size']:\n return\n \n minibatch = 
random.sample(self.replay_memory, TRAIN_PARAMS['batch_size'])\n states = np.array([i[0] for i in minibatch])\n actions = np.array([i[1] for i in minibatch])\n rewards = np.array([i[2] for i in minibatch])\n next_states = np.array([i[3] for i in minibatch])\n dones = np.array([i[4] for i in minibatch])\n \n # states = np.squeeze(states, axis=0)\n # next_states = np.squeeze(next_states, axis=0)\n\n states = states.reshape(states.shape[0], np.prod(states.shape[1:]))/255\n next_states = next_states.reshape(next_states.shape[0], np.prod(next_states.shape[1:]))/255\n targets = rewards + Q_PARAMS['gamma']*(np.amax(self.model.predict_on_batch(next_states), axis=1))*(1-dones)\n targets_full = self.model.predict_on_batch(states)\n ind = np.array([i for i in range(TRAIN_PARAMS['batch_size'])])\n targets_full[[ind], [actions]] = targets\n\n self.model.fit(states, targets_full, epochs=1, verbose=0, callbacks=[customCallback()])\n if Q_PARAMS['epsilon'] > Q_PARAMS['epsilon_min']:\n Q_PARAMS['epsilon'] *= Q_PARAMS['epsilon_decay']\n\n\n def qs(self, state):\n state = state.reshape(1, np.prod(state.shape[:]))/255\n return np.argmax(self.model.predict(state))\n \nif __name__ == '__main__':\n if not SHOW_PREVIEW:\n os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n \n if args.env == 'rover_lander_1':\n env = rover_lander_1()\n elif args.env == 'rover_lander_2':\n env = rover_lander_2()\n \n agent = Agent()\n \n connect()\n episode_rewards = []\n for episode in tqdm(range(0, MAX_EPISODES), ascii=True, unit='episodes'):\n episode_reward = 0\n step = 1\n current_state = env.reset()\n done = False\n while not done:\n if np.random.random() > Q_PARAMS['epsilon']:\n action = agent.qs(current_state)\n else:\n action = env.random_action_sample()\n \n if SHOW_PREVIEW and not episode % RENDER_EVERY:\n env.render()\n\n new_state, reward, done = env.step(action)\n episode_reward += reward\n \n agent.update_replay_memory(current_state, action, reward, new_state, done)\n current_state = new_state\n agent.replay()\n step = step + 1\n \n \n episode_rewards.append(episode_reward)\n update([(\"last_ep_score\", episode_reward), \n (\"avg_ep_score\", sum(episode_rewards) / len(episode_rewards)),\n (\"num_step\", step)])\n \n if not episode % SAVE_MODEL_EVERY:\n pass\n # agent.save_model(local_only=False)\n \n if not episode % SAVE_MODEL_LOCAL_EVERY:\n agent.save_model(local_only=True)" ]
[ [ "tensorflow.keras.models.load_model", "numpy.random.random", "tensorflow.keras.layers.Dense", "tensorflow.keras.optimizers.Adam", "numpy.prod", "numpy.array" ] ]
robfalck/AoC2018
[ "9cc6a94d11d70ea11df4999df2fdf955cc5c155a" ]
[ "aoc2018/dec15/part1.py" ]
[ "\nimport numpy as np\nimport collections\nfrom operator import itemgetter\n\nnp.set_printoptions(linewidth=1024, edgeitems=1000)\n\ndelta = {'N': (-1, 0), 'W': (0, -1), 'E': (0, 1), 'S': (1, 0)}\ninv_delta = {v: k for k, v in delta.items()}\n\ndef distance(p1, p2):\n return np.array(p2) - np.array(p1)\n\ndef manhattan_distance(p1, p2):\n return sum(np.abs(distance(p1, p2)))\n\ndef build_graph(board):\n # Build the list of neighbors for each square in the grid. this is our graph.\n graph = {}\n for i in range(board.shape[0]):\n for j in range(board.shape[1]):\n if board[i, j] == 0:\n graph[i, j] = []\n for direction in 'N', 'E', 'S', 'W':\n row, col = i + delta[direction][0], j + delta[direction][1]\n if board[row, col] == 0:\n graph[i, j].append((row, col))\n return graph\n\ndef get_squares_in_range(allies, enemies, graph):\n \"\"\"\n returns the positions of the squares that are within range of the enemy units\n \"\"\"\n squares = []\n\n occupied = get_occupied_squares(allies, enemies)\n\n for e in enemies:\n if not e.alive:\n continue\n adjacent = graph[e.pos]\n squares.extend([sq for sq in adjacent if sq not in occupied])\n return squares\n\ndef get_occupied_squares(allies, enemies):\n occupied = set()\n for unit in allies + enemies:\n if unit.alive:\n occupied.add(unit.pos)\n return occupied\n\ndef get_num_moves(pos, board, allies, enemies):\n \"\"\"\n Returns a np array of the board where each element holds\n the number of moves required to get from pos to that element.\n \"\"\"\n\n # Setup the visited set and queue for the BFS\n visited, queue = set(), collections.deque([(0, pos)])\n\n grid = np.zeros_like(board) - 1\n visited.add(pos)\n\n # Setup a set of spaces occupied by units\n occupied = get_occupied_squares(allies, enemies)\n\n # Perform the BFS to find the number of moves required to\n # get to every accessible point on the grid.\n while queue:\n distance, p = queue.popleft()\n grid[p] = distance\n for neighbor in graph[p]:\n if neighbor not in visited and neighbor in graph and neighbor not in occupied:\n queue.append((distance+1, neighbor))\n visited.add(neighbor)\n\n return grid\n\ndef get_cost_map(pos, allies, enemies, graph):\n # Setup the visited set and queue for the BFS\n visited, queue = set(), collections.deque([(0, pos)])\n\n visited.add(pos)\n cost_map = {}\n\n # Setup a set of spaces occupied by units\n occupied = get_occupied_squares(allies, enemies)\n\n # Perform the BFS to find the number of moves required to\n # get to every accessible point on the grid.\n while queue:\n distance, p = queue.popleft()\n cost_map[p] = distance\n for neighbor in graph[p]:\n if neighbor not in visited and neighbor in graph and neighbor not in occupied:\n queue.append((distance+1, neighbor))\n visited.add(neighbor)\n\n return cost_map\n\n\ndef get_shortest_path(start, end, allies, enemies):\n \"\"\"\n Returns a np array of the board where each element holds\n the number of moves required to get from pos to that element.\n \"\"\"\n\n # Setup the visited set and queue for the BFS\n visited, queue = set(), collections.deque([(start)])\n visited.add(start)\n\n # Setup a set of spaces occupied by units\n occupied = get_occupied_squares(allies, enemies)\n\n # Perform the BFS to get the shortest path\n prev = {start: None}\n while queue:\n loc = queue.popleft()\n for neighbor in graph[loc]:\n if neighbor == end:\n # Found the end, clear the queue and log the previous location\\\n prev[neighbor] = loc\n break\n if neighbor not in visited and neighbor in graph and neighbor not in 
occupied:\n queue.append((neighbor))\n prev[neighbor] = loc\n visited.add(neighbor)\n\n # Reconstruct path\n path = []\n at = end\n for i in range(20):\n path.append(at)\n at = prev[at]\n if at is None:\n break\n else:\n path.reverse()\n path.reverse()\n\n return path\n\n\ndef print_board(turn, board, elves, goblins):\n g = np.empty(board.shape, dtype=str)\n g[:, :] = '.'\n for i in range(board.shape[0]):\n for j in range(board.shape[1]):\n if board[i, j] == 1:\n g[i, j] = '#'\n\n for elf in elves:\n g[elf.pos[0], elf.pos[1]] = 'E'\n\n for gob in goblins:\n g[gob.pos[0], gob.pos[1]] = 'G'\n\n print()\n for row in range(board.shape[0]):\n print(''.join(g[row, :]))\n print()\n\n\ndef print_cost_map(turn, board, elves, goblins, cost_map):\n g = np.empty(board.shape, dtype=str)\n g[:, :] = '.'\n for i in range(board.shape[0]):\n for j in range(board.shape[1]):\n if board[i, j] == 1:\n g[i, j] = '#'\n\n for elf in elves:\n g[elf.pos[0], elf.pos[1]] = 'E'\n\n for gob in goblins:\n g[gob.pos[0], gob.pos[1]] = 'G'\n\n for key, val in cost_map.items():\n g[key] = str(val)\n\n print()\n for row in range(board.shape[0]):\n print(''.join(g[row, :]))\n print()\n\n\nclass Unit(object):\n\n def __init__(self, pos, attack_power=3):\n self.pos = pos\n self.hit_points = 200\n self.attack_power = attack_power\n self.alive = True\n\n def __repr__(self):\n return '{0:6s}: pos:{1:10s} hp:{2:03d} {3}'.format(type(self).__name__, str(self.pos), self.hit_points, ' ' if self.alive else 'X')\n\n def get_attack_options(self, enemies):\n attack_directions = []\n for t in enemies:\n if not t.alive:\n continue\n if tuple(distance(self.pos, t.pos)) in delta.values():\n attack_directions.append(inv_delta[tuple(distance(self.pos, t.pos))])\n return attack_directions\n\n def __lt__(self, other):\n self_row, self_col = self.pos\n other_row, other_col = other.pos\n if self_row == other_row:\n return self_col < other_col\n return self_row < other_row\n\n def try_attack(self, enemies):\n # Can this unit attack this turn?\n attack_directions = self.get_attack_options(enemies)\n\n if not attack_directions:\n return False\n\n fewest_target_hit_points = 1E16\n target_to_attack = None\n for direction in ('N', 'W', 'E', 'S'):\n if direction in attack_directions:\n p = self.pos[0] + delta[direction][0], self.pos[1] + delta[direction][1]\n target_unit = [enemy for enemy in enemies if enemy.pos == p][0]\n if target_unit.hit_points < fewest_target_hit_points:\n fewest_target_hit_points = target_unit.hit_points\n target_to_attack = target_unit\n target_to_attack.hit_points -= self.attack_power\n\n if target_to_attack.hit_points <= 0:\n target_to_attack.alive = False\n\n return True\n\n def try_move(self, turn, allies, enemies, board, graph):\n # squares in range of the enemy\n squares = get_squares_in_range(allies, enemies, graph)\n # print(squares)\n\n # cost_map\n cost_map = get_cost_map(self.pos, allies, enemies, graph)\n # print(cost_map)\n # print_cost_map(turn, board, enemies, allies, cost_map)\n square_costs = {sq: cost_map[sq] for sq in squares if sq in cost_map}\n min_cost_squares = [sq for sq in squares if sq in cost_map and cost_map[sq] == min(square_costs.values())]\n if not min_cost_squares:\n # No viable targets found, do not move\n return False\n target_square = sorted(min_cost_squares, key=itemgetter(0, 1))[0]\n\n # now build a new cost_map from the targeted squares perspective\n # choose the square adjacent to this unit with the lowest cost for the first move\n # if multiple squares have the lowest cost, choose the first 
in reading order\n rev_cost_map = get_cost_map(target_square, allies, enemies, graph)\n first_step = None\n for direction in 'N', 'W', 'E', 'S':\n sq = self.pos[0] + delta[direction][0], self.pos[1] + delta[direction][1]\n if sq not in graph or sq not in rev_cost_map:\n continue\n if first_step is None or rev_cost_map[sq] < rev_cost_map[first_step]:\n first_step = sq\n\n if first_step is not None:\n self.pos = first_step\n return True\n return False\n\n def take_turn(self, turn, allies, enemies, board, graph):\n\n # I can't attack if I'm dead\n if not self.alive:\n return\n\n attack_performed = self.try_attack(enemies)\n if attack_performed:\n # Turn complete\n return\n\n # No attack performed, do a move\n move_performed = self.try_move(turn, allies, enemies, board, graph)\n if not move_performed:\n # Turn complete\n return\n\n self.try_attack(enemies)\n\n\nclass Elf(Unit):\n\n def __init__(self, pos, attack_power=3):\n super(Elf, self).__init__(pos, attack_power)\n\n\nclass Goblin(Unit):\n\n def __init__(self, pos, attack_power=3):\n super(Goblin, self).__init__(pos, attack_power)\n\n\ndef parse_initial_state(initial_state, elf_attack_power=3):\n\n rows = len(initial_state)\n cols = len(initial_state[0])\n\n board = np.zeros((rows, cols), dtype=int)\n\n elves = []\n goblins = []\n\n for i in range(rows):\n for j in range(cols):\n if initial_state[i][j] == '#':\n board[i, j] = 1\n elif initial_state[i][j] == 'E':\n elves.append(Elf((i, j), attack_power=elf_attack_power))\n elif initial_state[i][j] == 'G':\n goblins.append(Goblin((i, j)))\n\n return elves, goblins, board\n\n\ndef solve(initial_state):\n\n elves, goblins, board = parse_initial_state(initial_state)\n\n graph = build_graph(board)\n\n units = elves + goblins\n units.sort()\n\n print('\\ninitial setup')\n print_board(0, board, elves, goblins)\n for unit in units:\n print(unit)\n\n for turn in range(1,1000):\n print('\\nstart turn', turn)\n\n # Sort all units by read-order\n units = elves + goblins\n units.sort()\n\n for unit in units:\n if isinstance(unit, Elf):\n allies = elves\n enemies = goblins\n else:\n allies = goblins\n enemies = elves\n\n if unit is goblins[0] and turn == 2:\n print(unit.pos)\n\n unit.take_turn(turn, allies, enemies, board, graph)\n\n # number of enemies remaining\n num_enemies = len([e for e in enemies if e.alive])\n\n if num_enemies == 0:\n print('done after', turn-1, 'full turns')\n hp_remaining = sum([unit.hit_points for unit in units if unit.alive])\n print('hitpoints remaining', hp_remaining)\n print('result', (turn-1) * hp_remaining)\n exit(0)\n\n # clear the dead\n elves = [e for e in elves if e.alive]\n goblins = [g for g in goblins if g.alive]\n\n print_board(turn, board, elves, goblins)\n for unit in units:\n print(unit)\n print('end turn', turn)\n print()\n print()\n\n\nif __name__ == '__main__':\n # with open('test_input.txt', 'r') as f:\n # lines = [s.rstrip() for s in f.readlines()]\n # solve(initial_state=lines)\n\n\n with open('input.txt', 'r') as f:\n lines = [s.rstrip() for s in f.readlines()]\n solve(initial_state=lines)" ]
[ [ "numpy.set_printoptions", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
fang1fan/m5-python-starter
[ "434bcd701c04707e5a5c3ed07ee51d0a66687dfc" ]
[ "attention.py" ]
[ "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport gc; import os\nimport torch\nfrom torch.nn import *\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport warnings\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport gc\nimport os\nfrom tqdm.notebook import tqdm\n\nclass RelMultiHeadAttn(nn.Module):\n def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,\n tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False):\n super(RelMultiHeadAttn, self).__init__()\n\n self.n_head = n_head\n self.d_model = d_model\n self.d_head = d_head\n self.dropout = dropout\n\n self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)\n\n self.drop = nn.Dropout(dropout)\n self.dropatt = nn.Dropout(dropatt)\n self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)\n\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.scale = 1 / (d_head ** 0.5)\n\n self.pre_lnorm = pre_lnorm\n\n def _parallelogram_mask(self, h, w, left=False):\n mask = torch.ones((h, w)).byte()\n m = min(h, w)\n mask[:m,:m] = torch.triu(mask[:m,:m])\n mask[-m:,-m:] = torch.tril(mask[-m:,-m:])\n\n if left:\n return mask\n else:\n return mask.flip(0)\n\n def _shift(self, x, qlen, klen, mask, left=False):\n if qlen > 1:\n zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),\n device=x.device, dtype=x.dtype)\n else:\n zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)\n\n if left:\n mask = mask.flip(1)\n x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)\n else:\n x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)\n\n x = x_padded.masked_select(mask[:,:,None,None]) \\\n .view(qlen, klen, x.size(2), x.size(3))\n\n return x\n\n def _rel_shift(self, x, zero_triu=False):\n zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),\n device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=1)\n\n x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])\n\n x = x_padded[1:].view_as(x)\n\n if zero_triu:\n ones = torch.ones((x.size(0), x.size(1)))\n x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]\n\n return x\n\n def forward(self, w, r, attn_mask=None, mems=None):\n raise NotImplementedError\n\nclass RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):\n def __init__(self, *args, **kwargs):\n super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)\n\n self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)\n\n def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):\n qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)\n\n if mems is not None:\n cat = torch.cat([mems, w], 0)\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(cat))\n else:\n w_heads = self.qkv_net(cat)\n r_head_k = self.r_net(r)\n\n w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)\n w_head_q = w_head_q[-qlen:]\n else:\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(w))\n else:\n w_heads = self.qkv_net(w)\n r_head_k = self.r_net(r)\n\n w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)\n\n klen = w_head_k.size(0)\n\n w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head\n w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head\n w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head\n\n r_head_k = 
r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head\n\n #### compute attention score\n rw_head_q = w_head_q + r_w_bias # qlen x bsz x n_head x d_head\n AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head\n\n rr_head_q = w_head_q + r_r_bias\n BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head\n BD = self._rel_shift(BD)\n\n # [qlen x klen x bsz x n_head]\n attn_score = AC + BD\n attn_score.mul_(self.scale)\n\n #### compute attention probability\n if attn_mask is not None and attn_mask.any().item():\n if attn_mask.dim() == 2:\n attn_score = attn_score.float().masked_fill(\n attn_mask[None,:,:,None], -float('inf')).type_as(attn_score)\n elif attn_mask.dim() == 3:\n attn_score = attn_score.float().masked_fill(\n attn_mask[:,:,:,None], -float('inf')).type_as(attn_score)\n\n # [qlen x klen x bsz x n_head]\n attn_prob = F.softmax(attn_score, dim=1)\n attn_prob = self.dropatt(attn_prob)\n\n #### compute attention vector\n attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))\n\n # [qlen x bsz x n_head x d_head]\n attn_vec = attn_vec.contiguous().view(\n attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)\n\n ##### linear projection\n attn_out = self.o_net(attn_vec)\n attn_out = self.drop(attn_out)\n\n if self.pre_lnorm:\n ##### residual connection\n output = w + attn_out\n else:\n ##### residual connection + layer normalization\n output = self.layer_norm(w + attn_out)\n\n return output\n\nclass RelLearnableMultiHeadAttn(RelMultiHeadAttn):\n def __init__(self, *args, **kwargs):\n super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)\n\n def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):\n # r_emb: [klen, n_head, d_head], used for term B\n # r_w_bias: [n_head, d_head], used for term C\n # r_bias: [klen, n_head], used for term D\n\n qlen, bsz = w.size(0), w.size(1)\n\n if mems is not None:\n cat = torch.cat([mems, w], 0)\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(cat))\n else:\n w_heads = self.qkv_net(cat)\n w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)\n\n w_head_q = w_head_q[-qlen:]\n else:\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(w))\n else:\n w_heads = self.qkv_net(w)\n w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)\n\n klen = w_head_k.size(0)\n\n w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)\n w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)\n w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)\n\n if klen > r_emb.size(0):\n r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)\n r_emb = torch.cat([r_emb_pad, r_emb], 0)\n r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)\n r_bias = torch.cat([r_bias_pad, r_bias], 0)\n else:\n r_emb = r_emb[-klen:]\n r_bias = r_bias[-klen:]\n\n #### compute attention score\n rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head\n\n AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head\n B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head\n D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head\n BD = self._rel_shift(B_ + D_)\n\n # [qlen x klen x bsz x n_head]\n attn_score = AC + BD\n attn_score.mul_(self.scale)\n\n #### compute attention probability\n if attn_mask is not None and attn_mask.any().item():\n if attn_mask.dim() == 2:\n attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))\n elif attn_mask.dim() 
== 3:\n attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))\n\n # [qlen x klen x bsz x n_head]\n attn_prob = F.softmax(attn_score, dim=1)\n attn_prob = self.dropatt(attn_prob)\n\n #### compute attention vector\n attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))\n\n # [qlen x bsz x n_head x d_head]\n attn_vec = attn_vec.contiguous().view(\n attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)\n\n ##### linear projection\n attn_out = self.o_net(attn_vec)\n attn_out = self.drop(attn_out)\n\n if self.pre_lnorm:\n ##### residual connection\n output = w + attn_out\n else:\n ##### residual connection + layer normalization\n output = self.layer_norm(w + attn_out)\n\n return output\nclass PositionwiseGRUFF(torch.nn.Module):\n def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):\n super(PositionwiseGRUFF, self).__init__()\n\n self.d_model = d_model\n self.d_inner = d_inner\n self.dropout = dropout\n\n self.CoreNet = nn.Sequential(\n nn.Linear(d_model, d_inner), \n nn.GRUCell(d_model, d_inner),\n nn.ReLU(inplace=True),\n nn.Dropout(dropout),\n nn.Linear(d_inner, d_model),\n nn.Dropout(dropout),\n )\n\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.pre_lnorm = pre_lnorm\n\n def forward(self, inp):\n if self.pre_lnorm:\n # layer normalization + positionwise feed-forward\n core_out = self.CoreNet(self.layer_norm(inp))\n\n # residual connection\n output = core_out + inp\n else:\n # positionwise feed-forward\n core_out = self.CoreNet(inp)\n\n # residual connection + layer normalization\n output = self.layer_norm(inp + core_out)\n\n return output\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.ones", "torch.cat", "torch.zeros", "torch.einsum", "torch.nn.GRUCell", "torch.tril", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.chunk", "torch.triu", "torch.nn.ReLU" ] ]
activityMonitoring/actipy
[ "b7c37ff65ea01851e5e934a646edbb4bef20efe4" ]
[ "actipy/reader.py" ]
[ "import os\nimport time\nimport struct\nimport shutil\nimport tempfile\nimport atexit\nimport zipfile\nimport gzip\nimport pathlib\nimport numpy as np\nimport pandas as pd\nimport jpype\n\nfrom actipy import processing\n\n\n__all__ = ['read_device', 'process']\n\n\ndef read_device(input_file,\n lowpass_hz=20,\n calibrate_gravity=True,\n detect_nonwear=True,\n resample_hz='uniform',\n verbose=True):\n \"\"\" \n Read and process accelerometer device file. Returns a pandas.DataFrame with\n the processed data and a dict with processing info. \n\n :param input_file: Path to accelerometer file.\n :type input_file: str\n :param lowpass_hz: Cutoff (Hz) for low-pass filter. Defaults to 20. Pass\n None or False to disable.\n :type lowpass_hz: int, optional\n :param calibrate_gravity: Whether to perform gravity calibration. Defaults to True.\n :type calibrate_gravity: bool, optional\n :param detect_nonwear: Whether to perform non-wear detection. Defaults to True. \n :type detect_nonwear: bool, optional\n :param resample_hz: Target frequency (Hz) to resample the signal. If\n \"uniform\", use the implied frequency (use this option to fix any device\n sampling errors). Pass None to disable. Defaults to \"uniform\".\n :type resample_hz: str or int, optional\n :param verbose: Verbosity, defaults to True.\n :type verbose: bool, optional\n :return: Processed data and processing info.\n :rtype: (pandas.DataFrame, dict)\n \"\"\"\n\n data, info_read = _read_device(input_file, verbose)\n\n data, info_process = process(data, info_read['SampleRate'],\n lowpass_hz=lowpass_hz,\n calibrate_gravity=calibrate_gravity,\n detect_nonwear=detect_nonwear,\n resample_hz=resample_hz,\n verbose=verbose)\n\n info_misc = processing.misc(data, info_process.get('ResampleRate', info_read['SampleRate']))\n\n info = {**info_read, **info_misc, **info_process}\n\n return data, info\n\n\ndef process(data, sample_rate,\n lowpass_hz=20,\n calibrate_gravity=True,\n detect_nonwear=True,\n resample_hz='uniform',\n verbose=True):\n \"\"\" \n Process a pandas.DataFrame of acceleration time-series. Returns a\n pandas.DataFrame with the processed data and a dict with processing info.\n\n :param data: A pandas.DataFrame of acceleration time-series. It must contain\n at least columns `x,y,z` and the index must be a DateTimeIndex.\n :type data: pandas.DataFrame.\n :param sample_rate: The data's sample rate (Hz).\n :type sample_rate: int or float\n :param lowpass_hz: Cutoff (Hz) for low-pass filter. Defaults to 20. Pass\n None or False to disable.\n :type lowpass_hz: int, optional\n :param calibrate_gravity: Whether to perform gravity calibration. Defaults to True.\n :type calibrate_gravity: bool, optional\n :param detect_nonwear: Whether to perform non-wear detection. Defaults to True. \n :type detect_nonwear: bool, optional\n :param resample_hz: Target frequency (Hz) to resample the signal. If\n \"uniform\", use the implied frequency (use this option to fix any device\n sampling errors). Pass None to disable. 
Defaults to \"uniform\".\n :type resample_hz: str or int, optional\n :param verbose: Verbosity, defaults to True.\n :type verbose: bool, optional\n :return: Processed data and processing info.\n :rtype: (pandas.DataFrame, dict)\n \"\"\"\n\n timer = Timer(verbose)\n\n info = {}\n\n if lowpass_hz not in (None, False):\n timer.start(\"Lowpass filter...\")\n data, info_lowpass = processing.lowpass(data, sample_rate, lowpass_hz)\n info.update(info_lowpass)\n timer.stop()\n\n # Used for calibration and nonwear detection\n # If needed, compute it once as it's expensive\n stationary_indicator = None\n if calibrate_gravity or detect_nonwear:\n timer.start(\"Getting stationary points...\")\n stationary_indicator = processing.get_stationary_indicator(data)\n timer.stop()\n\n if calibrate_gravity:\n timer.start(\"Gravity calibration...\")\n data, info_calib = processing.calibrate_gravity(data, stationary_indicator=stationary_indicator)\n info.update(info_calib)\n timer.stop()\n\n if detect_nonwear:\n timer.start(\"Nonwear detection...\")\n data, info_nonwear = processing.detect_nonwear(data, stationary_indicator=stationary_indicator)\n info.update(info_nonwear)\n timer.stop()\n\n if resample_hz not in (None, False):\n timer.start(\"Resampling...\")\n if resample_hz in ('uniform', True):\n data, info_resample = processing.resample(data, sample_rate)\n else:\n data, info_resample = processing.resample(data, resample_hz)\n info.update(info_resample)\n timer.stop()\n\n return data, info\n\n\ndef _read_device(input_file, verbose=True):\n \"\"\" Internal function that interfaces with the Java parser to read the\n device file. Returns parsed data as a pandas dataframe, and a dict with\n general info.\n \"\"\"\n\n try:\n\n timer = Timer(verbose)\n\n # Temporary diretory to store internal runtime files\n tmpdir = tempfile.mkdtemp()\n # Temporary file to store parsed device data\n tmpout = os.path.join(tmpdir, \"tmpout.npy\")\n\n info = {}\n info['Filename'] = input_file\n info['Filesize(MB)'] = round(os.path.getsize(input_file) / (1024 * 1024), 1)\n\n if input_file.lower().endswith((\".gz\", \".zip\")):\n timer.start(\"Decompressing...\")\n input_file = decompr(input_file, target_dir=tmpdir)\n timer.stop()\n\n # Device info\n info_device = get_device_info(input_file)\n\n # Parsing. Main action happens here.\n timer.start(\"Reading file...\")\n info_read = java_read_device(input_file, tmpout, verbose)\n timer.stop()\n\n timer.start(\"Converting to dataframe...\")\n # Load parsed data to a pandas dataframe\n data = npy2df(np.load(tmpout, mmap_mode='r'))\n # Fix if time non-increasing (rarely occurs)\n data, nonincr_time_errs = fix_nonincr_time(data)\n # Update read errors. 
Non-increasing time errors scaled by sample rate\n info_read['ReadErrors'] += int(np.ceil(nonincr_time_errs / info_read['SampleRate']))\n timer.stop()\n\n info.update({**info_device, **info_read})\n\n return data, info\n\n finally:\n\n # Cleanup, delete temporary directory\n try:\n shutil.rmtree(tmpdir)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\ndef java_read_device(input_file, output_file, verbose):\n \"\"\" Core function that calls the Java method to read device data \"\"\"\n\n setupJVM()\n\n if input_file.lower().endswith('.cwa'):\n info = jpype.JClass('AxivityReader').read(input_file, output_file, verbose)\n\n elif input_file.lower().endswith('.gt3x'):\n info = jpype.JClass('ActigraphReader').read(input_file, output_file, verbose)\n\n elif input_file.lower().endswith('.bin'):\n info = jpype.JClass('GENEActivReader').read(input_file, output_file, verbose)\n\n else:\n raise ValueError(f\"Unknown file extension: {input_file}\")\n\n # Convert the Java HashMap object to Python dictionary\n info = {str(k): str(info[k]) for k in info}\n info['ReadOK'] = int(info['ReadOK'])\n info['ReadErrors'] = int(info['ReadErrors'])\n info['SampleRate'] = float(info['SampleRate'])\n\n return info\n\n\ndef setupJVM():\n \"\"\" Start JVM. Shutdown at program exit \"\"\"\n if not jpype.isJVMStarted():\n jpype.addClassPath(pathlib.Path(__file__).parent)\n jpype.startJVM(convertStrings=False)\n\n @atexit.register\n def shudownJVM():\n jpype.shutdownJVM()\n\n return\n\n\ndef decompr(input_file, target_dir):\n \"\"\" Decompress file to target_dir \"\"\"\n\n # Only .gz and .zip supported so far\n filename = os.path.basename(input_file)\n uncompr_filename = os.path.splitext(filename)[0]\n newfile = os.path.join(target_dir, uncompr_filename)\n\n if input_file.lower().endswith(\".gz\"):\n with gzip.open(input_file, 'rb') as fin:\n with open(newfile, 'wb') as fout:\n shutil.copyfileobj(fin, fout)\n\n elif input_file.lower().endswith(\".zip\"):\n with zipfile.ZipFile(input_file, 'r') as f:\n f.extractall(target_dir)\n\n return newfile\n\n\ndef npy2df(data):\n \"\"\" Convert numpy array to pandas dataframe.\n Also parse time and set it as index. 
\"\"\"\n\n data = pd.DataFrame(data)\n data['time'] = data['time'].astype('datetime64[ms]')\n data = data.set_index('time')\n\n return data\n\n\ndef get_device_info(input_file):\n \"\"\" Get serial number of device \"\"\"\n\n info = {}\n\n if input_file.lower().endswith('.bin'):\n info['Device'] = 'GENEActiv'\n info['DeviceID'] = get_genea_id(input_file)\n\n elif input_file.lower().endswith('.cwa'):\n info['Device'] = 'Axivity'\n info['DeviceID'] = get_axivity_id(input_file)\n\n elif input_file.lower().endswith('.gt3x'):\n info['Device'] = 'Actigraph'\n info['DeviceID'] = get_gt3x_id(input_file)\n\n elif input_file.lower().endswith('.csv'):\n info['Device'] = 'unknown (.csv)'\n info['DeviceID'] = 'unknown (.csv)'\n\n else:\n raise ValueError(f\"Unknown file extension: {input_file}\")\n\n return info\n\n\ndef get_axivity_id(cwafile):\n \"\"\" Get serial number of Axivity device \"\"\"\n\n if cwafile.lower().endswith('.gz'):\n f = gzip.open(cwafile, 'rb')\n else:\n f = open(cwafile, 'rb')\n\n header = f.read(2)\n if header == b'MD':\n block_size = struct.unpack('H', f.read(2))[0]\n perform_clear = struct.unpack('B', f.read(1))[0]\n device_id = struct.unpack('H', f.read(2))[0]\n else:\n print(f\"Could not find device id for {cwafile}\")\n device_id = \"unknown\"\n\n f.close()\n\n return device_id\n\n\ndef get_genea_id(binfile):\n \"\"\" Get serial number of GENEActiv device \"\"\"\n\n assert binfile.lower().endswith(\".bin\"), f\"Cannot get device id for {binfile}\"\n\n with open(binfile, 'r') as f: # 'Universal' newline mode\n next(f) # Device Identity\n device_id = next(f).split(':')[1].rstrip() # Device Unique Serial Code:011710\n\n return device_id\n\n\ndef get_gt3x_id(gt3xfile):\n \"\"\" Get serial number of Actigraph device \"\"\"\n\n # Actigraph is actually a zip file?\n assert gt3xfile.lower().endswith(\".gt3x\") and zipfile.is_zipfile(gt3xfile), f\"Cannot get device id for {gt3xfile}\"\n\n with zipfile.ZipFile(gt3xfile, 'r') as z:\n contents = z.infolist()\n\n if 'info.txt' in map(lambda x: x.filename, contents):\n info_file = z.open('info.txt', 'r')\n for line in info_file:\n if line.startswith(b\"Serial Number:\"):\n newline = line.decode(\"utf-8\")\n newline = newline.split(\"Serial Number: \")[1]\n return newline\n else:\n print(\"Could not find info.txt file\")\n return \"unknown\"\n\n\ndef fix_nonincr_time(data):\n \"\"\" Fix if time non-increasing (rarely occurs) \"\"\"\n errs = (data.index.to_series().diff() <= pd.Timedelta(0)).sum()\n if errs > 0:\n print(\"Found non-increasing data timestamps. Fixing...\")\n data = data[data.index.to_series()\n .cummax()\n .diff()\n .fillna(pd.Timedelta(1))\n > pd.Timedelta(0)]\n return data, errs\n\n\nclass Timer:\n def __init__(self, verbose=True):\n self.verbose = verbose\n self.start_time = None\n self.msg = None\n\n def start(self, msg=\"Starting timer...\"):\n assert self.start_time is None, \"Timer is running. Use .stop() to stop it\"\n self.start_time = time.perf_counter()\n self.msg = msg\n if self.verbose:\n print(msg, end=\"\\r\")\n\n def stop(self):\n assert self.start_time is not None, \"Timer is not running. Use .start() to start it\"\n elapsed_time = time.perf_counter() - self.start_time\n if self.verbose:\n print(f\"{self.msg} Done! ({elapsed_time:0.2f}s)\")\n self.start_time = None\n self.msg = None\n" ]
[ [ "numpy.load", "pandas.Timedelta", "pandas.DataFrame", "numpy.ceil" ] ]
russellw/ml
[ "afc51f763185c18fd659264a448ec4a155ee7f4d" ]
[ "expr_2hidden.py" ]
[ "import datetime\nimport logging\nimport math\nimport random\nimport sys\nimport time\n\nimport psutil\nimport skopt\nimport torch\nimport torch.nn as nn\n\nstart = time.time()\n\nprocess = psutil.Process()\nrandom.seed(0)\n\nlogger = logging.getLogger()\nlogger.addHandler(\n logging.FileHandler(datetime.datetime.now().strftime(\"logs/%Y-%m-%d %H%M%S.log\"))\n)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\nlogger.setLevel(logging.DEBUG)\n\n\ndef prn(a=\"\"):\n logger.info(str(a))\n\n\ndef debug(a):\n logger.debug(str(a), stack_info=True)\n\n\nprn(sys.argv)\n\nops = \"+\", \"-\", \"*\", \"/\", \"sqrt\"\nleaves = 0.0, 1.0\npunct = \"(\", \")\"\n\ntokens = {}\nfor a in ops + leaves + punct:\n tokens[a] = len(tokens)\n\n\ndef arity(o):\n if o == \"sqrt\":\n return 1\n return 2\n\n\ndef randcode(depth):\n if depth:\n o = random.choice(ops)\n return [o] + [randcode(depth - 1) for i in range(arity(o))]\n return random.choice(leaves)\n\n\ndef evaluate(a):\n if isinstance(a, list) or isinstance(a, tuple):\n try:\n a = list(map(evaluate, a))\n o = a[0]\n x = a[1]\n if o == \"sqrt\":\n return math.sqrt(x)\n y = a[2]\n return eval(f\"x {o} y\")\n except (ValueError, ZeroDivisionError):\n return 0.0\n return a\n\n\nexprs = [randcode(3) for i in range(10000)]\nprn(exprs[:10])\n\noutputs = list(map(evaluate, exprs))\nprn(outputs[:10])\n\n\ndef serial(a):\n r = []\n\n def rec(a):\n if isinstance(a, list) or isinstance(a, tuple):\n r.append(\"(\")\n for b in a:\n rec(b)\n r.append(\")\")\n return\n r.append(a)\n\n rec(a)\n return r\n\n\nexprs = list(map(serial, exprs))\n\n\ndef translate(s):\n return [tokens[c] for c in s]\n\n\nexprs = list(map(translate, exprs))\n\n# pad each string with EOF to make them all the same length\nmaxlen = max(map(len, exprs))\nprn(f\"maxlen: {maxlen}\")\n\n\ndef pad(s):\n s = s[:maxlen]\n return s + [len(tokens)] * (maxlen - len(s))\n\n\nexprs = list(map(pad, exprs))\n\n# convert string of numbers to one-hot channels\nnchannels = len(tokens) + 1\n\n\ndef one_hot(s):\n r = []\n for i in range(nchannels):\n r.extend([int(x == i) for x in s])\n # r.append([int(x == i) for x in s])\n return r\n\n\nexprs = list(map(one_hot, exprs))\n\n\ndef tensors(exprs, outputs):\n x = torch.tensor(exprs, dtype=torch.float32)\n y = torch.tensor(outputs, dtype=torch.float32)\n y = y.view((y.shape[0], 1))\n return x, y\n\n\nn = len(exprs)\nvalid_i = n * 3 // 5\ntest_i = n * 4 // 5\ntrain_x, train_y = tensors(exprs[:valid_i], outputs[:valid_i])\nvalid_x, valid_y = tensors(exprs[valid_i:test_i], outputs[valid_i:test_i])\ntest_x, test_y = tensors(exprs[test_i:], outputs[test_i:])\n\n# https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity\n# the commented out ones are inapplicable, need extra parameters, or blew up in testing\nactivations = {\n # \"AdaptiveLogSoftmaxWithLoss\": nn.AdaptiveLogSoftmaxWithLoss,\n # \"CELU\": nn.CELU,\n # \"ELU\": nn.ELU,\n # \"GELU\": nn.GELU,\n \"Hardshrink\": nn.Hardshrink,\n \"Hardsigmoid\": nn.Hardsigmoid,\n # \"Hardswish\": nn.Hardswish,\n \"Hardtanh\": nn.Hardtanh,\n \"LeakyReLU\": nn.LeakyReLU,\n \"LogSigmoid\": nn.LogSigmoid,\n # \"LogSoftmax\": nn.LogSoftmax,\n # \"MultiheadAttention\": nn.MultiheadAttention,\n # \"PReLU\": nn.PReLU,\n # \"RReLU\": nn.RReLU,\n \"ReLU\": nn.ReLU,\n \"ReLU6\": nn.ReLU6,\n # \"SELU\": nn.SELU,\n \"Sigmoid\": nn.Sigmoid,\n # \"Softmax\": nn.Softmax,\n # \"Softmax2d\": nn.Softmax2d,\n # \"Softmin\": nn.Softmin,\n \"Softplus\": nn.Softplus,\n # \"Softshrink\": nn.Softshrink,\n \"Softsign\": 
nn.Softsign,\n # \"Tanh\": nn.Tanh,\n \"Tanhshrink\": nn.Tanhshrink,\n # \"Threshold\": nn.Threshold,\n}\n\n# LBFGS needs an extra closure parameter\n# SparseAdam does not support dense gradients, please consider Adam instead\noptims = {\n \"Adadelta\": torch.optim.Adadelta,\n \"Adagrad\": torch.optim.Adagrad,\n \"Adam\": torch.optim.Adam,\n \"Adamax\": torch.optim.Adamax,\n \"ASGD\": torch.optim.ASGD,\n \"RMSprop\": torch.optim.RMSprop,\n \"Rprop\": torch.optim.Rprop,\n \"SGD\": torch.optim.SGD,\n}\n\nspace = [\n skopt.space.Integer(1, 1000, name=\"hidden1\"),\n skopt.space.Integer(1, 1000, name=\"hidden2\"),\n skopt.space.Categorical(activations.keys(), name=\"activation\"),\n skopt.space.Categorical(optims.keys(), name=\"optim\"),\n skopt.space.Real(10 ** -4, 0.5, \"log-uniform\", name=\"lr\"),\n]\n\n\ndef hparam(hparams, name):\n for i in range(len(hparams)):\n if space[i].name == name:\n return hparams[i]\n raise ValueError(name)\n\n\nclass Net(nn.Module):\n def __init__(self, hidden1, hidden2, activation):\n super(Net, self).__init__()\n self.activation = activation()\n\n self.layer1 = nn.Linear(nchannels * maxlen, hidden1)\n self.layer2 = nn.Linear(hidden1, hidden2)\n self.out = nn.Linear(hidden2, 1)\n\n def forward(self, x):\n x = self.activation(self.layer1(x))\n x = self.activation(self.layer2(x))\n return self.out(x)\n\n\ncriterion = nn.MSELoss()\ncount = 0\nprn()\n\n\ndef train(hparams):\n global count\n prn(count)\n if isinstance(count, int):\n count += 1\n prn(hparams)\n\n model = Net(\n hparam(hparams, \"hidden1\"),\n hparam(hparams, \"hidden2\"),\n activations[hparam(hparams, \"activation\")],\n )\n optim = optims[hparam(hparams, \"optim\")]\n optimizer = optim(model.parameters(), lr=hparam(hparams, \"lr\"))\n prn(f\"{process.memory_info().rss:,} bytes\")\n\n epochs = 1000\n for epoch in range(epochs + 1):\n # show progress\n if epoch % (epochs // 10) == 0:\n train_cost = criterion(model(train_x), train_y).item()\n valid_cost = criterion(model(valid_x), valid_y).item()\n test_cost = criterion(model(test_x), test_y).item()\n prn(f\"{epoch:6d} {train_cost:10f} {valid_cost:10f} {test_cost:10f}\")\n\n # forward\n output = model(train_x)\n cost = criterion(output, train_y)\n\n # backward\n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n prn()\n return valid_cost\n\n\n# search hyperparameters\n# n_calls defaults to 100\nres = skopt.gp_minimize(train, space, n_calls=100)\n\n# train once more with best hyperparameters\ncount = \"final\"\ntrain(res.x)\n\nseconds = time.time() - start\nprn(f\"{seconds:.3f} seconds\")\nprn(datetime.timedelta(seconds=seconds))\n" ]
[ [ "torch.nn.Linear", "torch.nn.MSELoss", "torch.tensor" ] ]
lewyan/tensorflow-nmt
[ "6f954c2713dbcf00778f4f303b5bac7968b7fb4b" ]
[ "nmt/nmt.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"TensorFlow NMT model implementation.\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport random\nimport sys\n\n# import matplotlib.image as mpimg\nimport numpy as np\nimport tensorflow as tf\n\nfrom . import inference\nfrom . import train\nfrom .utils import misc_utils as utils\nfrom .utils import vocab_utils\nfrom .utils import evaluation_utils\n\nutils.check_tensorflow_version()\n\nFLAGS = None\n\n\ndef create_hparams():\n \"\"\"Create training hparams.\"\"\"\n return tf.contrib.training.HParams(\n # Data\n src=FLAGS.src,\n tgt=FLAGS.tgt,\n train_prefix=FLAGS.train_prefix,\n dev_prefix=FLAGS.dev_prefix,\n test_prefix=FLAGS.test_prefix,\n vocab_prefix=FLAGS.vocab_prefix,\n out_dir=FLAGS.out_dir,\n\n # Networks\n num_units=FLAGS.num_units,\n num_layers=FLAGS.num_layers,\n dropout=FLAGS.dropout,\n unit_type=FLAGS.unit_type,\n encoder_type=FLAGS.encoder_type,\n residual=FLAGS.residual,\n time_major=FLAGS.time_major,\n\n # Attention mechanisms\n attention=FLAGS.attention,\n attention_architecture=FLAGS.attention_architecture,\n pass_hidden_state=FLAGS.pass_hidden_state,\n\n # Train\n optimizer=FLAGS.optimizer,\n num_train_steps=FLAGS.num_train_steps,\n batch_size=FLAGS.batch_size,\n init_weight=FLAGS.init_weight,\n max_gradient_norm=FLAGS.max_gradient_norm,\n learning_rate=FLAGS.learning_rate,\n start_decay_step=FLAGS.start_decay_step,\n decay_factor=FLAGS.decay_factor,\n decay_steps=FLAGS.decay_steps,\n colocate_gradients_with_ops=FLAGS.colocate_gradients_with_ops,\n\n # Data constraints\n num_buckets=FLAGS.num_buckets,\n max_train=FLAGS.max_train,\n src_max_len=FLAGS.src_max_len,\n tgt_max_len=FLAGS.tgt_max_len,\n source_reverse=FLAGS.source_reverse,\n\n # Inference\n src_max_len_infer=FLAGS.src_max_len_infer,\n tgt_max_len_infer=FLAGS.tgt_max_len_infer,\n infer_batch_size=FLAGS.infer_batch_size,\n beam_width=FLAGS.beam_width,\n length_penalty_weight=FLAGS.length_penalty_weight,\n\n # Vocab\n sos=FLAGS.sos if FLAGS.sos else vocab_utils.SOS,\n eos=FLAGS.eos if FLAGS.eos else vocab_utils.EOS,\n bpe_delimiter=FLAGS.bpe_delimiter,\n\n # Misc\n forget_bias=FLAGS.forget_bias,\n num_gpus=FLAGS.num_gpus,\n epoch_step=0, # record where we were within an epoch.\n steps_per_stats=FLAGS.steps_per_stats,\n steps_per_external_eval=FLAGS.steps_per_external_eval,\n share_vocab=FLAGS.share_vocab,\n metrics=FLAGS.metrics.split(\",\"),\n log_device_placement=FLAGS.log_device_placement,\n random_seed=FLAGS.random_seed,\n )\n\n\ndef extend_hparams(hparams):\n \"\"\"Extend training hparams.\"\"\"\n # Sanity checks\n if hparams.encoder_type == \"bi\" and hparams.num_layers % 2 != 0:\n raise ValueError(\"For bi, num_layers %d should be even\" %\n hparams.num_layers)\n if (hparams.attention_architecture in [\"gnmt\"] and\n hparams.num_layers < 2):\n raise ValueError(\"For gnmt attention 
architecture, \"\n \"num_layers %d should be >= 2\" % hparams.num_layers)\n\n # Flags\n utils.print_out(\"# hparams:\")\n utils.print_out(\" src=%s\" % hparams.src)\n utils.print_out(\" tgt=%s\" % hparams.tgt)\n utils.print_out(\" train_prefix=%s\" % hparams.train_prefix)\n utils.print_out(\" dev_prefix=%s\" % hparams.dev_prefix)\n utils.print_out(\" test_prefix=%s\" % hparams.test_prefix)\n utils.print_out(\" out_dir=%s\" % hparams.out_dir)\n\n # Set num_residual_layers\n if hparams.residual and hparams.num_layers > 1:\n if hparams.encoder_type == \"gnmt\":\n # The first unidirectional layer (after the bi-directional layer) in\n # the GNMT encoder can't have residual connection due to the input is\n # the concatenation of fw_cell and bw_cell's outputs.\n num_residual_layers = hparams.num_layers - 2\n else:\n num_residual_layers = hparams.num_layers - 1\n else:\n num_residual_layers = 0\n hparams.add_hparam(\"num_residual_layers\", num_residual_layers)\n\n ## Vocab\n # Get vocab file names first\n if hparams.vocab_prefix:\n src_vocab_file = hparams.vocab_prefix + \".\" + hparams.src\n tgt_vocab_file = hparams.vocab_prefix + \".\" + hparams.tgt\n else:\n raise ValueError(\"hparams.vocab_prefix must be provided.\")\n\n # Source vocab\n src_vocab_size, src_vocab_file = vocab_utils.check_vocab(\n src_vocab_file,\n hparams.out_dir,\n sos=hparams.sos,\n eos=hparams.eos,\n unk=vocab_utils.UNK)\n\n # Target vocab\n if hparams.share_vocab:\n utils.print_out(\" using source vocab for target\")\n tgt_vocab_file = src_vocab_file\n tgt_vocab_size = src_vocab_size\n else:\n tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(\n tgt_vocab_file,\n hparams.out_dir,\n sos=hparams.sos,\n eos=hparams.eos,\n unk=vocab_utils.UNK)\n hparams.add_hparam(\"src_vocab_size\", src_vocab_size)\n hparams.add_hparam(\"tgt_vocab_size\", tgt_vocab_size)\n hparams.add_hparam(\"src_vocab_file\", src_vocab_file)\n hparams.add_hparam(\"tgt_vocab_file\", tgt_vocab_file)\n\n # Check out_dir\n if not tf.gfile.Exists(hparams.out_dir):\n utils.print_out(\"# Creating output directory %s ...\" % hparams.out_dir)\n tf.gfile.MakeDirs(hparams.out_dir)\n\n # Evaluation\n for metric in hparams.metrics:\n hparams.add_hparam(\"best_\" + metric, 0) # larger is better\n best_metric_dir = os.path.join(hparams.out_dir, \"best_\" + metric)\n hparams.add_hparam(\"best_\" + metric + \"_dir\", best_metric_dir)\n tf.gfile.MakeDirs(best_metric_dir)\n\n return hparams\n\n\ndef ensure_compatible_hparams(hparams):\n \"\"\"Make sure the loaded hparams is compatible with new changes.\"\"\"\n new_hparams = create_hparams()\n new_hparams = utils.maybe_parse_standard_hparams(\n new_hparams, FLAGS.hparams_path)\n new_hparams = extend_hparams(new_hparams)\n\n # For compatible reason, if there are new fields in new_hparams,\n # we add them to the current hparams\n new_config = new_hparams.values()\n config = hparams.values()\n for key in new_config:\n if key not in config:\n hparams.add_hparam(key, new_config[key])\n\n # Make sure that the loaded model has latest values for the below keys\n updated_keys = [\n \"out_dir\", \"num_gpus\", \"test_prefix\", \"beam_width\",\n \"length_penalty_weight\", \"num_train_steps\"\n ]\n for key in updated_keys:\n if key in new_config and getattr(hparams, key) != new_config[key]:\n utils.print_out(\"# Updating hparams.%s: %s -> %s\" %\n (key, str(getattr(hparams, key)), str(new_config[key])))\n setattr(hparams, key, new_config[key])\n return hparams\n\n\ndef load_train_hparams(out_dir):\n \"\"\"Load training 
hparams.\"\"\"\n hparams = utils.load_hparams(out_dir)\n\n if not hparams:\n hparams = create_hparams()\n hparams = utils.maybe_parse_standard_hparams(\n hparams, FLAGS.hparams_path)\n hparams = extend_hparams(hparams)\n else:\n hparams = ensure_compatible_hparams(hparams)\n\n # Save HParams\n utils.save_hparams(out_dir, hparams)\n\n for metric in hparams.metrics:\n utils.save_hparams(getattr(hparams, \"best_\" + metric + \"_dir\"), hparams)\n\n # Print HParams\n utils.print_hparams(hparams)\n return hparams\n\n\ndef main(unused_argv):\n # Job\n jobid = FLAGS.jobid\n num_workers = FLAGS.num_workers\n utils.print_out(\"# Job id %d\" % jobid)\n\n # Random\n random_seed = FLAGS.random_seed\n if random_seed is not None and random_seed > 0:\n utils.print_out(\"# Set random seed to %d\" % random_seed)\n random.seed(random_seed + jobid)\n np.random.seed(random_seed + jobid)\n\n ## Train / Decode\n out_dir = FLAGS.out_dir\n if FLAGS.inference_input_file:\n # Model dir\n if FLAGS.model_dir:\n model_dir = FLAGS.model_dir\n else:\n model_dir = out_dir\n\n # Load hparams.\n hparams = inference.load_inference_hparams(\n model_dir,\n inference_list=FLAGS.inference_list)\n hparams = ensure_compatible_hparams(hparams)\n utils.print_hparams(hparams)\n\n # Inference\n trans_file = FLAGS.inference_output_file\n inference.inference(model_dir, FLAGS.inference_input_file,\n trans_file, hparams, num_workers, jobid)\n\n # Evaluation\n ref_file = FLAGS.inference_ref_file\n if ref_file and tf.gfile.Exists(trans_file):\n for metric in hparams.metrics:\n score = evaluation_utils.evaluate(\n ref_file,\n trans_file,\n metric,\n hparams.bpe_delimiter)\n utils.print_out(\" %s: %.1f\" % (metric, score))\n else:\n if not tf.gfile.Exists(out_dir): tf.gfile.MakeDirs(out_dir)\n\n # Load hparams.\n hparams = load_train_hparams(out_dir)\n\n # Train\n train.train(hparams)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n\n # network\n parser.add_argument(\"--num_units\", type=int, default=32, help=\"Network size.\")\n parser.add_argument(\"--num_layers\", type=int, default=2,\n help=\"Network depth.\")\n parser.add_argument(\"--encoder_type\", type=str, default=\"uni\", help=\"\"\"\\\n uni | bi | gnmt. 
For bi, we build num_layers/2 bi-directional layers.For\n gnmt, we build 1 bi-directional layer, and (num_layers - 1) uni-\n directional layers.\\\n \"\"\")\n parser.add_argument(\"--residual\", type=\"bool\", nargs=\"?\", const=True,\n default=False,\n help=\"Whether to add residual connections.\")\n parser.add_argument(\"--time_major\", type=\"bool\", nargs=\"?\", const=True,\n default=True,\n help=\"Whether to use time-major mode for dynamic RNN.\")\n\n # attention mechanisms\n parser.add_argument(\"--attention\", type=str, default=\"\", help=\"\"\"\\\n luong | scaled_luong | bahdanau | normed_bahdanau or set to \"\" for no\n attention\\\n \"\"\")\n parser.add_argument(\n \"--attention_architecture\",\n type=str,\n default=\"standard\",\n help=\"\"\"\\\n standard | gnmt | gnmt_v2.\n standard: use top layer to compute attention.\n gnmt: GNMT style of computing attention, use previous bottom layer to\n compute attention.\n gnmt_v2: similar to gnmt, but use current bottom layer to compute\n attention.\\\n \"\"\")\n parser.add_argument(\n \"--pass_hidden_state\", type=\"bool\", nargs=\"?\", const=True,\n default=True,\n help=\"\"\"\\\n Whether to pass encoder's hidden state to decoder when using an attention\n based model.\\\n \"\"\")\n\n # optimizer\n parser.add_argument(\"--optimizer\", type=str, default=\"sgd\", help=\"sgd | adam\")\n parser.add_argument(\"--learning_rate\", type=float, default=1.0,\n help=\"Learning rate. Adam: 0.001 | 0.0001\")\n parser.add_argument(\"--start_decay_step\", type=int, default=0,\n help=\"When we start to decay\")\n parser.add_argument(\"--decay_steps\", type=int, default=10000,\n help=\"How frequent we decay\")\n parser.add_argument(\"--decay_factor\", type=float, default=0.98,\n help=\"How much we decay.\")\n parser.add_argument(\n \"--num_train_steps\", type=int, default=12000, help=\"Num steps to train.\")\n parser.add_argument(\"--colocate_gradients_with_ops\", type=\"bool\", nargs=\"?\",\n const=True,\n default=True,\n help=(\"Whether try colocating gradients with \"\n \"corresponding op\"))\n\n # data\n parser.add_argument(\"--src\", type=str, default=None,\n help=\"Source suffix, e.g., en.\")\n parser.add_argument(\"--tgt\", type=str, default=None,\n help=\"Target suffix, e.g., de.\")\n parser.add_argument(\"--train_prefix\", type=str, default=None,\n help=\"Train prefix, expect files with src/tgt suffixes.\")\n parser.add_argument(\"--dev_prefix\", type=str, default=None,\n help=\"Dev prefix, expect files with src/tgt suffixes.\")\n parser.add_argument(\"--test_prefix\", type=str, default=None,\n help=\"Test prefix, expect files with src/tgt suffixes.\")\n parser.add_argument(\"--out_dir\", type=str, default=None,\n help=\"Store log/model files.\")\n\n # Vocab\n parser.add_argument(\"--vocab_prefix\", type=str, default=None, help=\"\"\"\\\n Vocab prefix, expect files with src/tgt suffixes.If None, extract from\n train files.\\\n \"\"\")\n parser.add_argument(\"--sos\", type=str, default=\"<s>\",\n help=\"Start-of-sentence symbol.\")\n parser.add_argument(\"--eos\", type=str, default=\"</s>\",\n help=\"End-of-sentence symbol.\")\n parser.add_argument(\"--share_vocab\", type=\"bool\", nargs=\"?\", const=True,\n default=False,\n help=\"\"\"\\\n Whether to use the source vocab and embeddings for both source and\n target.\\\n \"\"\")\n\n # Sequence lengths\n parser.add_argument(\"--src_max_len\", type=int, default=50,\n help=\"Max length of src sequences during training.\")\n parser.add_argument(\"--tgt_max_len\", type=int, default=50,\n help=\"Max 
length of tgt sequences during training.\")\n parser.add_argument(\"--src_max_len_infer\", type=int, default=None,\n help=\"Max length of src sequences during inference.\")\n parser.add_argument(\"--tgt_max_len_infer\", type=int, default=None,\n help=\"\"\"\\\n Max length of tgt sequences during inference. Also use to restrict the\n maximum decoding length.\\\n \"\"\")\n\n # Default settings works well (rarely need to change)\n parser.add_argument(\"--unit_type\", type=str, default=\"lstm\",\n help=\"lstm | gru\")\n parser.add_argument(\"--forget_bias\", type=float, default=1.0,\n help=\"Forget bias for BasicLSTMCell.\")\n parser.add_argument(\"--dropout\", type=float, default=0.2,\n help=\"Dropout rate (not keep_prob)\")\n parser.add_argument(\"--max_gradient_norm\", type=float, default=5.0,\n help=\"Clip gradients to this norm.\")\n parser.add_argument(\"--init_weight\", type=float, default=0.1,\n help=\"Initial weights from [-this, this].\")\n parser.add_argument(\"--source_reverse\", type=\"bool\", nargs=\"?\", const=True,\n default=False, help=\"Reverse source sequence.\")\n parser.add_argument(\"--batch_size\", type=int, default=128, help=\"Batch size.\")\n\n parser.add_argument(\"--steps_per_stats\", type=int, default=100,\n help=(\"How many training steps to do per stats logging.\"\n \"Save checkpoint every 10x steps_per_stats\"))\n parser.add_argument(\"--max_train\", type=int, default=0,\n help=\"Limit on the size of training data (0: no limit).\")\n parser.add_argument(\"--num_buckets\", type=int, default=5,\n help=\"Put data into similar-length buckets.\")\n\n # BPE\n parser.add_argument(\"--bpe_delimiter\", type=str, default=None,\n help=\"Set to @@ to activate BPE\")\n\n # Misc\n parser.add_argument(\"--num_gpus\", type=int, default=1,\n help=\"Number of gpus in each worker.\")\n parser.add_argument(\"--log_device_placement\", type=\"bool\", nargs=\"?\",\n const=True, default=False, help=\"Debug GPU allocation.\")\n parser.add_argument(\"--metrics\", type=str, default=\"bleu\",\n help=(\"Comma-separated list of evaluations \"\n \"metrics (bleu,rouge,accuracy)\"))\n parser.add_argument(\"--steps_per_external_eval\", type=int, default=None,\n help=\"\"\"\\\n How many training steps to do per external evaluation. 
Automatically set\n based on data if None.\\\n \"\"\")\n parser.add_argument(\"--scope\", type=str, default=None,\n help=\"scope to put variables under\")\n parser.add_argument(\"--hparams_path\", type=str, default=None,\n help=(\"Path to standard hparams json file that overrides\"\n \"hparams values from FLAGS.\"))\n parser.add_argument(\"--random_seed\", type=int, default=None,\n help=\"Random seed (>0, set a specific seed).\")\n\n\n # Inference\n parser.add_argument(\"--model_dir\", type=str, default=\"\",\n help=\"To load model for inference.\")\n parser.add_argument(\"--inference_input_file\", type=str, default=None,\n help=\"Set to the text to decode.\")\n parser.add_argument(\"--inference_list\", type=str, default=None,\n help=(\"A comma-separated list of sentence indices \"\n \"(0-based) to decode.\"))\n parser.add_argument(\"--infer_batch_size\", type=int, default=32,\n help=\"Batch size for inference mode.\")\n parser.add_argument(\"--inference_output_file\", type=str, default=None,\n help=\"Output file to store decoding results.\")\n parser.add_argument(\"--inference_ref_file\", type=str, default=None,\n help=\"To compute evaluation scores if provided.\")\n parser.add_argument(\"--beam_width\", type=int, default=0,\n help=(\"\"\"\\\n beam width when using beam search decoder. If 0 (default), use standard\n decoder with greedy helper.\\\n \"\"\"))\n parser.add_argument(\"--length_penalty_weight\", type=float, default=0.0,\n help=\"Length penalty for beam search.\")\n\n # Job info\n parser.add_argument(\"--jobid\", type=int, default=0,\n help=\"Task id of the worker.\")\n parser.add_argument(\"--num_workers\", type=int, default=1,\n help=\"Number of workers (inference only).\")\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.gfile.Exists", "tensorflow.gfile.MakeDirs", "numpy.random.seed", "tensorflow.app.run" ] ]
piedrro/napari-akseg
[ "305cbeb23c60b0371829e0937340f0fe0d0540a7" ]
[ "src/napari_akseg/_widget.py" ]
[ "\"\"\"\nThis module is an example of a barebones QWidget plugin for napari\n\nIt implements the Widget specification.\nsee: https://napari.org/plugins/guides.html?#widgets\n\nReplace code below according to your needs.\n\"\"\"\n\n\nfrom qtpy.QtWidgets import (QWidget,QVBoxLayout,QTabWidget,QCheckBox,QLabel,QLineEdit,QFileDialog,\n QComboBox,QPushButton,QProgressBar,QTextEdit,QSlider)\nfrom qtpy.QtCore import (QObject,QRunnable,QThreadPool)\nfrom PyQt5.QtCore import pyqtSignal,pyqtSlot\nimport sys\nfrom functools import partial\nimport os\nimport traceback\nimport napari\nimport numpy as np\nimport time\nimport cv2\nimport pandas as pd\nfrom glob2 import glob\nimport napari_akseg._utils\nfrom napari_akseg._utils import unstack_images, align_image_channels\n\nos.environ[\"QT_AUTO_SCREEN_SCALE_FACTOR\"] = \"1\"\n\nclass WorkerSignals(QObject):\n '''\n Defines the signals available from a running worker thread.\n\n Supported signals are:\n\n finished\n No data\n\n error\n tuple (exctype, value, traceback.format_exc() )\n\n result\n object data returned from processing, anything\n\n progress\n int indicating % progress\n\n '''\n finished = pyqtSignal()\n error = pyqtSignal(tuple)\n result = pyqtSignal(object)\n progress = pyqtSignal(int)\n\n\nclass Worker(QRunnable):\n '''\n Worker thread\n\n Inherits from QRunnable to handler worker thread setup, signals and wrap-up.\n\n :param callback: The function callback to run on this worker thread. Supplied args and\n kwargs will be passed through to the runner.\n :type callback: function\n :param args: Arguments to pass to the callback function\n :param kwargs: Keywords to pass to the callback function\n\n '''\n\n def __init__(self, fn, *args, **kwargs):\n super(Worker, self).__init__()\n\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n self.signals = WorkerSignals()\n\n # Add the callback to our kwargs\n self.kwargs['progress_callback'] = self.signals.progress\n\n @pyqtSlot()\n def run(self):\n '''\n Initialise the runner function with passed args, kwargs.\n '''\n\n # Retrieve args/kwargs here; and fire processing using them\n try:\n result = self.fn(*self.args, **self.kwargs)\n except:\n traceback.print_exc()\n exctype, value = sys.exc_info()[:2]\n self.signals.error.emit((exctype, value, traceback.format_exc()))\n else:\n self.signals.result.emit(result) # Return the result of the processing\n finally:\n self.signals.finished.emit() # Done\n\n def result(self):\n\n return self.fn(*self.args, **self.kwargs)\n\n\nclass AKSEG(QWidget):\n \"\"\"Widget allows selection of two labels layers and returns a new layer\n highlighing pixels whose values differ between the two layers.\"\"\"\n\n def __init__(self, viewer: napari.Viewer):\n \"\"\"Initialize widget with two layer combo boxes and a run button\n\n \"\"\"\n\n super().__init__()\n\n # import functions\n from napari_akseg._utils_database import (populate_upload_combos, check_database_access,\n update_akmetadata, _populateUSERMETA)\n from napari_akseg._utils import stack_images, _manualImport\n from napari_akseg.akseg_ui import Ui_tab_widget\n from napari_akseg._utils_cellpose import _open_cellpose_model\n from napari_akseg._utils_iterface_events import (_modifyMode, _viewerControls,_copymasktoall,\n _deleteallmasks, _imageControls, _segmentationEvents)\n\n self.populate_upload_combos = partial(populate_upload_combos, self)\n self.check_database_access = partial(check_database_access,self)\n self.update_akmetadata = 
partial(update_akmetadata, self)\n self.stack_image = partial(stack_images, self)\n self._open_cellpose_model = partial(_open_cellpose_model, self)\n self._modifyMode = partial(_modifyMode, self)\n self._viewerControls = partial(_viewerControls, self)\n self._copymasktoall = partial(_copymasktoall, self)\n self._deleteallmasks = partial(_deleteallmasks, self)\n self._populateUSERMETA = partial(_populateUSERMETA, self)\n self._imageControls = partial(_imageControls, self)\n self._segmentationEvents = partial(_segmentationEvents, self)\n self._manualImport = partial(_manualImport, self)\n\n\n application_path = os.path.dirname(sys.executable)\n self.viewer = viewer\n self.setLayout(QVBoxLayout())\n\n # ui_path = os.path.abspath(r\"C:\\napari-akseg\\src\\napari_akseg\\akseg_ui.ui\")\n # self.akseg_ui = uic.loadUi(ui_path)\n #command to refresh ui file: pyuic5 akseg_ui.ui -o akseg_ui.py\n\n self.form = Ui_tab_widget()\n self.akseg_ui = QTabWidget()\n self.form.setupUi(self.akseg_ui)\n\n # add widget_gui layout to main layout\n self.layout().addWidget(self.akseg_ui)\n\n # general references from Qt Desinger References\n self.tab_widget = self.findChild(QTabWidget, \"tab_widget\")\n\n # import controls from Qt Desinger References\n self.path_list = []\n self.active_import_mode = \"\"\n self.import_mode = self.findChild(QComboBox, \"import_mode\")\n self.import_filemode = self.findChild(QComboBox, \"import_filemode\")\n self.import_precision = self.findChild(QComboBox, \"import_precision\")\n self.import_import = self.findChild(QPushButton, \"import_import\")\n self.import_limit = self.findChild(QComboBox, \"import_limit\")\n self.clear_previous = self.findChild(QCheckBox, \"import_clear_previous\")\n self.autocontrast = self.findChild(QCheckBox, \"import_auto_contrast\")\n self.import_multiframe_mode = self.findChild(QComboBox, \"import_multiframe_mode\")\n self.import_crop_mode = self.findChild(QComboBox, \"import_crop_mode\")\n self.laser_mode = self.findChild(QComboBox, \"nim_laser_mode\")\n self.channel_mode = self.findChild(QComboBox, \"nim_channel_mode\")\n self.import_progressbar = self.findChild(QProgressBar, \"import_progressbar\")\n self.import_align = self.findChild(QCheckBox, \"import_align\")\n\n # cellpose controls + variabes from Qt Desinger References\n self.cellpose_segmentation = False\n self.cellpose_load_model = self.findChild(QPushButton, \"cellpose_load_model\")\n self.cellpose_custom_model = self.findChild(QTextEdit, \"cellpose_custom_model\")\n self.cellpose_custom_model_path = \"\"\n self.cellpose_model = self.findChild(QComboBox, \"cellpose_model\")\n self.cellpose_segchannel = self.findChild(QComboBox, \"cellpose_segchannel\")\n self.cellpose_flowthresh = self.findChild(QSlider, \"cellpose_flowthresh\")\n self.cellpose_flowthresh_label = self.findChild(QLabel, \"cellpose_flowthresh_label\")\n self.cellpose_maskthresh = self.findChild(QSlider, \"cellpose_maskthresh\")\n self.cellpose_maskthresh_label = self.findChild(QLabel, \"cellpose_maskthresh_label\")\n self.cellpose_minsize = self.findChild(QSlider, \"cellpose_minsize\")\n self.cellpose_minsize_label = self.findChild(QLabel, \"cellpose_minsize_label\")\n self.cellpose_diameter = self.findChild(QSlider, \"cellpose_diameter\")\n self.cellpose_diameter_label = self.findChild(QLabel, \"cellpose_diameter_label\")\n self.cellpose_segment_active = self.findChild(QPushButton, \"cellpose_segment_active\")\n self.cellpose_segment_all = self.findChild(QPushButton, \"cellpose_segment_all\")\n self.cellpose_clear_previous = 
self.findChild(QCheckBox, \"cellpose_clear_previous\")\n self.cellpose_usegpu = self.findChild(QCheckBox, \"cellpose_usegpu\")\n self.cellpose_resetimage = self.findChild(QCheckBox, \"cellpose_resetimage\")\n self.cellpose_stop = self.findChild(QPushButton, \"cellpose_stop\")\n self.cellpose_progressbar = self.findChild(QProgressBar, \"cellpose_progressbar\")\n\n # modify tab controls + variables from Qt Desinger References\n self.interface_mode = \"panzoom\"\n self.segmentation_mode = \"add\"\n self.class_mode = \"single\"\n self.class_colour = 1\n self.modify_panzoom = self.findChild(QPushButton, \"modify_panzoom\")\n self.modify_segment = self.findChild(QPushButton, \"modify_segment\")\n self.modify_classify = self.findChild(QPushButton, \"modify_classify\")\n self.modify_refine = self.findChild(QPushButton, \"modify_refine\")\n self.refine_channel = self.findChild(QComboBox, \"refine_channel\")\n self.refine_all = self.findChild(QPushButton, \"refine_all\")\n self.modify_copymasktoall = self.findChild(QPushButton, \"modify_copymasktoall\")\n self.modify_deleteallmasks = self.findChild(QPushButton, \"modify_deleteallmasks\")\n self.modify_progressbar = self.findChild(QProgressBar, \"modify_progressbar\")\n\n self.modify_auto_panzoom = self.findChild(QCheckBox, \"modify_auto_panzoom\")\n self.modify_add = self.findChild(QPushButton, \"modify_add\")\n self.modify_extend = self.findChild(QPushButton, \"modify_extend\")\n self.modify_split = self.findChild(QPushButton, \"modify_split\")\n self.modify_join = self.findChild(QPushButton, \"modify_join\")\n self.modify_delete = self.findChild(QPushButton, \"modify_delete\")\n self.classify_single = self.findChild(QPushButton, \"classify_single\")\n self.classify_dividing = self.findChild(QPushButton, \"classify_dividing\")\n self.classify_divided = self.findChild(QPushButton, \"classify_divided\")\n self.classify_vertical = self.findChild(QPushButton, \"classify_vertical\")\n self.classify_broken = self.findChild(QPushButton, \"classify_broken\")\n self.classify_edge = self.findChild(QPushButton, \"classify_edge\")\n self.modify_viewmasks = self.findChild(QCheckBox, \"modify_viewmasks\")\n self.modify_viewlabels = self.findChild(QCheckBox, \"modify_viewlabels\")\n self.find_next = self.findChild(QPushButton, 'find_next')\n self.find_previous = self.findChild(QPushButton, 'find_previous')\n self.find_criterion = self.findChild(QComboBox, \"find_criterion\")\n self.find_mode = self.findChild(QComboBox, \"find_mode\")\n\n self.modify_panzoom.setEnabled(False)\n self.modify_add.setEnabled(False)\n self.modify_extend.setEnabled(False)\n self.modify_join.setEnabled(False)\n self.modify_split.setEnabled(False)\n self.modify_delete.setEnabled(False)\n self.modify_refine.setEnabled(False)\n self.classify_single.setEnabled(False)\n self.classify_dividing.setEnabled(False)\n self.classify_divided.setEnabled(False)\n self.classify_vertical.setEnabled(False)\n self.classify_broken.setEnabled(False)\n self.classify_edge.setEnabled(False)\n\n # upload tab controls from Qt Desinger References\n self.database_path = \"\"\n self.upload_segmented = self.findChild(QCheckBox, \"upload_segmented\")\n self.upload_labelled = self.findChild(QCheckBox, \"upload_labelled\")\n self.upload_segcurated = self.findChild(QCheckBox, \"upload_segcurated\")\n self.upload_classcurated = self.findChild(QCheckBox, \"upload_classcurated\")\n self.upload_initial = self.findChild(QComboBox, \"upload_initial\")\n self.upload_content = self.findChild(QComboBox, \"upload_content\")\n 
self.upload_microscope = self.findChild(QComboBox, \"upload_microscope\")\n self.upload_modality = self.findChild(QComboBox, \"upload_modality\")\n self.upload_illumination = self.findChild(QComboBox, \"upload_illumination\")\n self.upload_stain = self.findChild(QComboBox, \"upload_stain\")\n self.upload_antibiotic = self.findChild(QComboBox, \"upload_antibiotic\")\n self.upload_abxconcentration = self.findChild(QComboBox, \"upload_abxconcentration\")\n self.upload_treatmenttime = self.findChild(QComboBox, \"upload_treatmenttime\")\n self.upload_mount = self.findChild(QComboBox, \"upload_mount\")\n self.upload_protocol = self.findChild(QComboBox, \"upload_protocol\")\n self.upload_usermeta1 = self.findChild(QComboBox, \"upload_usermeta1\")\n self.upload_usermeta2 = self.findChild(QComboBox, \"upload_usermeta2\")\n self.upload_usermeta3 = self.findChild(QComboBox, \"upload_usermeta3\")\n self.upload_overwrite_images = self.findChild(QCheckBox, \"upload_overwrite_images\")\n self.upload_overwrite_masks = self.findChild(QCheckBox, \"upload_overwrite_masks\")\n self.overwrite_selected_metadata = self.findChild(QCheckBox, \"overwrite_selected_metadata\")\n self.overwrite_all_metadata = self.findChild(QCheckBox, \"overwrite_all_metadata\")\n self.upload_all = self.findChild(QPushButton, \"upload_all\")\n self.upload_active = self.findChild(QPushButton, \"upload_active\")\n self.database_download = self.findChild(QPushButton, \"database_download\")\n self.database_download_limit = self.findChild(QComboBox, \"database_download_limit\")\n self.create_database = self.findChild(QPushButton, \"create_database\")\n self.load_database = self.findChild(QPushButton, \"load_database\")\n self.display_database_path = self.findChild(QLineEdit, \"display_database_path\")\n self.upload_progressbar = self.findChild(QProgressBar, \"upload_progressbar\")\n self.upload_tab = self.findChild(QWidget,\"upload_tab\")\n self._show_database_controls(False)\n\n # export tab controls from Qt Desinger References\n self.export_channel = self.findChild(QComboBox, \"export_channel\")\n self.export_mode = self.findChild(QComboBox, \"export_mode\")\n self.export_location = self.findChild(QComboBox, \"export_location\")\n self.export_directory = self.findChild(QTextEdit, \"export_directory\")\n self.export_modifier = self.findChild(QLineEdit, \"export_modifier\")\n self.export_single = self.findChild(QCheckBox, \"export_single\")\n self.export_dividing = self.findChild(QCheckBox, \"export_dividing\")\n self.export_divided = self.findChild(QCheckBox, \"export_divided\")\n self.export_vertical = self.findChild(QCheckBox, \"export_vertical\")\n self.export_broken = self.findChild(QCheckBox, \"export_broken\")\n self.export_edge = self.findChild(QCheckBox, \"export_edge\")\n self.export_active = self.findChild(QPushButton, \"export_active\")\n self.export_all = self.findChild(QPushButton, \"export_all\")\n self.export_statistics_pixelsize = self.findChild(QLineEdit, 'export_statistics_pixelsize')\n self.export_statistics_active = self.findChild(QPushButton, \"export_statistics_active\")\n self.export_statistics_all = self.findChild(QPushButton, \"export_statistics_all\")\n self.export_colicoords_mode = self.findChild(QComboBox, \"export_colicoords_mode\")\n self.export_progressbar = self.findChild(QProgressBar, \"export_progressbar\")\n self.export_directory.setText(\n \"Data will be exported in same folder(s) that the images/masks were originally imported from. 
Not Recomeneded for Nanoimager Data\")\n\n # import events\n self.autocontrast.stateChanged.connect(self._autoContrast)\n self.import_import.clicked.connect(self._importDialog)\n\n # cellpose events\n self.cellpose_load_model.clicked.connect(self._open_cellpose_model)\n self.cellpose_flowthresh.valueChanged.connect(lambda: self._updateSliderLabel(\"cellpose_flowthresh\",\n \"cellpose_flowthresh_label\"))\n self.cellpose_maskthresh.valueChanged.connect(lambda: self._updateSliderLabel(\"cellpose_maskthresh\"\n , \"cellpose_maskthresh_label\"))\n self.cellpose_minsize.valueChanged.connect(lambda: self._updateSliderLabel(\"cellpose_minsize\",\n \"cellpose_minsize_label\"))\n self.cellpose_diameter.valueChanged.connect(lambda: self._updateSliderLabel(\"cellpose_diameter\",\n \"cellpose_diameter_label\"))\n self.cellpose_segment_all.clicked.connect(self._segmentAll)\n self.cellpose_segment_active.clicked.connect(self._segmentActive)\n self.cellpose_segchannel.currentTextChanged.connect(self._updateSegChannels)\n\n # modify tab events\n self.modify_panzoom.clicked.connect(partial(self._modifyMode, \"panzoom\"))\n self.modify_segment.clicked.connect(partial(self._modifyMode, \"segment\"))\n self.modify_classify.clicked.connect(partial(self._modifyMode, \"classify\"))\n self.modify_refine.clicked.connect(partial(self._modifyMode, \"refine\"))\n self.modify_add.clicked.connect(partial(self._modifyMode, \"add\"))\n self.modify_extend.clicked.connect(partial(self._modifyMode, \"extend\"))\n self.modify_join.clicked.connect(partial(self._modifyMode, \"join\"))\n self.modify_split.clicked.connect(partial(self._modifyMode, \"split\"))\n self.modify_delete.clicked.connect(partial(self._modifyMode, \"delete\"))\n self.classify_single.clicked.connect(partial(self._modifyMode, \"single\"))\n self.classify_dividing.clicked.connect(partial(self._modifyMode, \"dividing\"))\n self.classify_divided.clicked.connect(partial(self._modifyMode, \"divided\"))\n self.classify_vertical.clicked.connect(partial(self._modifyMode, \"vertical\"))\n self.classify_broken.clicked.connect(partial(self._modifyMode, \"broken\"))\n self.classify_edge.clicked.connect(partial(self._modifyMode, \"edge\"))\n self.modify_viewmasks.stateChanged.connect(partial(self._viewerControls, \"viewmasks\"))\n self.modify_viewlabels.stateChanged.connect(partial(self._viewerControls, \"viewlabels\"))\n self.refine_all.clicked.connect(self._refine_akseg)\n self.modify_copymasktoall.clicked.connect(self._copymasktoall)\n self.modify_deleteallmasks.clicked.connect(self._deleteallmasks)\n self.find_next.clicked.connect(partial(self._sort_cells, \"next\"))\n self.find_previous.clicked.connect(partial(self._sort_cells, \"previous\"))\n\n # export events\n self.export_active.clicked.connect(partial(self._export, \"active\"))\n self.export_all.clicked.connect(partial(self._export, \"all\"))\n self.export_statistics_active.clicked.connect(partial(self._export_statistics, \"active\"))\n self.export_statistics_all.clicked.connect(partial(self._export_statistics, \"all\"))\n self.export_location.currentTextChanged.connect(self._getExportDirectory)\n\n # upload tab events\n self.upload_all.clicked.connect(partial(self._uploadDatabase, \"all\"))\n self.upload_active.clicked.connect(partial(self._uploadDatabase, \"active\"))\n self.database_download.clicked.connect(self._downloadDatabase)\n self.create_database.clicked.connect(self._create_AKSEG_database)\n self.load_database.clicked.connect(self._load_AKSEG_database)\n 
self.upload_initial.currentTextChanged.connect(self._populateUSERMETA)\n\n # viewer event that call updateFileName when the slider is modified\n self.contours = []\n self.viewer.dims.events.current_step.connect(self._sliderEvent)\n\n # self.segImage = self.viewer.add_image(np.zeros((1,100,100),dtype=np.uint16),name=\"Image\")\n self.class_colours = {1: (255 / 255, 255 / 255, 255 / 255, 1),\n 2: (0 / 255, 255 / 255, 0 / 255, 1),\n 3: (0 / 255, 170 / 255, 255 / 255, 1),\n 4: (170 / 255, 0 / 255, 255 / 255, 1),\n 5: (255 / 255, 170 / 255, 0 / 255, 1),\n 6: (255 / 255, 0 / 255, 0 / 255, 1), }\n\n self.classLayer = self.viewer.add_labels(np.zeros((1, 100, 100), dtype=np.uint16), opacity=0.25, name=\"Classes\",\n color=self.class_colours, metadata={0: {\"image_name\": \"\"}})\n self.segLayer = self.viewer.add_labels(np.zeros((1, 100, 100), dtype=np.uint16), opacity=1,\n name=\"Segmentations\", metadata={0: {\"image_name\": \"\"}})\n self.segLayer.contour = 1\n\n # keyboard events, only triggered when viewer is not empty (an image is loaded/active)\n self.viewer.bind_key(key=\"t\", func=partial(self._modifyMode, \"toggle\"), overwrite=True)\n self.viewer.bind_key(key=\"a\", func=partial(self._modifyMode, \"add\"), overwrite=True)\n self.viewer.bind_key(key=\"e\", func=partial(self._modifyMode, \"extend\"), overwrite=True)\n self.viewer.bind_key(key=\"j\", func=partial(self._modifyMode, \"join\"), overwrite=True)\n self.viewer.bind_key(key=\"s\", func=partial(self._modifyMode, \"split\"), overwrite=True)\n self.viewer.bind_key(key=\"d\", func=partial(self._modifyMode, \"delete\"), overwrite=True)\n self.viewer.bind_key(key=\"r\", func=partial(self._modifyMode, \"refine\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-1\", func=partial(self._modifyMode, \"single\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-2\", func=partial(self._modifyMode, \"dividing\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-3\", func=partial(self._modifyMode, \"divided\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-4\", func=partial(self._modifyMode, \"vertical\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-5\", func=partial(self._modifyMode, \"broken\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-6\", func=partial(self._modifyMode, \"edge\"), overwrite=True)\n self.viewer.bind_key(key=\"F1\", func=partial(self._modifyMode, \"panzoom\"), overwrite=True)\n self.viewer.bind_key(key=\"F2\", func=partial(self._modifyMode, \"segment\"), overwrite=True)\n self.viewer.bind_key(key=\"F3\", func=partial(self._modifyMode, \"classify\"), overwrite=True)\n # self.viewer.bind_key(key=\"Control\", func=partial(self._modifyMode, \"segment\"), overwrite=True)\n self.viewer.bind_key(key=\"h\", func=partial(self._viewerControls, \"h\"), overwrite=True)\n self.viewer.bind_key(key=\"i\", func=partial(self._viewerControls, \"i\"), overwrite=True)\n self.viewer.bind_key(key=\"o\", func=partial(self._viewerControls, \"o\"), overwrite=True)\n self.viewer.bind_key(key=\"x\", func=partial(self._viewerControls, \"x\"), overwrite=True)\n self.viewer.bind_key(key=\"z\", func=partial(self._viewerControls, \"z\"), overwrite=True)\n self.viewer.bind_key(key=\"c\", func=partial(self._viewerControls, \"c\"), overwrite=True)\n self.viewer.bind_key(key=\"Right\", func=partial(self._imageControls, \"Right\"), overwrite=True)\n self.viewer.bind_key(key=\"Left\", func=partial(self._imageControls, \"Left\"), overwrite=True)\n self.viewer.bind_key(key=\"u\", func=partial(self._imageControls, \"Upload\"), 
overwrite=True)\n self.viewer.bind_key(key=\"Control-Left\", func=partial(self._manual_align_channels, \"left\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-Right\", func=partial(self._manual_align_channels, \"right\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-Up\", func=partial(self._manual_align_channels, \"up\"), overwrite=True)\n self.viewer.bind_key(key=\"Control-Down\", func=partial(self._manual_align_channels, \"down\"), overwrite=True)\n\n # mouse events\n self.segLayer.mouse_drag_callbacks.append(self._segmentationEvents)\n\n # viewer events\n self.viewer.layers.events.inserted.connect(self._manualImport)\n\n self.threadpool = QThreadPool()\n\n\n def _sort_cells(self, order):\n\n current_fov = self.viewer.dims.current_step[0]\n\n meta = self.segLayer.metadata[current_fov]\n\n self._compute_simple_cell_stats()\n\n print(self.viewer.camera.dict)\n\n find_criterion = self.find_criterion.currentText()\n find_mode = self.find_mode.currentText()\n\n cell_centre = meta[\"simple_cell_stats\"]['cell_centre']\n cell_zoom = meta[\"simple_cell_stats\"]['cell_zoom']\n\n if find_criterion == \"Cell Area\":\n criterion = meta[\"simple_cell_stats\"][\"cell_area\"]\n if find_criterion == \"Cell Solidity\":\n criterion = meta[\"simple_cell_stats\"][\"cell_solidity\"]\n if find_criterion == \"Cell Aspect Ratio\":\n criterion = meta[\"simple_cell_stats\"][\"cell_aspect_ratio\"]\n\n if find_mode == \"Ascending\":\n criterion, cell_centre, cell_zoom= zip(*sorted(zip(criterion, cell_centre, cell_zoom), key=lambda x: x[0]))\n else:\n criterion, cell_centre, cell_zoom = zip(*sorted(zip(criterion, cell_centre, cell_zoom), key=lambda x: x[0], reverse=True))\n\n current_position = tuple(np.array(self.viewer.camera.center).round())\n\n if current_position not in cell_centre:\n\n self.viewer.camera.center = cell_centre[0]\n self.viewer.camera.zoom = cell_zoom[0]\n\n else:\n\n current_index = cell_centre.index(current_position)\n\n if order == 'next':\n\n new_index = current_index + 1\n\n if order == 'previous':\n\n new_index = current_index - 1\n\n new_index = max(current_fov, min(new_index, len(cell_centre) - 1))\n\n self.viewer.camera.center = cell_centre[new_index]\n self.viewer.camera.zoom = cell_zoom[new_index]\n\n\n def _compute_simple_cell_stats(self):\n\n current_fov = self.viewer.dims.current_step[0]\n\n mask = self.segLayer.data[current_fov]\n\n mask_ids = np.unique(mask)\n\n cell_area = []\n cell_solidity = []\n cell_aspect_ratio = []\n cell_centre = []\n cell_zoom = []\n cell_id = []\n\n for mask_id in mask_ids:\n\n if mask_id != 0:\n\n cnt_mask = np.zeros(mask.shape, dtype=np.uint8)\n cnt_mask[mask==mask_id] = 255\n\n cnt, _ = cv2.findContours(cnt_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n x, y, w, h = cv2.boundingRect(cnt[0])\n y1, y2, x1, x2 = y, (y + h), x, (x + w)\n\n try:\n area = cv2.contourArea(cnt[0])\n hull = cv2.convexHull(cnt[0])\n hull_area = cv2.contourArea(hull)\n solidity = float(area) / hull_area\n (_, _), (width, height), _ = cv2.minAreaRect(cnt[0])\n aspect_ratio = max(width, height) / min(width, height)\n\n except:\n area = 0\n solidity = 0\n aspect_ratio = 0\n\n centre = (0, y1 + (y2 - y1) // 2, x1 + (x2 - x1) // 2)\n\n zoom = min((mask.shape[0]/(y2-y1)), (mask.shape[1]/(x2-x1)))/2\n\n cell_area.append(area)\n cell_solidity.append(solidity)\n cell_aspect_ratio.append(aspect_ratio)\n cell_centre.append(centre)\n cell_zoom.append(zoom)\n cell_id.append(mask_id)\n\n cell_stats = {'cell_area': cell_area,\n 'cell_solidity':cell_solidity,\n 
'cell_aspect_ratio':cell_aspect_ratio,\n 'cell_centre': cell_centre,\n 'cell_zoom': cell_zoom,\n 'mask_id': cell_id}\n\n layer_names = [layer.name for layer in self.viewer.layers if layer.name]\n\n for layer in layer_names:\n\n meta = self.viewer.layers[layer].metadata[current_fov]\n meta['simple_cell_stats'] = cell_stats\n self.viewer.layers[layer].metadata[current_fov] = meta\n\n\n\n def _manual_align_channels(self, key, viewer=None):\n\n from scipy.ndimage import shift\n current_fov = self.viewer.dims.current_step[0]\n active_layer = self.viewer.layers.selection.active\n\n if key == 'up':\n shift_vector = [-1.0, 0.0]\n elif key == 'down':\n shift_vector = [1.0, 0.0]\n elif key == 'left':\n shift_vector = [0.0, -1.0]\n elif key == 'right':\n shift_vector = [0.0, 1.0]\n else:\n shift_vector = [0.0, 0.0]\n\n shift_image = False\n if active_layer != None:\n if active_layer.name not in [\"Segmentations\",\"Classes\"]:\n shift_image = True\n\n if shift_image is True:\n\n image_stack = active_layer.data.copy()\n image = image_stack[current_fov, :, :]\n image = shift(image, shift=shift_vector)\n image_stack[current_fov, :, :] = np.expand_dims(image,0)\n\n active_layer.data = image_stack\n\n else:\n\n mask_stack = self.segLayer.data.copy()\n label_stack = self.classLayer.data.copy()\n\n mask = mask_stack[current_fov, :, :]\n label = label_stack[current_fov, :, :]\n\n mask = shift(mask, shift=shift_vector)\n label = shift(label, shift=shift_vector)\n\n mask_stack[current_fov, :, :] = np.expand_dims(mask, 0)\n label_stack[current_fov, :, :] = np.expand_dims(label, 0)\n\n self.segLayer.data = mask_stack\n self.classLayer.data = label_stack\n\n def _create_AKSEG_database(self):\n\n desktop = os.path.expanduser(\"~/Desktop\")\n path = QFileDialog.getExistingDirectory(self, \"Select Directory\",desktop)\n\n if path:\n folders = [\"Images\",\"Metadata\",\"Models\"]\n\n path = os.path.abspath(path)\n path = os.path.join(path,\"AKSEG_Database\")\n\n if os.path.isdir(path) is False:\n os.mkdir(path)\n\n folders = [os.path.join(path,folder) for folder in folders if os.path.isdir(os.path.join(path,folder)) is False]\n\n for folder in folders:\n\n os.mkdir(folder)\n\n akseg_metadata = pd.DataFrame(columns = [\"User Initial\",\n \"Image Content\",\n \"Microscope\",\n \"Modality\",\n \"Light Source\",\n \"Stains\",\n \"Antibiotic\",\n \"Antibiotic Concentration\",\n \"Treatment Time (mins)\",\n \"Mounting Method\",\n \"Protocol\"])\n\n user_metadata = pd.DataFrame(columns=[\"User Initial\",\n \"User Meta #1\",\n \"User Meta #2\",\n \"User Meta #3\"])\n\n metadata_path = os.path.join(path,\"Metadata\",\"AKSEG Metadata.xlsx\")\n\n with pd.ExcelWriter(metadata_path) as writer:\n akseg_metadata.to_excel(writer, sheet_name='AKSEG Metadata', index=False, startrow=2, startcol=1)\n user_metadata.to_excel(writer, sheet_name='User Metadata', index=False, startrow=2, startcol=1)\n\n def _load_AKSEG_database(self):\n\n desktop = os.path.expanduser(\"~/Desktop\")\n path = QFileDialog.getExistingDirectory(self, \"Select Directory\",desktop)\n\n if \"AKSEG\" in path:\n\n AKSEG_folders = [\"Images\",\"Metadata\",\"Models\"]\n dir_folders = [folder.split(\"\\\\\")[-1] for folder in glob(path + \"*/*\")]\n\n if set(AKSEG_folders).issubset(dir_folders):\n\n self.database_path = os.path.abspath(path)\n from napari_akseg._utils_database import populate_upload_combos\n populate_upload_combos(self)\n self._populateUSERMETA\n\n self.display_database_path.setText(path)\n self._show_database_controls(True)\n\n def 
_show_database_controls(self, visible=True):\n\n all_database_controls = self.upload_tab.findChildren((QCheckBox, QComboBox, QLabel, QPushButton, QProgressBar))\n load_database_controls = [\"create_database\",\n \"load_database\",\n \"display_database_path\",\n \"display_database_label\",\n \"database_io_title\"]\n [item.setVisible(visible) for item in all_database_controls if item.objectName() not in load_database_controls]\n\n\n def _export_statistics(self, mode='active'):\n\n pixel_size = float(self.export_statistics_pixelsize.text())\n\n colicoords_channel = self.export_colicoords_mode.currentText()\n colicoords_channel = colicoords_channel.replace(\"Mask + \", \"\")\n\n if pixel_size <= 0:\n pixel_size = 1\n\n desktop = os.path.expanduser(\"~/Desktop\")\n\n path = QFileDialog.getExistingDirectory(self, \"Select Directory\", desktop)\n\n if path:\n\n path = os.path.abspath(path)\n\n from napari_akseg._utils_statistics import get_cell_statistics, process_cell_statistics\n self.get_cell_statistics = partial(get_cell_statistics,self)\n self.process_cell_statistics = partial(process_cell_statistics, self)\n\n worker = Worker(self.get_cell_statistics, mode=mode, pixel_size=pixel_size)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"export\"))\n worker.signals.result.connect(partial(self.process_cell_statistics, path=path))\n self.threadpool.start(worker)\n cell_data = worker.result()\n\n if self.export_colicoords_mode.currentIndex() != 0:\n\n from napari_akseg._utils_colicoords import run_colicoords\n self.run_colicoords = partial(run_colicoords, self)\n\n worker = Worker(self.run_colicoords, cell_data=cell_data,\n colicoords_channel=colicoords_channel,\n pixel_size=pixel_size,\n statistics=True)\n\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"export\"))\n worker.signals.result.connect(partial(self.process_cell_statistics, path=path))\n self.threadpool.start(worker)\n\n def _refine_akseg(self):\n\n pixel_size = float(self.export_statistics_pixelsize.text())\n\n if pixel_size <= 0:\n pixel_size = 1\n\n current_fov = self.viewer.dims.current_step[0]\n\n channel = self.refine_channel.currentText()\n colicoords_channel = channel.replace(\"Mask + \", \"\")\n\n mask_stack = self.segLayer.data\n mask = mask_stack[current_fov, :, :].copy()\n\n from napari_akseg._utils_statistics import get_cell_statistics\n from napari_akseg._utils_colicoords import run_colicoords\n self.get_cell_statistics = partial(get_cell_statistics,self)\n self.run_colicoords = partial(run_colicoords,self)\n\n worker = Worker(self.get_cell_statistics, mode='active', pixel_size=pixel_size)\n self.threadpool.start(worker)\n cell_data = worker.result()\n worker = Worker(self.run_colicoords, cell_data=cell_data, colicoords_channel=colicoords_channel,\n pixel_size=pixel_size)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"modify\"))\n worker.signals.result.connect(self.process_colicoords)\n self.threadpool.start(worker)\n\n def _uploadDatabase(self, mode):\n\n from napari_akseg._utils_database import _upload_AKSEG_database\n self._upload_AKSEG_database = partial(_upload_AKSEG_database,self)\n\n worker = Worker(self._upload_AKSEG_database, mode=mode)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"database\"))\n self.threadpool.start(worker)\n\n def _downloadDatabase(self):\n\n from napari_akseg._utils_database import _get_database_paths, read_AKSEG_directory, read_AKSEG_images\n self._get_database_paths = 
partial(_get_database_paths,self)\n self.read_AKSEG_images = partial(read_AKSEG_images, self)\n\n self.active_import_mode = \"AKSEG\"\n\n paths, import_limit = self._get_database_paths()\n\n if len(paths) == 0:\n\n print(\"no matching database files found\")\n\n else:\n\n measurements, file_paths, channels = read_AKSEG_directory(self, paths, import_limit)\n\n worker = Worker(self.read_AKSEG_images, measurements=measurements, channels=channels)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"database\"))\n self.threadpool.start(worker)\n\n def _updateSegChannels(self):\n\n layer_names = [layer.name for layer in self.viewer.layers if layer.name not in [\"Segmentations\", \"Classes\"]]\n\n segChannel = self.cellpose_segchannel.currentText()\n\n self.export_channel.setCurrentText(segChannel)\n\n def _aksegProgresbar(self, progress, progressbar):\n\n if progressbar == \"import\":\n self.import_progressbar.setValue(progress)\n if progressbar == \"export\":\n self.export_progressbar.setValue(progress)\n if progressbar == 'cellpose':\n self.cellpose_progressbar.setValue(progress)\n if progressbar == \"database\":\n self.upload_progressbar.setValue(progress)\n if progressbar == 'modify':\n self.modify_progressbar.setValue(progress)\n\n if progress == 100:\n time.sleep(1)\n self.import_progressbar.setValue(0)\n self.export_progressbar.setValue(0)\n self.cellpose_progressbar.setValue(0)\n self.upload_progressbar.setValue(0)\n self.modify_progressbar.setValue(0)\n\n def _importDialog(self):\n\n import_mode = self.import_mode.currentText()\n import_filemode = self.import_filemode.currentText()\n\n from napari_akseg._utils_database import check_database_access\n dialog_dir = check_database_access(file_path=r\"\\\\CMDAQ4.physics.ox.ac.uk\\AKGroup\")\n\n if import_filemode == \"Import File(s)\":\n paths, filter = QFileDialog.getOpenFileNames(self, \"Open Files\", dialog_dir, \"Files (*)\")\n\n if import_filemode == \"Import Directory\":\n path = QFileDialog.getExistingDirectory(self, \"Select Directory\", dialog_dir)\n\n paths = [path]\n\n if import_mode == \"Import Images\":\n\n self.import_images = partial(napari_akseg._utils.import_images, self)\n\n worker = Worker(self.import_images, file_paths=paths)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import NanoImager Data\":\n\n self.read_nim_directory = partial(napari_akseg._utils.read_nim_directory, self)\n self.read_nim_images = partial(napari_akseg._utils.read_nim_images, self)\n\n measurements, file_paths, channels = self.read_nim_directory(paths)\n\n worker = Worker(self.read_nim_images, measurements=measurements, channels=channels)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import Masks\":\n\n self.import_masks = partial(napari_akseg._utils.import_masks, self)\n\n self.import_masks(paths)\n\n if import_mode == \"Import Cellpose .npy file(s)\":\n\n self.import_cellpose = partial(napari_akseg._utils.import_cellpose, self)\n\n worker = Worker(self.import_cellpose, file_paths=paths)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if 
import_mode == \"Import Oufti .mat file(s)\":\n\n self.import_oufti = partial(napari_akseg._utils.import_oufti, self)\n\n worker = Worker(self.import_oufti, file_paths=paths)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import JSON .txt file(s)\":\n\n self.import_JSON = partial(napari_akseg._utils.import_JSON, self)\n\n worker = Worker(self.import_JSON, file_paths=paths)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import ImageJ files(s)\":\n\n self.import_imagej = partial(napari_akseg._utils.import_imagej, self)\n\n worker = Worker(self.import_imagej, paths=paths)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import Images + Masks Dataset\":\n\n self.import_dataset = partial(napari_akseg._utils.import_dataset, self)\n\n worker = Worker(self.import_dataset, file_paths=paths)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import AKSEG Dataset\":\n\n from napari_akseg._utils_database import read_AKSEG_directory\n self.read_AKSEG_images = partial(napari_akseg._utils_database.read_AKSEG_images, self)\n\n import_limit = self.import_limit.currentText()\n\n measurements, file_paths, channels = read_AKSEG_directory(self, paths, import_limit)\n\n worker = Worker(self.read_AKSEG_images, measurements=measurements, channels=channels)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n if import_mode == \"Import ScanR Data\":\n\n from napari_akseg._utils import read_scanr_directory, read_scanr_images\n self.read_scanr_images = partial(read_scanr_images, self)\n\n measurements, file_paths, channels = read_scanr_directory(self, paths)\n\n worker = Worker(self.read_scanr_images, measurements=measurements, channels=channels)\n worker.signals.result.connect(self._process_import)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"import\"))\n self.threadpool.start(worker)\n\n\n\n def _getExportDirectory(self):\n\n if self.export_location.currentText() == \"Import Directory\":\n self.export_directory.setText(\n \"Data will be exported in same folder(s) that the images/masks were originally imported from. 
Not Recomeneded for Nanoimager Data\")\n\n if self.export_location.currentText() == \"Select Directory\":\n\n from napari_akseg._utils_database import check_database_access\n dialog_dir = check_database_access(file_path=r\"\\\\CMDAQ4.physics.ox.ac.uk\\AKGroup\")\n\n path = QFileDialog.getExistingDirectory(self, \"Select Export Directory\", dialog_dir)\n\n if path:\n self.export_directory.setText(path)\n\n def _export(self, mode):\n\n self.export_files = partial(napari_akseg._utils.export_files, self)\n\n worker = Worker(self.export_files, mode=mode)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"export\"))\n self.threadpool.start(worker)\n\n def _segmentActive(self):\n\n from napari_akseg._utils_cellpose import _run_cellpose, _process_cellpose\n self._run_cellpose = partial(_run_cellpose, self)\n self._process_cellpose = partial(_process_cellpose, self)\n\n current_fov = self.viewer.dims.current_step[0]\n chanel = self.cellpose_segchannel.currentText()\n\n images = self.viewer.layers[chanel].data\n\n image = [images[current_fov, :, :]]\n\n worker = Worker(self._run_cellpose, images=image)\n worker.signals.result.connect(self._process_cellpose)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"cellpose\"))\n self.threadpool.start(worker)\n\n def _segmentAll(self):\n\n from napari_akseg._utils_cellpose import _run_cellpose, _process_cellpose\n self._run_cellpose = partial(_run_cellpose, self)\n self._process_cellpose = partial(_process_cellpose, self)\n\n channel = self.cellpose_segchannel.currentText()\n\n images = self.viewer.layers[channel].data\n\n images = unstack_images(images)\n\n worker = Worker(self._run_cellpose, images=images)\n worker.signals.result.connect(self._process_cellpose)\n worker.signals.progress.connect(partial(self._aksegProgresbar, progressbar=\"cellpose\"))\n self.threadpool.start(worker)\n\n def _updateSliderLabel(self, slider_name, label_name):\n\n self.slider = self.findChild(QSlider, slider_name)\n self.label = self.findChild(QLabel, label_name)\n\n slider_value = self.slider.value()\n\n if slider_name == \"cellpose_flowthresh\" or slider_name == \"cellpose_maskthresh\":\n self.label.setText(str(slider_value / 100))\n else:\n self.label.setText(str(slider_value))\n\n def _updateSegmentationCombo(self):\n\n layer_names = [layer.name for layer in self.viewer.layers if layer.name not in [\"Segmentations\", \"Classes\"]]\n\n self.cellpose_segchannel.clear()\n self.cellpose_segchannel.addItems(layer_names)\n\n self.export_channel.clear()\n self.export_channel.addItems(layer_names)\n\n self.refine_channel.clear()\n refine_layers = [\"Mask + \" + layer for layer in layer_names]\n self.refine_channel.addItems(['Mask'] + refine_layers)\n\n self.export_colicoords_mode.clear()\n refine_layers = [\"Mask + \" + layer for layer in layer_names]\n self.export_colicoords_mode.addItems(['None (OpenCV Stats)', 'Mask'] + refine_layers)\n\n if \"532\" in layer_names:\n index532 = layer_names.index(\"532\")\n self.cellpose_segchannel.setCurrentIndex(index532)\n\n def _sliderEvent(self, current_step):\n\n self._updateFileName()\n self._autoContrast()\n\n def _autoContrast(self):\n\n try:\n if self.autocontrast.isChecked():\n\n current_fov = self.viewer.dims.current_step[0]\n active_layer = self.viewer.layers.selection.active\n\n image = self.viewer.layers[str(active_layer)].data[current_fov]\n metadata = self.viewer.layers[str(active_layer)].metadata[current_fov]\n\n contrast_limit = metadata[\"contrast_limit\"]\n gamma = 
metadata[\"contrast_gamma\"]\n\n if contrast_limit[1] > contrast_limit[0]:\n self.viewer.layers[str(active_layer)].contrast_limits = contrast_limit\n self.viewer.layers[str(active_layer)].gamma = gamma\n\n except:\n pass\n\n def _updateFileName(self):\n\n try:\n\n current_fov = self.viewer.dims.current_step[0]\n active_layer = self.viewer.layers.selection.active\n\n metadata = self.viewer.layers[str(active_layer)].metadata[current_fov]\n\n file_name = metadata[\"image_name\"]\n\n self.viewer.text_overlay.visible = True\n\n self.viewer.text_overlay.text = file_name\n\n except:\n pass\n\n def _process_import(self, imported_data, rearrange=True):\n\n layer_names = [layer.name for layer in self.viewer.layers if layer.name not in [\"Segmentations\", \"Classes\"]]\n\n if self.clear_previous.isChecked() == True:\n # removes all layers (except segmentation layer)\n for layer_name in layer_names:\n self.viewer.layers.remove(self.viewer.layers[layer_name])\n # reset segmentation and class layers\n self.segLayer.data = np.zeros((1, 100, 100), dtype=np.uint16)\n self.classLayer.data = np.zeros((1, 100, 100), dtype=np.uint16)\n\n imported_images = imported_data[\"imported_images\"]\n\n if \"akmeta\" in imported_data.keys():\n from napari_akseg._utils_database import update_akmetadata\n update_akmetadata(self, imported_data[\"akmeta\"])\n\n for layer_name, layer_data in imported_images.items():\n\n images = layer_data['images']\n masks = layer_data['masks']\n classes = layer_data['classes']\n metadata = layer_data['metadata']\n\n from napari_akseg._utils import stack_images\n new_image_stack, new_metadata = stack_images(images, metadata)\n new_mask_stack, new_metadata = stack_images(masks, metadata)\n new_class_stack, new_metadata = stack_images(classes, metadata)\n\n if len(new_mask_stack) == 0:\n new_mask_stack = np.zeros(new_image_stack.shape, dtype=np.uint16)\n\n if len(new_class_stack) == 0:\n new_class_stack = np.zeros(new_image_stack.shape, dtype=np.uint16)\n\n colormap = 'gray'\n\n if layer_name == \"405\":\n colormap = \"green\"\n if layer_name == \"532\":\n colormap = \"red\"\n if layer_name == \"Cy3\":\n colormap = \"red\"\n if layer_name == \"DAPI\":\n colormap = \"green\"\n\n\n if self.clear_previous.isChecked() == False and layer_name in layer_names:\n\n current_image_stack = self.viewer.layers[layer_name].data\n current_metadata = self.viewer.layers[layer_name].metadata\n current_mask_stack = self.segLayer.data\n current_class_stack = self.classLayer.data\n\n if len(current_image_stack) == 0:\n\n self.imageLayer = self.viewer.add_image(new_image_stack, name=layer_name, colormap=colormap,\n gamma=0.8, metadata=new_metadata)\n self.segLayer.data = new_mask_stack\n self.classLayer.data = new_class_stack\n self.segLayer.metadata = new_metadata\n\n\n else:\n\n from napari_akseg._utils import append_image_stacks\n appended_image_stack, appended_metadata = append_image_stacks(current_metadata, new_metadata,\n current_image_stack, new_image_stack)\n\n appended_mask_stack, appended_metadata = append_image_stacks(current_metadata, new_metadata,\n current_mask_stack, new_mask_stack)\n\n appended_class_stack, appended_metadata = append_image_stacks(current_metadata, new_metadata,\n current_class_stack, new_class_stack)\n\n self.viewer.layers.remove(self.viewer.layers[layer_name])\n self.viewer.add_image(appended_image_stack, name=layer_name, colormap=colormap, gamma=0.8,\n metadata=appended_metadata)\n self.segLayer.data = appended_mask_stack\n self.classLayer.data = appended_class_stack\n 
self.segLayer.metadata = appended_metadata\n\n\n else:\n self.viewer.add_image(new_image_stack, name=layer_name, colormap=colormap, gamma=0.8,\n metadata=new_metadata)\n self.segLayer.data = new_mask_stack\n self.classLayer.data = new_class_stack\n self.segLayer.metadata = new_metadata\n\n layer_names = [layer.name for layer in self.viewer.layers if layer.name not in [\"Segmentations\", \"Classes\"]]\n\n # ensures segmentation and classes is in correct order in the viewer\n for layer in layer_names:\n self.viewer.layers[layer].selected = False\n layer_index = self.viewer.layers.index(layer)\n self.viewer.layers.move(layer_index, 0)\n\n if \"532\" in layer_names and rearrange == True:\n layer_name = \"532\"\n num_layers = len(self.viewer.layers)\n layer_ref = self.viewer.layers[layer_name]\n layer_index = self.viewer.layers.index(layer_name)\n self.viewer.layers.selection.select_only(layer_ref)\n self.viewer.layers.move(layer_index, num_layers - 2)\n\n if \"Cy3\" in layer_names and rearrange == True:\n layer_name = \"Cy3\"\n num_layers = len(self.viewer.layers)\n layer_ref = self.viewer.layers[layer_name]\n layer_index = self.viewer.layers.index(layer_name)\n self.viewer.layers.selection.select_only(layer_ref)\n self.viewer.layers.move(layer_index, num_layers - 2)\n\n # sets labels such that only label contours are shown\n self.segLayer.contour = 1\n self.segLayer.opacity = 1\n\n self._updateFileName()\n self._updateSegmentationCombo()\n self._updateSegChannels()\n self.import_progressbar.reset()\n\n self.viewer.reset_view()\n self._autoContrast()\n self._autoClassify()\n\n align_image_channels(self)\n\n def _autoClassify(self, reset=False):\n\n mask_stack = self.segLayer.data.copy()\n label_stack = self.classLayer.data.copy()\n\n for i in range(len(mask_stack)):\n\n mask = mask_stack[i, :, :]\n label = label_stack[i, :, :]\n\n label_ids = np.unique(label)\n mask_ids = np.unique(mask)\n\n if len(label_ids) == 1 or reset == True:\n\n label = np.zeros(label.shape, dtype=np.uint16)\n\n for mask_id in mask_ids:\n\n if mask_id != 0:\n\n cnt_mask = np.zeros(label.shape, dtype=np.uint8)\n cnt_mask[mask == mask_id] = 255\n\n cnt, _ = cv2.findContours(cnt_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n x, y, w, h = cv2.boundingRect(cnt[0])\n y1, y2, x1, x2 = y, (y + h), x, (x + w)\n\n # appends contour to list if the bounding coordinates are along the edge of the image\n if y1 > 0 and y2 < cnt_mask.shape[0] and x1 > 0 and x2 < cnt_mask.shape[1]:\n\n label[mask == mask_id] = 1\n\n else:\n\n label[mask == mask_id] = 6\n\n label_stack[i, :, :] = label\n\n self.classLayer.data = label_stack\n\n" ]
[ [ "numpy.expand_dims", "numpy.unique", "pandas.DataFrame", "pandas.ExcelWriter", "numpy.array", "scipy.ndimage.shift", "numpy.zeros" ] ]
xcz011/numpy
[ "bc3c7176987a5017126abbc861458fe53cd099fc" ]
[ "numpy/polynomial/tests/test_polynomial.py" ]
[ "\"\"\"Tests for polynomial module.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom functools import reduce\n\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nfrom numpy.testing import (\n assert_almost_equal, assert_raises, assert_equal, assert_,\n assert_array_equal)\n\n\ndef trim(x):\n return poly.polytrim(x, tol=1e-6)\n\nT0 = [1]\nT1 = [0, 1]\nT2 = [-1, 0, 2]\nT3 = [0, -3, 0, 4]\nT4 = [1, 0, -8, 0, 8]\nT5 = [0, 5, 0, -20, 0, 16]\nT6 = [-1, 0, 18, 0, -48, 0, 32]\nT7 = [0, -7, 0, 56, 0, -112, 0, 64]\nT8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]\nT9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]\n\nTlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]\n\n\nclass TestConstants(object):\n\n def test_polydomain(self):\n assert_equal(poly.polydomain, [-1, 1])\n\n def test_polyzero(self):\n assert_equal(poly.polyzero, [0])\n\n def test_polyone(self):\n assert_equal(poly.polyone, [1])\n\n def test_polyx(self):\n assert_equal(poly.polyx, [0, 1])\n\n\nclass TestArithmetic(object):\n\n def test_polyadd(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(max(i, j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = poly.polyadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_polysub(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(max(i, j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = poly.polysub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_polymulx(self):\n assert_equal(poly.polymulx([0]), [0])\n assert_equal(poly.polymulx([1]), [0, 1])\n for i in range(1, 5):\n ser = [0]*i + [1]\n tgt = [0]*(i + 1) + [1]\n assert_equal(poly.polymulx(ser), tgt)\n\n def test_polymul(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(i + j + 1)\n tgt[i + j] += 1\n res = poly.polymul([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_polydiv(self):\n # check zero division\n assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])\n\n # check scalar division\n quo, rem = poly.polydiv([2], [2])\n assert_equal((quo, rem), (1, 0))\n quo, rem = poly.polydiv([2, 2], [2])\n assert_equal((quo, rem), ((1, 1), 0))\n\n # check rest.\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n ci = [0]*i + [1, 2]\n cj = [0]*j + [1, 2]\n tgt = poly.polyadd(ci, cj)\n quo, rem = poly.polydiv(tgt, ci)\n res = poly.polyadd(poly.polymul(quo, ci), rem)\n assert_equal(res, tgt, err_msg=msg)\n\n def test_polypow(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n c = np.arange(i + 1)\n tgt = reduce(poly.polymul, [c]*j, np.array([1]))\n res = poly.polypow(c, j) \n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n\nclass TestEvaluation(object):\n # coefficients of 1 + 2*x + 3*x**2\n c1d = np.array([1., 2., 3.])\n c2d = np.einsum('i,j->ij', c1d, c1d)\n c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)\n\n # some random values in [-1, 1)\n x = np.random.random((3, 5))*2 - 1\n y = poly.polyval(x, [1., 2., 3.])\n\n def test_polyval(self):\n #check empty input\n assert_equal(poly.polyval([], [1]).size, 0)\n\n #check normal input)\n x = np.linspace(-1, 1)\n y = [x**i for i in range(5)]\n for i in range(5):\n tgt = y[i]\n res = poly.polyval(x, [0]*i + [1])\n assert_almost_equal(res, tgt)\n tgt = x*(x**2 - 1)\n res = poly.polyval(x, [0, -1, 0, 1])\n assert_almost_equal(res, tgt)\n\n #check that shape is 
preserved\n for i in range(3):\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(poly.polyval(x, [1]).shape, dims)\n assert_equal(poly.polyval(x, [1, 0]).shape, dims)\n assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)\n\n #check masked arrays are processed correctly\n mask = [False, True, False]\n mx = np.ma.array([1, 2, 3], mask=mask)\n res = np.polyval([7, 5, 3], mx)\n assert_array_equal(res.mask, mask)\n\n #check subtypes of ndarray are preserved\n class C(np.ndarray):\n pass\n\n cx = np.array([1, 2, 3]).view(C)\n assert_equal(type(np.polyval([2, 3, 4], cx)), C)\n\n def test_polyvalfromroots(self):\n # check exception for broadcasting x values over root array with\n # too few dimensions\n assert_raises(ValueError, poly.polyvalfromroots,\n [1], [1], tensor=False)\n\n # check empty input\n assert_equal(poly.polyvalfromroots([], [1]).size, 0)\n assert_(poly.polyvalfromroots([], [1]).shape == (0,))\n\n # check empty input + multidimensional roots\n assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)\n assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))\n\n # check scalar input\n assert_equal(poly.polyvalfromroots(1, 1), 0)\n assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,))\n\n # check normal input)\n x = np.linspace(-1, 1)\n y = [x**i for i in range(5)]\n for i in range(1, 5):\n tgt = y[i]\n res = poly.polyvalfromroots(x, [0]*i)\n assert_almost_equal(res, tgt)\n tgt = x*(x - 1)*(x + 1)\n res = poly.polyvalfromroots(x, [-1, 0, 1])\n assert_almost_equal(res, tgt)\n\n # check that shape is preserved\n for i in range(3):\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)\n assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)\n assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)\n\n # check compatibility with factorization\n ptest = [15, 2, -16, -2, 1]\n r = poly.polyroots(ptest)\n x = np.linspace(-1, 1)\n assert_almost_equal(poly.polyval(x, ptest),\n poly.polyvalfromroots(x, r))\n\n # check multidimensional arrays of roots and values\n # check tensor=False\n rshape = (3, 5)\n x = np.arange(-3, 2)\n r = np.random.randint(-5, 5, size=rshape)\n res = poly.polyvalfromroots(x, r, tensor=False)\n tgt = np.empty(r.shape[1:])\n for ii in range(tgt.size):\n tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])\n assert_equal(res, tgt)\n\n # check tensor=True\n x = np.vstack([x, 2*x])\n res = poly.polyvalfromroots(x, r, tensor=True)\n tgt = np.empty(r.shape[1:] + x.shape)\n for ii in range(r.shape[1]):\n for jj in range(x.shape[0]):\n tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])\n assert_equal(res, tgt)\n\n def test_polyval2d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test exceptions\n assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)\n\n #test values\n tgt = y1*y2\n res = poly.polyval2d(x1, x2, self.c2d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = poly.polyval2d(z, z, self.c2d)\n assert_(res.shape == (2, 3))\n\n def test_polyval3d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test exceptions\n assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)\n\n #test values\n tgt = y1*y2*y3\n res = poly.polyval3d(x1, x2, x3, self.c3d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = poly.polyval3d(z, z, z, self.c3d)\n assert_(res.shape == (2, 3))\n\n def test_polygrid2d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test values\n tgt = np.einsum('i,j->ij', y1, y2)\n res = poly.polygrid2d(x1, 
x2, self.c2d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = poly.polygrid2d(z, z, self.c2d)\n assert_(res.shape == (2, 3)*2)\n\n def test_polygrid3d(self):\n x1, x2, x3 = self.x\n y1, y2, y3 = self.y\n\n #test values\n tgt = np.einsum('i,j,k->ijk', y1, y2, y3)\n res = poly.polygrid3d(x1, x2, x3, self.c3d)\n assert_almost_equal(res, tgt)\n\n #test shape\n z = np.ones((2, 3))\n res = poly.polygrid3d(z, z, z, self.c3d)\n assert_(res.shape == (2, 3)*3)\n\n\nclass TestIntegral(object):\n\n def test_polyint(self):\n # check exceptions\n assert_raises(TypeError, poly.polyint, [0], .5)\n assert_raises(ValueError, poly.polyint, [0], -1)\n assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])\n assert_raises(ValueError, poly.polyint, [0], lbnd=[0])\n assert_raises(ValueError, poly.polyint, [0], scl=[0])\n assert_raises(TypeError, poly.polyint, [0], axis=.5)\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = poly.polyint([0], m=i, k=k)\n assert_almost_equal(res, [0, 1])\n\n # check single integration with integration constant\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n res = poly.polyint(pol, m=1, k=[i])\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n res = poly.polyint(pol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(poly.polyval(-1, res), i)\n\n # check single integration with integration constant and scaling\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n res = poly.polyint(pol, m=1, k=[i], scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1)\n res = poly.polyint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1, k=[k])\n res = poly.polyint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)\n res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1, k=[k], scl=2)\n res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyint_axis(self):\n # check that axis keyword works\n c2d = np.random.random((3, 4))\n\n tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T\n res = poly.polyint(c2d, axis=0)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([poly.polyint(c) for c in c2d])\n res = poly.polyint(c2d, axis=1)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])\n res = poly.polyint(c2d, k=3, axis=1)\n assert_almost_equal(res, tgt)\n\n\nclass TestDerivative(object):\n\n def test_polyder(self):\n # check exceptions\n assert_raises(TypeError, poly.polyder, [0], .5)\n assert_raises(ValueError, poly.polyder, [0], -1)\n\n # check that zeroth derivative does 
nothing\n for i in range(5):\n tgt = [0]*i + [1]\n res = poly.polyder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5):\n for j in range(2, 5):\n tgt = [0]*i + [1]\n res = poly.polyder(poly.polyint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5):\n for j in range(2, 5):\n tgt = [0]*i + [1]\n res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyder_axis(self):\n # check that axis keyword works\n c2d = np.random.random((3, 4))\n\n tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T\n res = poly.polyder(c2d, axis=0)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([poly.polyder(c) for c in c2d])\n res = poly.polyder(c2d, axis=1)\n assert_almost_equal(res, tgt)\n\n\nclass TestVander(object):\n # some random values in [-1, 1)\n x = np.random.random((3, 5))*2 - 1\n\n def test_polyvander(self):\n # check for 1d x\n x = np.arange(3)\n v = poly.polyvander(x, 3)\n assert_(v.shape == (3, 4))\n for i in range(4):\n coef = [0]*i + [1]\n assert_almost_equal(v[..., i], poly.polyval(x, coef))\n\n # check for 2d x\n x = np.array([[1, 2], [3, 4], [5, 6]])\n v = poly.polyvander(x, 3)\n assert_(v.shape == (3, 2, 4))\n for i in range(4):\n coef = [0]*i + [1]\n assert_almost_equal(v[..., i], poly.polyval(x, coef))\n\n def test_polyvander2d(self):\n # also tests polyval2d for non-square coefficient array\n x1, x2, x3 = self.x\n c = np.random.random((2, 3))\n van = poly.polyvander2d(x1, x2, [1, 2])\n tgt = poly.polyval2d(x1, x2, c)\n res = np.dot(van, c.flat)\n assert_almost_equal(res, tgt)\n\n # check shape\n van = poly.polyvander2d([x1], [x2], [1, 2])\n assert_(van.shape == (1, 5, 6))\n\n def test_polyvander3d(self):\n # also tests polyval3d for non-square coefficient array\n x1, x2, x3 = self.x\n c = np.random.random((2, 3, 4))\n van = poly.polyvander3d(x1, x2, x3, [1, 2, 3])\n tgt = poly.polyval3d(x1, x2, x3, c)\n res = np.dot(van, c.flat)\n assert_almost_equal(res, tgt)\n\n # check shape\n van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3])\n assert_(van.shape == (1, 5, 24))\n\n\nclass TestCompanion(object):\n\n def test_raises(self):\n assert_raises(ValueError, poly.polycompanion, [])\n assert_raises(ValueError, poly.polycompanion, [1])\n\n def test_dimensions(self):\n for i in range(1, 5):\n coef = [0]*i + [1]\n assert_(poly.polycompanion(coef).shape == (i, i))\n\n def test_linear_root(self):\n assert_(poly.polycompanion([1, 2])[0, 0] == -.5)\n\n\nclass TestMisc(object):\n\n def test_polyfromroots(self):\n res = poly.polyfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1, 5):\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n tgt = Tlist[i]\n res = poly.polyfromroots(roots)*2**(i-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyroots(self):\n assert_almost_equal(poly.polyroots([1]), [])\n assert_almost_equal(poly.polyroots([1, 2]), [-.5])\n for i in range(2, 5):\n tgt = np.linspace(-1, 1, i)\n res = poly.polyroots(poly.polyfromroots(tgt))\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyfit(self):\n def f(x):\n return x*(x - 1)*(x - 2)\n\n def f2(x):\n return x**4 + x**2 + 1\n\n # Test exceptions\n assert_raises(ValueError, poly.polyfit, [1], [1], -1)\n assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)\n assert_raises(TypeError, poly.polyfit, [], [1], 0)\n assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)\n 
assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)\n assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)\n assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])\n assert_raises(ValueError, poly.polyfit, [1], [1], [-1,])\n assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])\n assert_raises(TypeError, poly.polyfit, [1], [1], [])\n\n # Test fit\n x = np.linspace(0, 2)\n y = f(x)\n #\n coef3 = poly.polyfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(poly.polyval(x, coef3), y)\n coef3 = poly.polyfit(x, y, [0, 1, 2, 3])\n assert_equal(len(coef3), 4)\n assert_almost_equal(poly.polyval(x, coef3), y)\n #\n coef4 = poly.polyfit(x, y, 4)\n assert_equal(len(coef4), 5)\n assert_almost_equal(poly.polyval(x, coef4), y)\n coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])\n assert_equal(len(coef4), 5)\n assert_almost_equal(poly.polyval(x, coef4), y)\n #\n coef2d = poly.polyfit(x, np.array([y, y]).T, 3)\n assert_almost_equal(coef2d, np.array([coef3, coef3]).T)\n coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])\n assert_almost_equal(coef2d, np.array([coef3, coef3]).T)\n # test weighting\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n yw[0::2] = 0\n wcoef3 = poly.polyfit(x, yw, 3, w=w)\n assert_almost_equal(wcoef3, coef3)\n wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)\n assert_almost_equal(wcoef3, coef3)\n #\n wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)\n assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)\n wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)\n assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)\n # test scaling with complex values x points whose square\n # is zero when summed.\n x = [1, 1j, -1, -1j]\n assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])\n assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])\n # test fitting only even Polyendre polynomials\n x = np.linspace(-1, 1)\n y = f2(x)\n coef1 = poly.polyfit(x, y, 4)\n assert_almost_equal(poly.polyval(x, coef1), y)\n coef2 = poly.polyfit(x, y, [0, 2, 4])\n assert_almost_equal(poly.polyval(x, coef2), y)\n assert_almost_equal(coef1, coef2)\n\n def test_polytrim(self):\n coef = [2, -1, 1, 0]\n\n # Test exceptions\n assert_raises(ValueError, poly.polytrim, coef, -1)\n\n # Test results\n assert_equal(poly.polytrim(coef), coef[:-1])\n assert_equal(poly.polytrim(coef, 1), coef[:-3])\n assert_equal(poly.polytrim(coef, 2), [0])\n\n def test_polyline(self):\n assert_equal(poly.polyline(3, 4), [3, 4])\n" ]
[ [ "numpy.dot", "numpy.einsum", "numpy.linspace", "numpy.polynomial.polynomial.polyfit", "numpy.polynomial.polynomial.polymulx", "numpy.polynomial.polynomial.polyval2d", "numpy.polynomial.polynomial.polygrid2d", "numpy.polynomial.polynomial.polytrim", "numpy.zeros_like", "numpy.ma.array", "numpy.polynomial.polynomial.polyfromroots", "numpy.polyval", "numpy.polynomial.polynomial.polyvander3d", "numpy.random.randint", "numpy.polynomial.polynomial.polypow", "numpy.testing.assert_equal", "numpy.polynomial.polynomial.polysub", "numpy.polynomial.polynomial.polydiv", "numpy.arange", "numpy.testing.assert_almost_equal", "numpy.polynomial.polynomial.polyder", "numpy.zeros", "numpy.polynomial.polynomial.polyvander2d", "numpy.polynomial.polynomial.polyval3d", "numpy.polynomial.polynomial.polyint", "numpy.polynomial.polynomial.polyadd", "numpy.polynomial.polynomial.polyval", "numpy.testing.assert_raises", "numpy.testing.assert_", "numpy.polynomial.polynomial.polygrid3d", "numpy.array", "numpy.polynomial.polynomial.polycompanion", "numpy.random.random", "numpy.polynomial.polynomial.polyvalfromroots", "numpy.polynomial.polynomial.polyroots", "numpy.empty", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.polynomial.polynomial.polyvander", "numpy.polynomial.polynomial.polymul", "numpy.polynomial.polynomial.polyline", "numpy.vstack" ] ]
hhoppe/hhoppe-tools
[ "5a72a9bf5e2e3e6648cb60d0dbfad10b34110231" ]
[ "hhoppe_tools/__init__.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Library of Python tools -- Hugues Hoppe.\n# pylint: disable=line-too-long\n\nUseful commands to test and polish this file:\n\nbash -c 'f=__init__.py; true && env python3 $f; env mypy --strict \"$f\"; autopep8 -a -a -a --max-line-length 80 --indent-size 2 --ignore E265 --diff \"$f\"; pylint --indent-string=\" \" --disable=C0103,C0302,C0415,R0902,R0903,R0913,R0914,W0640 \"$f\"; true && python3 -m doctest -v \"$f\" | perl -ne \"print if /had no tests/../passed all/\" | head -n -1; true && env pytest ..; echo All ran.'\n\nenv pytest --doctest-modules ..\nenv python3 -m doctest -v hhoppe_tools.py | perl -ne 'print if /had no tests/../passed all/' | tail -n +2 | head -n -1\nhhoppe_tools.py\nenv mypy --strict hhoppe_tools.py\nbash -c \"autopep8 -a -a -a --max-line-length 80 --indent-size 2 --ignore E265 hhoppe_tools.py >~/tmp/v && ediff hhoppe_tools.py ~/tmp/v\"\nbash -c 'pylint --indent-string=\" \" --disable=C0103,C0302,C0415,R0201,R0902,R0903,R0913,R0914 hhoppe_tools.py'\nbash -c \"pydoc3 ~/bin/hhoppe_tools.py\" # Print module help\ngpylint hhoppe_tools.py\n\n# pylint: enable=line-too-long\n\"\"\"\n\n__docformat__ = 'google'\n__version__ = '0.8.4'\n__version_info__ = tuple(int(num) for num in __version__.split('.'))\n\nimport ast\nimport collections.abc\nimport contextlib\nimport cProfile\nimport dataclasses\nimport doctest\nimport functools\nimport gc\nimport io # pylint: disable=unused-import\nimport importlib.util\nimport itertools\nimport math\nimport numbers\nimport os # pylint: disable=unused-import\nimport pathlib\nimport pstats\nimport re\nimport stat\nimport subprocess\nimport sys\nimport tempfile # pylint:disable=unused-import\nimport time\nimport traceback\nimport typing\nfrom typing import Any, Callable, Dict, Generator, Iterable\nfrom typing import Iterator, List, Mapping, Optional, Sequence, Set\nfrom typing import Tuple, TypeVar, Union\nimport unittest.mock # pylint: disable=unused-import\n\nimport numpy as np\n\n_T = TypeVar('_T')\n_F = TypeVar('_F', bound=Callable[..., Any])\n_UNDEFINED = object()\n\n# _NDArray = np.ndarray[Any, Any]\n_NDArray = Any # numpy typing is not yet mature.\n\n# https://github.com/python/mypy/issues/5667\nif typing.TYPE_CHECKING:\n _Path = Union[str, 'os.PathLike[str]']\nelse:\n _Path = Union[str, os.PathLike]\n\n\n## Language extensions\n\n\ndef assert_value(value: Optional[_T]) -> _T:\n \"\"\"Return value after asserting it, as in a functional assert.\n\n >>> assert_value('word')\n 'word'\n\n >>> assert_value(33)\n 33\n\n >>> assert_value(0.0)\n Traceback (most recent call last):\n ...\n AssertionError: 0.0\n\n >>> assert_value([])\n Traceback (most recent call last):\n ...\n AssertionError: []\n\n >>> assert_value(None)\n Traceback (most recent call last):\n ...\n AssertionError: None\n\n >>> assert_value(False)\n Traceback (most recent call last):\n ...\n AssertionError: False\n \"\"\"\n assert value, value\n return value\n\n\ndef assert_not_none(value: Optional[_T]) -> _T:\n \"\"\"Return value after asserting that it is not None.\n\n >>> assert_not_none('word')\n 'word'\n\n >>> assert_not_none(0)\n 0\n\n >>> assert_not_none('')\n ''\n\n >>> assert_not_none(())\n ()\n\n >>> assert_not_none(False)\n False\n\n >>> assert_not_none(None)\n Traceback (most recent call last):\n ...\n AssertionError\n \"\"\"\n assert value is not None\n return value\n\n\n## Debugging output\n\n\ndef check_eq(a: Any, b: Any) -> None:\n \"\"\"Assert that two values are equal or raise exception with a useful message.\n\n Args:\n a: First 
expression.\n b: Second expression.\n Raises:\n RuntimeError: If a != b (or np.any(a != b) if np.ndarray).\n\n >>> check_eq('a' + 'b', 'ab')\n\n >>> check_eq(1 + 2, 4)\n Traceback (most recent call last):\n ...\n AssertionError: 3 == 4\n \"\"\"\n check_fails = np.any(a != b) if isinstance(a, np.ndarray) else a != b\n if check_fails:\n raise AssertionError(f'{a!r} == {b!r}')\n\n\ndef print_err(*args: str, **kwargs: Any) -> None:\n r\"\"\"Prints arguments to stderr immediately.\n\n >>> with unittest.mock.patch('sys.stderr', new_callable=io.StringIO) as m:\n ... print_err('hello')\n ... print(repr(m.getvalue()))\n 'hello\\n'\n \"\"\"\n kwargs = {**dict(file=sys.stderr, flush=True), **kwargs}\n print(*args, **kwargs)\n\n\ndef dump_vars(*args: Any) -> str:\n \"\"\"Return a string showing the values of each expression.\n\n Specifically, convert each expression (contributed by the caller to the\n variable-parameter list *args) into a substring f'expression = {expression}'\n and join these substrings separated by ', '.\n\n If the caller itself provided a variable-parameter list (*args),\n the search continues in its callers. The approach examines a stack trace,\n so it is fragile and non-portable.\n\n Args:\n *args: Expressions to show.\n Raises:\n Exception: If the dump_vars(...) does not fit on a single source line.\n\n >>> a = 45\n >>> b = 'Hello'\n >>> dump_vars(a)\n 'a = 45'\n >>> dump_vars(b)\n 'b = Hello'\n >>> dump_vars(a, b, (a * 2) + 5, b + ' there')\n \"a = 45, b = Hello, (a * 2) + 5 = 95, b + ' there' = Hello there\"\n >>> dump_vars([3, 4, 5][1])\n '[3, 4, 5][1] = 4'\n \"\"\"\n\n def matching_parenthesis(text: str) -> int:\n \"\"\"Return the index of ')' matching '(' in text[0].\"\"\"\n check_eq(text[0], '(')\n num_open = 0\n for i, c in enumerate(text):\n if c == '(':\n num_open += 1\n elif c == ')':\n num_open -= 1\n if num_open == 0:\n return i\n raise RuntimeError(f'No matching right parenthesis in \"{text}\"')\n\n # Adapted from make_dict() in https://stackoverflow.com/a/2553524 .\n stack = traceback.extract_stack()\n this_function_name = stack[-1][2] # i.e. initially 'dump_vars'.\n for stackframe in stack[-2::-1]:\n (filename, unused_line_number, function_name, text) = stackframe # Caller.\n # https://docs.python.org/3/tutorial/errors.html:\n # \"however, it will not display lines read from standard input.\"\n if filename == '<stdin>':\n check_eq(text, '')\n return ', '.join(str(e) for e in args) # Unfortunate fallback.\n prefix = this_function_name + '('\n begin = text.find(prefix)\n if begin < 0:\n raise Exception(f'dump_vars: cannot find \"{prefix}\" in line \"{text}\"')\n begin += len(this_function_name)\n end = begin + matching_parenthesis(text[begin:])\n parameter_string = text[begin + 1:end].strip()\n if re.fullmatch(r'\\*[\\w]+', parameter_string):\n this_function_name = function_name\n # Because the call is made using a *args, we continue to\n # the earlier caller in the stack trace.\n else:\n if len(args) == 1:\n expressions = [parameter_string.strip()]\n elif hasattr(ast, 'get_source_segment'): # Python 3.8.\n node = ast.parse(parameter_string)\n # print(ast.dump(node)) # \", indent=2\" requires Python 3.9.\n value = getattr(node.body[0], 'value', '?')\n elements = getattr(value, 'elts', [value])\n\n def get_text(element: Any) -> str:\n text = ast.get_source_segment(parameter_string, element)\n return '?' 
if text is None else text\n\n expressions = [get_text(element) for element in elements]\n else:\n expressions = [name.strip() for name in parameter_string.split(',')]\n l = []\n for (expr, value) in zip(expressions, args):\n l.append(f'{expr} = {value}' if expr[0] not in '\"\\'' else str(value))\n return ', '.join(l)\n raise AssertionError\n\n\ndef show(*args: Any) -> None:\n r\"\"\"Prints expressions and their values on stdout.\n\n Args:\n *args: Expressions to show.\n\n >>> with unittest.mock.patch('sys.stdout', new_callable=io.StringIO) as m:\n ... show(4 * 3)\n ... check_eq(m.getvalue(), '4 * 3 = 12\\n')\n\n >>> with unittest.mock.patch('sys.stdout', new_callable=io.StringIO) as m:\n ... a ='<string>'\n ... show(a, 'literal_string', \"s\", a * 2, 34 // 3)\n ... s = m.getvalue()\n >>> s\n 'a = <string>, literal_string, s, a * 2 = <string><string>, 34 // 3 = 11\\n'\n \"\"\"\n print(dump_vars(*args), flush=True)\n\n\n## Jupyter/IPython notebook functionality\n\n\ndef in_colab() -> bool:\n \"\"\"Return True if running inside Google Colab.\n\n >>> in_colab()\n False\n \"\"\"\n try:\n import google.colab # pylint: disable=unused-import\n return True\n except ModuleNotFoundError:\n return False\n\n\nclass _CellTimer:\n \"\"\"Record timings of all notebook cells and show top entries at the end.\"\"\"\n # Inspired from https://github.com/cpcloud/ipython-autotime.\n\n instance: Optional['_CellTimer'] = None\n\n def __init__(self) -> None:\n import IPython\n self.elapsed_times: Dict[int, float] = {}\n self.start()\n IPython.get_ipython().events.register('pre_run_cell', self.start)\n IPython.get_ipython().events.register('post_run_cell', self.stop)\n\n def close(self) -> None:\n \"\"\"Destroy the _CellTimer and its notebook callbacks.\"\"\"\n import IPython\n IPython.get_ipython().events.unregister('pre_run_cell', self.start)\n IPython.get_ipython().events.unregister('post_run_cell', self.stop)\n\n def start(self) -> None:\n \"\"\"Start a timer for the notebook cell execution.\"\"\"\n self.start_time = time.monotonic()\n\n def stop(self) -> None:\n \"\"\"Start the timer for the notebook cell execution.\"\"\"\n import IPython\n elapsed_time = time.monotonic() - self.start_time\n input_index = IPython.get_ipython().execution_count\n if not in_colab():\n input_index -= 1\n self.elapsed_times[input_index] = elapsed_time\n\n def show_times(self, n: Optional[int] = None, sort: bool = False) -> None:\n \"\"\"Print notebook cell timings.\"\"\"\n import IPython\n print(f'Total time: {sum(self.elapsed_times.values()):.2f} s')\n times = list(self.elapsed_times.items())\n times = sorted(times, key=lambda x: x[sort], reverse=sort)\n # https://github.com/ipython/ipython/blob/master/IPython/core/history.py\n # https://ipython.readthedocs.io/en/stable/api/generated/IPython.core.history.html\n session = IPython.get_ipython().history_manager.session_number\n history_range = IPython.get_ipython().history_manager.get_range(session)\n inputs = {index: text for unused_session, index, text in history_range}\n for input_index, elapsed_time in itertools.islice(times, n):\n cell_input = inputs[input_index]\n print(f'In[{input_index:3}] {cell_input!r:60.60} {elapsed_time:6.3f} s')\n\n\ndef start_timing_notebook_cells() -> None:\n \"\"\"Start timing of Jupyter notebook cells.\n\n Place in an early notebook cell. 
See also `show_notebook_cell_top_times`.\n \"\"\"\n import IPython\n if IPython.get_ipython():\n if _CellTimer.instance:\n _CellTimer.instance.close()\n _CellTimer.instance = _CellTimer()\n\n\ndef show_notebook_cell_top_times() -> None:\n \"\"\"Print summary of timings for Jupyter notebook cells.\n\n Place in a late notebook cell. See also `start_timing_notebook_cells`.\n \"\"\"\n if _CellTimer.instance:\n _CellTimer.instance.show_times(n=20, sort=True)\n\n\n## Timing\n\n\ndef get_time_and_result(func: Callable[[], Any], *,\n max_repeat: int = 10,\n max_time: float = 2.0) -> Tuple[float, Any]:\n \"\"\"Call the function repeatedly to determine its minimum run time.\n\n If the measured run time is small, more precise time estimates are obtained\n by considering batches of function calls (with automatically increasing\n batch size).\n\n Args:\n func: Function to time.\n max_repeat: Maximum number of timing measurements across which to compute\n the minimum value.\n max_time: Desired maximum total time in obtaining timing measurements.\n\n Returns:\n minimum_time: The smallest time (in seconds) measured across the repeated\n calls to `func` (divided by batch size if batches are introduced).\n result: The value returned by the last call to `func`.\n\n >>> elapsed, result = get_time_and_result(lambda: 11 + 22)\n >>> assert elapsed < 0.01, elapsed\n >>> result\n 33\n \"\"\"\n assert callable(func) and max_repeat > 0 and max_time > 0.0\n result = None\n gc_was_enabled = gc.isenabled()\n batch_size = 1\n smallest_acceptable_batch_time = 0.01 # All times are in seconds.\n try:\n gc.disable()\n while True:\n num_repeat = 0\n sum_time = 0.0\n min_time = math.inf\n start = time.monotonic()\n while num_repeat < max_repeat and sum_time < max_time:\n for _ in range(batch_size):\n result = func()\n stop = time.monotonic()\n elapsed = stop - start\n start = stop\n num_repeat += 1\n sum_time += elapsed\n min_time = min(min_time, elapsed)\n if min_time >= smallest_acceptable_batch_time:\n break\n batch_size *= 10\n finally:\n if gc_was_enabled:\n gc.enable()\n return min_time / batch_size, result\n\n\ndef get_time(func: Callable[[], Any], **kwargs: Any) -> float:\n \"\"\"Return the minimum execution time when repeatedly calling `func`.\n\n >>> elapsed = get_time(lambda: time.sleep(0.2), max_repeat=1)\n >>> assert 0.15 < elapsed < 0.25, elapsed\n \"\"\"\n return get_time_and_result(func, **kwargs)[0]\n\n\ndef format_float(value: float, precision: int) -> str:\n \"\"\"Return non-scientific repr. of value with specified digits of precision.\n\n >>> format_float(1234, 3)\n '1230'\n\n >>> format_float(0.1234, 3)\n '0.123'\n\n >>> format_float(0.1230, 3)\n '0.123'\n\n >>> format_float(0.01236, 3)\n '0.0124'\n\n >>> format_float(123, 3)\n '123'\n\n >>> format_float(120, 3)\n '120'\n \"\"\"\n text = np.format_float_positional(\n value, fractional=False, unique=False, precision=precision)\n return text.rstrip('.')\n\n\ndef print_time(func: Callable[[], Any], **kwargs: Any) -> None:\n r\"\"\"Print the minimum execution time when repeatedly calling `func`.\n\n >>> with unittest.mock.patch('sys.stdout', new_callable=io.StringIO) as m:\n ... print_time(lambda: 1)\n ... 
assert re.fullmatch(r'[0-9.]+ .?s\\n', m.getvalue()), m.getvalue()\n\n \"\"\"\n min_time = get_time(func, **kwargs)\n # print(f'{min_time:.3f} s', flush=True)\n text = (f'{format_float(min_time, 3)} s' if min_time >= 1.0 else\n f'{format_float(min_time*1e3, 3)} ms' if min_time > 1.0 / 1e3 else\n f'{format_float(min_time*1e6, 3)} µs' if min_time > 1.0 / 1e6 else\n f'{format_float(min_time*1e6, 2)} µs')\n print(text, flush=True)\n\n\n## Profiling\n\n\ndef prun(func: Callable[[], Any], mode: str = 'tottime',\n top: Optional[int] = None) -> None:\n \"\"\"Profile the function call and print reformatted statistics.\n\n >>> with unittest.mock.patch('sys.stdout', new_callable=io.StringIO) as m:\n ... prun(lambda: np.linalg.qr(np.random.random((400, 400))))\n ... lines = m.getvalue().splitlines()\n >>> assert lines[0].startswith('# Prun: tottime ')\n >>> assert 'overall_cumtime' in lines[0]\n >>> assert len(lines) >= 4\n \"\"\"\n assert callable(func)\n assert mode in ('original', 'full', 'tottime'), mode\n\n profile = cProfile.Profile()\n try:\n profile.enable()\n func()\n finally:\n profile.disable()\n\n with io.StringIO() as string_io:\n args = (top,) if top is not None else ()\n pstats.Stats(profile, stream=string_io).sort_stats(\n 'tottime').print_stats(*args)\n lines = string_io.getvalue().strip('\\n').splitlines()\n\n if mode == 'original':\n print('\\n'.join(lines))\n return\n\n def beautify_function_name(name: str) -> str:\n name = re.sub(r'^\\{built-in method (.*)\\}$', r'\\1 (built-in)', name)\n name = re.sub(r\"^\\{method '(.*)' of '(.*)' objects\\}$\", r'\\2.\\1', name)\n name = re.sub(r'^\\{function (\\S+) at (0x\\w+)\\}$', r'\\1', name)\n name = re.sub(r'^<ipython-input[-\\w]+>:\\d+\\((.*)\\)$', r'\\1', name)\n name = re.sub(r'^([^:()]+):(\\d+)\\((.+)\\)$', r'\\3 (\\1:\\2)', name)\n name = re.sub(r'^\\{(\\S+)\\}$', r'\\1', name)\n name = re.sub(r' \\(/tmp/ipykernel.*\\.py:', r' (/tmp/ipykernel:', name)\n return name\n\n output = []\n overall_time = 0.0\n post_header = False\n for line in lines:\n if post_header:\n tottime_str, cumtime_str, name = assert_value(re.fullmatch(\n r'\\s*\\S+\\s+(\\S+)\\s+\\S+\\s+(\\S+)\\s+\\S+\\s+(\\S.*)', line)).groups()\n tottime, cumtime = float(tottime_str), float(cumtime_str)\n beautified_name = beautify_function_name(name)\n overall_time += 1e-6\n significant_time = (tottime / overall_time > 0.05 or\n 0.05 < cumtime / overall_time < 0.95)\n if top is not None or significant_time:\n if mode == 'tottime':\n output.append(f' {tottime:8.3f} {cumtime:8.3f} {beautified_name}')\n else: # mode == 'full'\n output.append(line.replace(name, beautified_name))\n elif ' function calls ' in line:\n overall_time = float(\n assert_value(re.search(r' in (\\S+) seconds', line)).group(1))\n output.append(f'Prun: tottime {overall_time:8.3f} overall_cumtime')\n elif line.lstrip().startswith('ncalls '):\n if mode == 'full':\n output.append(line)\n post_header = True\n\n print('\\n'.join([f'#{\" \" * bool(line)}' + line for line in output]))\n\n\n## Operations on iterables\n\n\ndef repeat_each(iterable: Iterable[_T], n: int) -> Iterator[_T]:\n \"\"\"Repeat each element of iterable 'n' times.\n\n >>> list(repeat_each(list('abc'), 2))\n ['a', 'a', 'b', 'b', 'c', 'c']\n\n >>> ''.join(itertools.islice(repeat_each(itertools.cycle('abcd'), 4), 30))\n 'aaaabbbbccccddddaaaabbbbccccdd'\n \"\"\"\n # https://stackoverflow.com/a/65071833\n return itertools.chain.from_iterable(zip(*itertools.tee(iterable, n)))\n\n\ndef only(iterable: Iterable[_T]) -> _T:\n \"\"\"Return the first element 
and asserts that there are no more.\n\n >>> only(range(1))\n 0\n\n >>> only(range(2))\n Traceback (most recent call last):\n ...\n ValueError: [0, 1, ...] has more than one element\n\n >>> only(range(0))\n Traceback (most recent call last):\n ...\n StopIteration\n \"\"\"\n # Or use: return (lambda x: x)(*iterable)\n iterator = iter(iterable)\n first = next(iterator)\n missing = object()\n second = next(iterator, missing)\n if second != missing:\n raise ValueError(f'[{first}, {second}, ...] has more than one element')\n return first\n\n\ndef grouped(iterable: Iterable[_T],\n n: int,\n fillvalue: Optional[_T] = None,\n ) -> Iterator[Tuple[Optional[_T], ...]]:\n \"\"\"Return elements collected into fixed-length chunks.\n\n >>> list(grouped('ABCDEFG', 3, 'x'))\n [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]\n\n >>> list(grouped(range(5), 3))\n [(0, 1, 2), (3, 4, None)]\n\n >>> list(grouped(range(5), 3, fillvalue=9))\n [(0, 1, 2), (3, 4, 9)]\n\n >>> list(grouped(range(6), 3))\n [(0, 1, 2), (3, 4, 5)]\n\n >>> list(grouped([], 2))\n []\n \"\"\"\n # From grouper() in https://docs.python.org/3/library/itertools.html.\n iters = [iter(iterable)] * n\n return itertools.zip_longest(*iters, fillvalue=fillvalue)\n\n\ndef chunked(iterable: Iterable[_T],\n n: Optional[int] = None,\n ) -> Iterator[Tuple[_T, ...]]:\n \"\"\"Return elements collected as tuples of length at most 'n' if not None.\n\n >>> list(chunked('ABCDEFG', 3))\n [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]\n\n >>> list(chunked(range(5), 3))\n [(0, 1, 2), (3, 4)]\n\n >>> list(chunked(range(5)))\n [(0, 1, 2, 3, 4)]\n\n >>> list(chunked([]))\n []\n \"\"\"\n\n def take(n: int, iterable: Iterable[_T]) -> Tuple[_T, ...]:\n return tuple(itertools.islice(iterable, n))\n\n return iter(functools.partial(take, n, iter(iterable)), ())\n\n\ndef sliding_window(iterable: Iterable[_T], n: int) -> Iterator[Tuple[_T, ...]]:\n \"\"\"Return overlapping tuples of length `n` from `iterable`.\n\n >>> list(sliding_window('ABCDEF', 4))\n [('A', 'B', 'C', 'D'), ('B', 'C', 'D', 'E'), ('C', 'D', 'E', 'F')]\n\n >>> list(sliding_window('ABCDE', 1))\n [('A',), ('B',), ('C',), ('D',), ('E',)]\n\n >>> list(sliding_window('ABCDE', 8))\n []\n >>> list(sliding_window('A', 2))\n []\n >>> list(sliding_window('', 1))\n []\n \"\"\"\n # From https://docs.python.org/3/library/itertools.html.\n it = iter(iterable)\n window = collections.deque(itertools.islice(it, n), maxlen=n)\n if len(window) == n:\n yield tuple(window)\n for x in it:\n window.append(x)\n yield tuple(window)\n\n\ndef powerset(iterable: Iterable[_T]) -> Iterator[Tuple[_T, ...]]:\n \"\"\"Return all subsets of iterable.\n\n >>> list(powerset([1, 2, 3]))\n [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]\n\n >>> list(powerset([]))\n [()]\n \"\"\"\n # From https://docs.python.org/3/library/itertools.html.\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(len(s) + 1))\n\n\ndef peek_first(iterator: Iterable[_T]) -> Tuple[_T, Iterable[_T]]:\n \"\"\"Given an iterator, return first element and re-initialized iterator.\n\n Example:\n first_image, images = peek_first(images)\n\n Args:\n iterator: An input iterator or iterable.\n\n Returns:\n A tuple (first_element, iterator_reinitialized) containing:\n first_element: The first element of the input.\n iterator_reinitialized: A clone of the original iterator/iterable.\n\n >>> value, iter = peek_first(range(5))\n >>> value\n 0\n >>> list(iter)\n [0, 1, 2, 3, 4]\n \"\"\"\n # Inspired from 
https://stackoverflow.com/a/12059829\n peeker, iterator_reinitialized = itertools.tee(iterator)\n first = next(peeker)\n return first, iterator_reinitialized\n\n\n## Temporary variable assignment\n\n\[email protected]\ndef temporary_assignment(variables: Dict[str, Any], name: str,\n value: Any) -> Generator[None, None, None]:\n \"\"\"Temporarily assign `value` to the variable named `name` in `variables`.\n\n Args:\n variables: Usually the `globals()` of the caller module. Note that a\n function-scope `locals()` does not work as it should not be modified.\n name: Name of the variable in `variables` to temporarily assign.\n value: Value assigned to `name` in the lifetime of the context.\n\n >>> var = 1\n >>> with temporary_assignment(globals(), 'var', 2):\n ... check_eq(var, 2)\n >>> check_eq(var, 1)\n\n >>> assert 'var2' not in globals()\n >>> with temporary_assignment(globals(), 'var2', '1'):\n ... check_eq(var2, '1')\n >>> assert 'var2' not in globals()\n \"\"\"\n # https://stackoverflow.com/a/57226721.\n old_value = variables.get(name, _UNDEFINED)\n variables[name] = value\n yield\n if old_value is _UNDEFINED:\n del variables[name]\n else:\n variables[name] = old_value\n\n\n## Meta programming\n\n\[email protected] # Bare decorator.\ndef noop_decorator(func: _F) -> _F: ...\n\n\[email protected] # Decorator with arguments.\ndef noop_decorator(*args: Any, **kwargs: Any) -> Callable[[_F], _F]: ...\n\n\ndef noop_decorator(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Return function decorated with no-op; invokable with or without args.\n\n >>> @noop_decorator\n ... def func1(x): return x * 10\n\n >>> @noop_decorator()\n ... def func2(x): return x * 10\n\n >>> @noop_decorator(2, 3)\n ... def func3(x): return x * 10\n\n >>> @noop_decorator(keyword=True)\n ... def func4(x): return x * 10\n\n >>> check_eq(func1(1) + func2(1) + func3(1) + func4(1), 40)\n \"\"\"\n if len(args) != 1 or not callable(args[0]) or kwargs:\n return noop_decorator # Decorator is invoked with arguments; ignore them.\n func: Callable[[Any], Any] = args[0]\n return func\n\n\ndef terse_str(cls: type) -> type:\n \"\"\"Decorator for a dataclasses.dataclass, which defines a custom str().\n\n >>> @terse_str\n ... @dataclasses.dataclass\n ... class TestTerseStr:\n ... a: int = 3\n ... b: List[str] = dataclasses.field(default_factory=lambda: ['g', 'h'])\n\n >>> str(TestTerseStr())\n 'TestTerseStr()'\n\n >>> str(TestTerseStr(a=4))\n 'TestTerseStr(a=4)'\n\n >>> str(TestTerseStr(b=['i', 'j']))\n \"TestTerseStr(b=['i', 'j'])\"\n \"\"\"\n assert isinstance(cls, type)\n default_for_field = {\n field.name: (field.default_factory() if callable(field.default_factory)\n else field.default)\n for field in dataclasses.fields(cls)\n if field.repr\n }\n\n def __str__(self: Any) -> str:\n \"\"\"Return a string containing only the non-default field values.\"\"\"\n text = ', '.join(f'{name}={getattr(self, name)!r}'\n for name, default in default_for_field.items()\n if getattr(self, name) != default)\n return f'{type(self).__name__}({text})'\n\n setattr(cls, '__str__', __str__)\n return cls\n\n\n## Imports and modules\n\n\n# If placing this code in a package, rename this file to __init__.py\n# as discussed in https://pcarleton.com/2016/09/06/python-init/\n# to avoid long names like package.module.function. 
See the example in\n# https://github.com/python/cpython/blob/master/Lib/collections/__init__.py\n\n\ndef create_module(module_name: str, elements: Iterable[Any] = ()) -> Any:\n \"\"\"Return a new empty module (not associated with any file).\n\n >>> def some_function(*args, **kwargs): return 'success'\n >>> class Node:\n ... def __init__(self): self.attrib = 2\n >>> test_module = create_module('test_module', [some_function, Node])\n >>> test_module.some_function(10)\n 'success'\n >>> assert 'some_function' in dir(test_module)\n >>> help(test_module.some_function)\n Help on function some_function in module test_module:\n <BLANKLINE>\n some_function(*args, **kwargs)\n <BLANKLINE>\n >>> node = test_module.Node()\n >>> type(node)\n <class 'test_module.Node'>\n >>> node.attrib\n 2\n \"\"\"\n # https://stackoverflow.com/a/53080237\n module = sys.modules.get(module_name)\n if not module:\n spec = importlib.util.spec_from_loader(module_name, loader=None)\n assert spec\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n\n for element in elements:\n setattr(module, element.__name__, element)\n element.__module__ = module_name\n\n return module\n\n\n## System functions\n\n\[email protected]\ndef timing(description: str = 'Timing') -> Generator[None, None, None]:\n \"\"\"Context that reports elapsed time.\n\n Example:\n with timing('List comprehension example'):\n _ = [i for i in range(10_000_000)]\n\n Args:\n description: A string to print before the elapsed time.\n\n Yields:\n None.\n\n >>> with timing('List comprehension example'):\n ... _ = [i for i in range(10_000)] # doctest:+ELLIPSIS\n List comprehension example: 0.00...\n \"\"\"\n start = time.monotonic()\n yield\n elapsed_time = time.monotonic() - start\n print(f'{description}: {elapsed_time:.6f}')\n\n\ndef typename(o: Any) -> str:\n \"\"\"Return the full name (including module) of the type of o.\n\n >>> typename(5)\n 'int'\n\n >>> typename('text')\n 'str'\n\n >>> typename(np.array([1]))\n 'numpy.ndarray'\n \"\"\"\n # https://stackoverflow.com/a/2020083\n name: str = o.__class__.__qualname__\n module = o.__class__.__module__\n return name if module in (None, 'builtins') else f'{module}.{name}'\n\n\ndef show_biggest_vars(variables: Mapping[str, Any], n: int = 10) -> None:\n \"\"\"Print the variables with the largest memory allocation (in bytes).\n\n Usage:\n show_biggest_vars(globals())\n\n Args:\n variables: Dictionary of variables (often, `globals()`).\n n: The number of largest variables to list.\n\n >>> show_biggest_vars({'i': 12, 's': 'text', 'ar': np.ones((1000, 1000))})\n ... # doctest:+ELLIPSIS\n ar numpy.ndarray ...\n s str ...\n i int ...\n \"\"\"\n var = variables\n infos = [(name, sys.getsizeof(value), typename(value))\n for name, value in var.items()]\n infos.sort(key=lambda pair: pair[1], reverse=True)\n for name, size, vartype in infos[:n]:\n print(f'{name:24} {vartype:20} {size:_}')\n\n\n## Mathematics\n\n\ndef as_float(a: Any) -> _NDArray:\n \"\"\"Convert non-floating-point array to floating-point type.\n\n Args:\n a: Input array.\n\n Returns:\n Array 'a' if it is already floating-point (np.float32 or np.float64),\n else 'a' converted to type np.float32 or np.float64 based on the necessary\n precision. 
Note that 64-bit integers cannot be represented exactly.\n\n >>> as_float(np.array([1.0, 2.0]))\n array([1., 2.])\n\n >>> as_float(np.array([1.0, 2.0], dtype=np.float32))\n array([1., 2.], dtype=float32)\n\n >>> as_float(np.array([1.0, 2.0], dtype='float64'))\n array([1., 2.])\n\n >>> as_float(np.array([1, 2], dtype=np.uint8))\n array([1., 2.], dtype=float32)\n\n >>> as_float(np.array([1, 2], dtype=np.uint16))\n array([1., 2.], dtype=float32)\n\n >>> as_float(np.array([1, 2]))\n array([1., 2.])\n \"\"\"\n a = np.asarray(a)\n if issubclass(a.dtype.type, np.floating):\n return a\n dtype = np.float64 if np.iinfo(a.dtype).bits >= 32 else np.float32\n return a.astype(dtype)\n\n\ndef normalize(a: Any, axis: Optional[int] = None) -> _NDArray:\n \"\"\"Return array 'a' scaled such that its elements have unit 2-norm.\n\n Args:\n a: Input array.\n axis: Optional axis. If None, normalizes the entire matrix. Otherwise,\n normalizes each element along the specified axis.\n\n Returns:\n An array such that its elements along 'axis' are rescaled to have L2 norm\n equal to 1. Any element with zero norm is replaced by nan values.\n\n >>> normalize(np.array([10, 10, 0]))\n array([0.70710678, 0.70710678, 0. ])\n\n >>> normalize([[0, 10], [10, 10]], axis=-1)\n array([[0. , 1. ],\n [0.70710678, 0.70710678]])\n\n >>> normalize([[0, 10], [10, 10]], axis=0)\n array([[0. , 0.70710678],\n [1. , 0.70710678]])\n\n >>> normalize([[0, 10], [10, 10]])\n array([[0. , 0.57735027],\n [0.57735027, 0.57735027]])\n \"\"\"\n a = np.asarray(a)\n norm = np.linalg.norm(a, axis=axis)\n if axis is not None:\n norm = np.expand_dims(norm, axis)\n with np.errstate(invalid='ignore'):\n return a / norm\n\n\ndef rms(a: Any, axis: Optional[int] = None) -> Union[float, _NDArray]:\n \"\"\"Return the root mean square of the array values.\n\n >>> rms([3.0])\n 3.0\n\n >>> rms([-3.0, 4.0])\n 3.5355339059327378\n\n >>> rms([10, 11, 12])\n 11.030261405182864\n\n >>> rms([[-1.0, 1.0], [0.0, -2.0]])\n 1.224744871391589\n\n >>> rms([[-1.0, 1.0], [0.0, -2.0]], axis=-1)\n array([1. , 1.41421356])\n \"\"\"\n return np.sqrt(np.mean(np.square(as_float(a)), axis, dtype=np.float64))\n\n\ndef lenient_subtract(a: Any, b: Any) -> Any:\n \"\"\"Return a - b, but returns 0 where a and b are the same signed infinity.\n\n >>> inf = math.inf\n >>> lenient_subtract([3., 3., inf, inf, -inf, -inf],\n ... 
[1., inf, inf, -inf, inf, -inf])\n array([ 2., -inf, 0., inf, -inf, 0.])\n \"\"\"\n a = np.asarray(a)\n b = np.asarray(b)\n same_infinity = ((np.isposinf(a) & np.isposinf(b)) |\n (np.isneginf(a) & np.isneginf(b)))\n return np.subtract(a, b, out=np.zeros_like(a), where=~same_infinity)\n\n\ndef print_array(a: Any, **kwargs: Any) -> None:\n \"\"\"Print the array.\n\n >>> print_array(np.arange(6).reshape(2, 3), file=sys.stdout)\n array([[0, 1, 2],\n [3, 4, 5]]) shape=(2, 3) dtype=int64\n \"\"\"\n x = np.asarray(a)\n print_err(f'{repr(x)} shape={x.shape} dtype={x.dtype}', **kwargs)\n\n\ndef prime_factors(n: int) -> List[int]:\n \"\"\"Return an ascending list of the (greather-than-one) prime factors of n.\n\n >>> prime_factors(1)\n []\n\n >>> prime_factors(2)\n [2]\n\n >>> prime_factors(4)\n [2, 2]\n\n >>> prime_factors(60)\n [2, 2, 3, 5]\n \"\"\"\n factors = []\n d = 2\n while d * d <= n:\n while (n % d) == 0:\n factors.append(d)\n n //= d\n d += 1\n if n > 1:\n factors.append(n)\n return factors\n\n\ndef extended_gcd(a: int, b: int) -> Tuple[int, int, int]:\n \"\"\"Find the greatest common divisor using the extended Euclidean algorithm.\n\n Returns:\n A tuple (gcd, x, y) with the property that a * x + b * y = gcd.\n\n >>> extended_gcd(29, 71)\n (1, -22, 9)\n >>> (29 * -22) % 71\n 1\n \"\"\"\n prev_x, x = 1, 0\n prev_y, y = 0, 1\n while b:\n q = a // b\n x, prev_x = prev_x - q * x, x\n y, prev_y = prev_y - q * y, y\n a, b = b, a % b\n x, y = prev_x, prev_y\n return a, x, y\n\n\ndef modular_inverse(a: int, b: int) -> int:\n \"\"\"Return the multiplicative inverse of 'a' with respect to the modulus 'b'.\n\n With the extended Euclidean algorithm, for the case that a and b are coprime,\n i.e. gcd(a, b) = 1, applying \"modulo b\" to both sides of a * x + b * y = 1\n results in (a * x) % b = 1, and hence 'x' is a modular multiplicative inverse\n of 'a' with respect to the modulus 'b'.\n See https://en.wikipedia.org/wiki/Modular_multiplicative_inverse\n\n >>> modular_inverse(29, 71)\n 49\n >>> (29 * 49) % 71\n 1\n \"\"\"\n # Note: This becomes available as \"pow(a, -1, mod=b)\" in Python 3.8.\n gcd, x, unused_y = extended_gcd(a, b)\n check_eq(gcd, 1)\n return x % b\n\n\ndef diagnostic(a: Any) -> str:\n \"\"\"Return a diagnostic string summarizing the values in 'a' for debugging.\n\n Args:\n a: Input values; must be convertible to an np.ndarray of scalars.\n\n Returns:\n A string summarizing the different types of arithmetic values.\n\n >>> import textwrap\n >>> print(textwrap.fill(diagnostic(\n ... 
[[math.nan, math.inf, -math.inf, -math.inf], [0, -1, 2, -0]])))\n shape=(2, 4) dtype=float64 size=8 nan=1 posinf=1 neginf=2 finite=4,\n min=-1.0, max=2.0, avg=0.25, sdv=1.25831) zero=2\n \"\"\"\n a = np.asarray(a)\n dtype = a.dtype\n if dtype == bool:\n a = a.astype(np.uint8)\n finite = a[np.isfinite(a)]\n return (f'shape={a.shape} dtype={dtype} size={a.size}'\n f' nan={np.isnan(a).sum()}'\n f' posinf={np.isposinf(a).sum()}'\n f' neginf={np.isneginf(a).sum()}'\n f' finite{repr(Stats(finite))[10:]}'\n f' zero={(finite == 0).sum()}')\n\n\n## Statistics\n\n\nclass Stats:\n r\"\"\"Statistics computed from numbers in an iterable.\n\n >>> Stats()\n Stats(size=0, min=inf, max=-inf, avg=nan, sdv=nan)\n\n >>> Stats([1.5])\n Stats(size=1, min=1.5, max=1.5, avg=1.5, sdv=0.0)\n\n >>> Stats(range(3, 5))\n Stats(size=2, min=3, max=4, avg=3.5, sdv=0.707107)\n\n >>> Stats([3.0, 4.0])\n Stats(size=2, min=3.0, max=4.0, avg=3.5, sdv=0.707107)\n\n >>> Stats([-12345., 2.0**20])\n Stats(size=2, min=-12345.0, max=1.04858e+06, avg=5.18116e+05, sdv=7.50184e+05)\n\n >>> print(Stats(range(55)))\n ( 55) 0 : 54 av=27.0000 sd=16.0208\n\n >>> print(Stats())\n ( 0) inf : -inf av=nan sd=nan\n\n >>> str(Stats() + Stats([3.0]))\n '( 1) 3.00000 : 3.00000 av=3.00000 sd=0.00000'\n\n >>> print(f'{Stats([-12345., 2.0**20]):14.9}')\n ( 2) -12345.0 : 1048576.0 av=518115.5 sd=750184.433\n\n >>> print(f'{Stats([-12345., 2.0**20]):#10.4}')\n ( 2) -1.234e+04 : 1.049e+06 av=5.181e+05 sd=7.502e+05\n\n >>> len(Stats([1, 2]))\n 2\n >>> Stats([-2, 2]).rms()\n 2.0\n\n >>> a = Stats([1, 2])\n >>> a.min(), a.max(), a.avg()\n (1, 2, 1.5)\n\n >>> stats1 = Stats([-3, 7])\n >>> stats2 = Stats([1.25e11 / 3, -1_234_567_890])\n >>> stats3 = stats1 + stats2 * 20_000_000\n >>> print(stats1, f'{stats2}', format(stats3), sep='\\n')\n ( 2) -3 : 7 av=2.00000 sd=7.07107\n ( 2) -1.23457e+09 : 4.16667e+10 av=2.02160e+10 sd=3.03358e+10\n ( 40_000_002) -1.23457e+09 : 4.16667e+10 av=2.02160e+10 sd=2.14506e+10\n\n >>> fmt = '9.3'\n >>> print(f'{stats1:{fmt}}', f'{stats2:{fmt}}', f'{stats3:{fmt}}', sep='\\n')\n ( 2) -3 : 7 av=2.0 sd=7.07\n ( 2) -1.23e+09 : 4.17e+10 av=2.02e+10 sd=3.03e+10\n ( 40_000_002) -1.23e+09 : 4.17e+10 av=2.02e+10 sd=2.15e+10\n \"\"\"\n\n _size: int\n _sum: float\n _sum2: float\n _min: float\n _max: float\n\n def __init__(self, *args: Any) -> None:\n if not args:\n self._size = 0\n self._sum = 0.0\n self._sum2 = 0.0\n self._min = math.inf\n self._max = -math.inf\n elif len(args) == 1:\n a = array_always(args[0])\n self._size = a.size\n self._sum = a.sum()\n self._sum2 = np.square(a).sum()\n self._min = a.min() if a.size > 0 else math.inf\n self._max = a.max() if a.size > 0 else -math.inf\n else:\n (self._size, self._sum, self._sum2, self._min, self._max) = args\n\n def sum(self) -> float:\n \"\"\"Return the sum of the values.\n\n >>> f'{Stats([3.5, 2.2, 4.4]).sum():.8g}'\n '10.1'\n \"\"\"\n return self._sum\n\n def min(self) -> float:\n \"\"\"Return the minimum value.\n\n >>> Stats([3.5, 2.2, 4.4]).min()\n 2.2\n \"\"\"\n return self._min\n\n def max(self) -> float:\n \"\"\"Return the maximum value.\n\n >>> Stats([3.5, 2.2, 4.4]).max()\n 4.4\n \"\"\"\n return self._max\n\n def avg(self) -> float:\n \"\"\"Return the average.\n\n >>> Stats([1, 1, 4]).avg()\n 2.0\n \"\"\"\n return math.nan if self._size == 0 else self._sum / self._size\n\n def ssd(self) -> float:\n \"\"\"Return the sum of squared deviations.\n\n >>> Stats([1, 1, 4]).ssd()\n 6.0\n \"\"\"\n return (math.nan if self._size == 0 else\n max(self._sum2 - self._sum**2 / self._size, 
0))\n\n def var(self) -> float:\n \"\"\"Return the unbiased estimate of variance, as in np.var(a, ddof=1).\n\n >>> Stats([1, 1, 4]).var()\n 3.0\n \"\"\"\n return (math.nan if self._size == 0 else\n 0.0 if self._size == 1 else\n self.ssd() / (self._size - 1))\n\n def sdv(self) -> float:\n \"\"\"Return the unbiased standard deviation as in np.std(a, ddof=1).\n\n >>> Stats([1, 1, 4]).sdv()\n 1.7320508075688772\n \"\"\"\n return self.var()**0.5\n\n def rms(self) -> float:\n \"\"\"Return the root-mean-square.\n\n >>> Stats([1, 1, 4]).rms()\n 2.449489742783178\n >>> Stats([-1, 1]).rms()\n 1.0\n \"\"\"\n return 0.0 if self._size == 0 else (self._sum2 / self._size)**0.5\n\n def __format__(self, format_spec: str = '') -> str:\n \"\"\"Return a summary of the statistics (size, min, max, avg, sdv).\"\"\"\n fmt = format_spec if format_spec else '#12.6'\n fmt_int = fmt[:fmt.find('.')] if fmt.find('.') >= 0 else ''\n fmt_min = fmt if isinstance(self._min, np.floating) else fmt_int\n fmt_max = fmt if isinstance(self._max, np.floating) else fmt_int\n return (f'({self._size:11_})'\n f' {self._min:{fmt_min}} :'\n f' {self._max:<{fmt_max}}'\n f' av={self.avg():<{fmt}}'\n f' sd={self.sdv():<{fmt}}').rstrip()\n\n def __str__(self) -> str:\n return self.__format__()\n\n def __repr__(self) -> str:\n fmt = '.6'\n fmt_int = ''\n fmt_min = fmt if isinstance(self._min, np.floating) else fmt_int\n fmt_max = fmt if isinstance(self._max, np.floating) else fmt_int\n return (f'Stats(size={self._size}, '\n f'min={self._min:{fmt_min}}, '\n f'max={self._max:{fmt_max}}, '\n f'avg={self.avg():{fmt}}, '\n f'sdv={self.sdv():{fmt}})')\n\n def __len__(self) -> int:\n return self._size\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Stats):\n return NotImplemented\n return ((self._size, self._sum, self._sum2, self._min, self._max) ==\n (other._size, other._sum, other._sum2, other._min, other._max))\n\n def __add__(self, other: 'Stats') -> 'Stats':\n \"\"\"Return combined statistics.\n\n >>> Stats([2, -1]) + Stats([7, 5]) == Stats([-1, 2, 5, 7])\n True\n \"\"\"\n return Stats(self._size + other._size, self._sum + other._sum,\n self._sum2 + other._sum2, min(self._min, other._min),\n max(self._max, other._max))\n\n def __mul__(self, n: int) -> 'Stats':\n \"\"\"Return statistics whereby each element appears 'n' times.\n\n >>> Stats([4, -2]) * 3 == Stats([-2, -2, -2, 4, 4, 4])\n True\n \"\"\"\n return Stats(\n self._size * n, self._sum * n, self._sum2 * n, self._min, self._max)\n\n\n## Numpy operations\n\n\ndef array_always(a: Any) -> _NDArray:\n \"\"\"Return a numpy array even if a is an iterator of subarrays.\n\n >>> array_always(np.array([[1, 2], [3, 4]]))\n array([[1, 2],\n [3, 4]])\n\n >>> array_always(range(3) for _ in range(2))\n array([[0, 1, 2],\n [0, 1, 2]])\n\n >>> array_always(np.array([[1, 2], [3, 4]]))\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if isinstance(a, collections.abc.Iterator):\n return np.array(tuple(a))\n return np.asarray(a)\n\n\ndef bounding_slices(a: Any) -> Tuple[slice, ...]:\n \"\"\"Return the slices that bound the nonzero elements of array.\n\n >>> bounding_slices(())\n (slice(0, 0, None),)\n\n >>> bounding_slices(np.ones(0))\n (slice(0, 0, None),)\n\n >>> bounding_slices(np.ones((0, 10)))\n (slice(0, 0, None), slice(0, 0, None))\n\n >>> bounding_slices(32.0)\n (slice(0, 1, None),)\n\n >>> bounding_slices([0.0, 0.0, 0.0, 0.5, 1.5, 0.0, 2.5, 0.0, 0.0])\n (slice(3, 7, None),)\n\n >>> a = np.array([0, 0, 6, 7, 0, 0])\n >>> a[bounding_slices(a)]\n array([6, 7])\n\n >>> a = np.array([[0, 0, 
0], [0, 1, 1], [0, 0, 0]])\n >>> a[bounding_slices(a)]\n array([[1, 1]])\n\n >>> bounding_slices([[[0, 0], [0, 1]], [[0, 0], [0, 0]]])\n (slice(0, 1, None), slice(1, 2, None), slice(1, 2, None))\n \"\"\"\n a = np.atleast_1d(a)\n slices = []\n for dim in range(a.ndim):\n line = a.any(axis=tuple(i for i in range(a.ndim) if i != dim))\n indices = line.nonzero()[0]\n if indices.size:\n vmin, vmax = indices[[0, -1]]\n slices.append(slice(vmin, vmax + 1))\n else:\n slices.append(slice(0, 0)) # Empty slice.\n return tuple(slices)\n\n\ndef broadcast_block(a: Any, block_shape: Any) -> _NDArray:\n \"\"\"Return an array view where each element of 'a' is repeated as a block.\n\n Args:\n a: input array of any dimension.\n block_shape: shape for the block that each element of 'a' becomes. If a\n scalar value, all block dimensions are assigned this value.\n\n Returns:\n an array view with shape \"a.shape * block_shape\".\n\n >>> print(broadcast_block(np.arange(8).reshape(2, 4), (2, 3)))\n [[0 0 0 1 1 1 2 2 2 3 3 3]\n [0 0 0 1 1 1 2 2 2 3 3 3]\n [4 4 4 5 5 5 6 6 6 7 7 7]\n [4 4 4 5 5 5 6 6 6 7 7 7]]\n\n >>> a = np.arange(6).reshape(2, 3)\n >>> result = broadcast_block(a, (2, 3))\n >>> result.shape\n (4, 9)\n >>> np.all(result == np.kron(a, np.ones((2, 3), dtype=a.dtype)))\n True\n \"\"\"\n block_shape = np.broadcast_to(block_shape, (a.ndim,))\n # Inspired from https://stackoverflow.com/a/52339952\n # and https://stackoverflow.com/a/52346065\n shape1 = tuple(v for pair in zip(a.shape, (1,) * a.ndim) for v in pair)\n shape2 = tuple(v for pair in zip(a.shape, block_shape) for v in pair)\n final_shape = a.shape * block_shape\n return np.broadcast_to(a.reshape(shape1), shape2).reshape(final_shape)\n\n\ndef np_int_from_ch(a: Any, int_from_ch: Mapping[str, int],\n dtype: Any = None) -> _NDArray:\n \"\"\"Return array of integers by mapping from array of characters.\n\n >>> np_int_from_ch(np.array(list('abcab')), {'a': 0, 'b': 1, 'c': 2})\n array([0, 1, 2, 0, 1])\n \"\"\"\n# Adapted from https://stackoverflow.com/a/49566980\n a = np.asarray(a).view(np.int32)\n lookup = np.zeros(a.max() + 1, dtype=dtype or np.int64)\n for ch, value in int_from_ch.items():\n lookup[ord(ch)] = value\n return lookup[a]\n\n\ndef grid_from_string(string: str,\n int_from_ch: Optional[Mapping[str, int]] = None,\n dtype: Any = None) -> _NDArray:\n r\"\"\"Return a 2D array created from a multiline string.\n\n Args:\n string: Nonempty lines correspond to the rows of the grid, with one chr\n per grid element.\n int_from_ch: Mapping from the chr in string to integers in the resulting\n grid; if None, the grid contains chr elements (dtype='<U1').\n dtype: Integer element type for the result of int_from_ch.\n\n >>> string = '..B\\nB.A\\n'\n >>> g = grid_from_string(string)\n >>> g, g.nbytes\n (array([['.', '.', 'B'],\n ['B', '.', 'A']], dtype='<U1'), 24)\n\n >>> g = grid_from_string(string, {'.': 0, 'A': 1, 'B': 2})\n >>> g, g.nbytes\n (array([[0, 0, 2],\n [2, 0, 1]]), 48)\n\n >>> g = grid_from_string(string, {'.': 0, 'A': 1, 'B': 2}, dtype=np.uint8)\n >>> g, g.nbytes\n (array([[0, 0, 2],\n [2, 0, 1]], dtype=uint8), 6)\n \"\"\"\n # grid = np.array(list(map(list, string.strip('\\n').split('\\n')))) # Slow.\n lines = string.strip('\\n').splitlines()\n height, width = len(lines), len(lines[0])\n grid = np.empty((height, width), dtype='U1')\n dtype_for_row = f'U{width}'\n for i, line in enumerate(lines):\n grid[i].view(dtype_for_row)[0] = line\n\n if int_from_ch is None:\n assert dtype is None\n else:\n grid = np_int_from_ch(grid, int_from_ch, 
dtype=dtype)\n return grid\n\n\ndef string_from_grid(grid: Any,\n ch_from_int: Optional[Mapping[int, str]] = None) -> str:\n r\"\"\"Return a multiline string created from a 2D array.\n\n Args:\n grid: 2D array-like data containing either chr or integers.\n ch_from_int: Mapping from each integer in grid to the chr in the resulting\n string; if None, the grid must contain str or byte elements.\n\n >>> string_from_grid([[0, 1], [0, 0]], {0: '.', 1: '#'})\n '.#\\n..'\n\n >>> string_from_grid([['a', 'b', 'c'], ['d', 'e', 'f']])\n 'abc\\ndef'\n\n >>> string_from_grid([[b'A', b'B'], [b'C', b'D']])\n 'AB\\nCD'\n \"\"\"\n grid = np.asarray(grid)\n check_eq(grid.ndim, 2)\n lines = []\n for y in range(grid.shape[0]):\n if ch_from_int is None:\n if grid.dtype.kind == 'S': # or dtype.type == np.bytes_\n line = b''.join(grid[y]).decode('ascii')\n else:\n line = ''.join(grid[y])\n else:\n line = ''.join(ch_from_int[elem] for elem in grid[y])\n lines.append(line)\n return '\\n'.join(lines)\n\n\ndef grid_from_indices(iterable_or_map: Union[Iterable[Sequence[int]],\n Mapping[Sequence[int], Any]],\n background: Any = 0,\n foreground: Any = 1,\n indices_min: Optional[Union[int, Sequence[int]]] = None,\n indices_max: Optional[Union[int, Sequence[int]]] = None,\n pad: Union[int, Sequence[int]] = 0,\n dtype: Any = None) -> _NDArray:\n r\"\"\"Return an array from (sparse) indices or from a map {index: value}.\n\n Indices are sequences of integers with some length D, which determines the\n dimensionality of the output array. The array shape is computed by bounding\n the range of index coordinates in each dimension (which may be overriden by\n 'indices_min' and 'indices_max') and is adjusted by the 'pad' parameter.\n\n Args:\n iterable_or_map: A sequence of indices or a mapping from indices to values.\n background: Value assigned to the array elements not in 'iterable_or_map'.\n foreground: If 'iterable_or_map' is an iterable, the array value assigned to\n its indices.\n indices_min: For each dimension, the index coordinate that gets mapped to\n coordinate zero in the array. Replicated if an integer.\n indices_max: For each dimension, the index coordinate that gets mapped to\n the last coordinate in the array. 
Replicated if an integer.\n pad: For each dimension d, number of additional slices of 'background'\n values before and after the range [indices_min[d], indices_max[d]].\n dtype: Data type of the output array.\n\n Returns:\n A D-dimensional numpy array initialized with the value 'background' and\n then sparsely assigned the elements in the parameter 'iterable_or_map'\n (using 'foreground' value if an iterable, or the map values if a map).\n By default, array spans a tight bounding box of the indices, but these\n bounds can be overridden using 'indices_min', 'indices_max', and 'pad'.\n\n >>> l = [(-1, -2), (-1, 1), (1, 0)]\n >>> grid_from_indices(l)\n array([[1, 0, 0, 1],\n [0, 0, 0, 0],\n [0, 0, 1, 0]])\n\n >>> grid_from_indices(l, indices_max=(1, 2))\n array([[1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0]])\n\n >>> grid_from_indices(l, foreground='#', background='.')\n array([['#', '.', '.', '#'],\n ['.', '.', '.', '.'],\n ['.', '.', '#', '.']], dtype='<U1')\n\n >>> l = [5, -2, 1]\n >>> grid_from_indices(l, pad=1)\n array([0, 1, 0, 0, 1, 0, 0, 0, 1, 0])\n\n >>> grid_from_indices(l, indices_min=-4, indices_max=5)\n array([0, 0, 1, 0, 0, 1, 0, 0, 0, 1])\n\n >>> l = [(1, 1, 1), (2, 2, 2), (2, 1, 1)]\n >>> repr(grid_from_indices(l))\n 'array([[[1, 0],\\n [0, 0]],\\n\\n [[1, 0],\\n [0, 1]]])'\n\n >>> m = {(-1, 0): 'A', (0, 2): 'B', (1, 1): 'C'}\n >>> grid_from_indices(m, background=' ')\n array([['A', ' ', ' '],\n [' ', ' ', 'B'],\n [' ', 'C', ' ']], dtype='<U1')\n\n >>> grid_from_indices(m, background=' ', dtype='S1')\n array([[b'A', b' ', b' '],\n [b' ', b' ', b'B'],\n [b' ', b'C', b' ']], dtype='|S1')\n\n >>> grid_from_indices({(0, 0): (255, 1, 2), (1, 2): (3, 255, 4)})\n array([[[255, 1, 2],\n [ 0, 0, 0],\n [ 0, 0, 0]],\n <BLANKLINE>\n [[ 0, 0, 0],\n [ 0, 0, 0],\n [ 3, 255, 4]]])\n \"\"\"\n assert isinstance(iterable_or_map, collections.abc.Iterable)\n is_map = False\n if isinstance(iterable_or_map, collections.abc.Mapping):\n is_map = True\n mapping: Mapping[Sequence[int], Any] = iterable_or_map\n\n indices = np.array(list(iterable_or_map))\n if indices.ndim == 1:\n indices = indices[:, None]\n assert indices.ndim == 2 and np.issubdtype(indices.dtype, np.integer)\n\n def get_min_or_max_bound(f: Any, x: Any) -> _NDArray:\n return f(indices, axis=0) if x is None else np.full(indices.shape[1], x)\n\n i_min = get_min_or_max_bound(np.min, indices_min)\n i_max = get_min_or_max_bound(np.max, indices_max)\n a_pad = np.asarray(pad)\n shape = i_max - i_min + 2 * a_pad + 1\n offset = -i_min + a_pad\n elems = [next(iter(mapping.values()))] if is_map and mapping else []\n elems += [background, foreground]\n shape = (*shape, *np.broadcast(*elems).shape)\n dtype = np.array(elems[0], dtype=dtype).dtype\n grid = np.full(shape, background, dtype=dtype)\n indices += offset\n grid[tuple(indices.T)] = list(mapping.values()) if is_map else foreground\n return grid\n\n\ndef image_from_yx_map(map_yx_value: Mapping[Tuple[int, int], Any],\n background: Any,\n cmap: Mapping[Any, Tuple[numbers.Integral,\n numbers.Integral,\n numbers.Integral]],\n pad: Union[int, Sequence[int]] = 0) -> _NDArray:\n \"\"\"Return image from mapping {yx: value} and cmap = {value: rgb}.\n\n >>> m = {(2, 2): 'A', (2, 4): 'B', (1, 3): 'A'}\n >>> cmap = {'A': (100, 1, 2), 'B': (3, 200, 4), ' ': (235, 235, 235)}\n >>> image_from_yx_map(m, background=' ', cmap=cmap)\n array([[[235, 235, 235],\n [100, 1, 2],\n [235, 235, 235]],\n <BLANKLINE>\n [[100, 1, 2],\n [235, 235, 235],\n [ 3, 200, 4]]], dtype=uint8)\n \"\"\"\n array = 
grid_from_indices(map_yx_value, background=background, pad=pad)\n image = np.array([\n cmap[e] for e in array.flat # pylint: disable=not-an-iterable\n ], dtype=np.uint8).reshape(*array.shape, 3)\n return image\n\n\ndef fit_shape(shape: Sequence[int], num: int) -> Tuple[int, ...]:\n \"\"\"Given 'shape' with one optional -1 dimension, make it fit 'num' elements.\n\n Args:\n shape: Input dimensions. These must be positive, except that one dimension\n may be -1 to indicate that it should be computed. If all dimensions are\n positive, these must satisfy np.prod(shape) >= num.\n num: Number of elements to fit into the output shape.\n\n Returns:\n The original 'shape' if all its dimensions are positive. Otherwise, a\n new_shape where the unique dimension with value -1 is replaced by the\n smallest number such that np.prod(new_shape) >= num.\n\n >>> fit_shape((3, 4), 10)\n (3, 4)\n\n >>> fit_shape((5, 2), 11)\n Traceback (most recent call last):\n ...\n ValueError: (5, 2) is insufficiently large for 11 elements.\n\n >>> fit_shape((3, -1), 10)\n (3, 4)\n\n >>> fit_shape((-1, 10), 51)\n (6, 10)\n \"\"\"\n shape = tuple(shape)\n if not all(dim > 0 for dim in shape if dim != -1):\n raise ValueError(f'Shape {shape} has non-positive dimensions.')\n if sum(dim == -1 for dim in shape) > 1:\n raise ValueError(f'More than one dimension in {shape} is -1.')\n if -1 in shape:\n slice_size = np.prod([dim for dim in shape if dim != -1])\n shape = tuple((num + slice_size - 1) // slice_size if dim == -1 else dim\n for dim in shape)\n elif np.prod(shape) < num:\n raise ValueError(f'{shape} is insufficiently large for {num} elements.')\n return shape\n\n\ndef assemble_arrays(arrays: Sequence[_NDArray],\n shape: Sequence[int],\n background: Any = 0,\n *,\n align: str = 'center',\n spacing: Any = 0,\n round_to_even: Any = False) -> _NDArray:\n \"\"\"Return an output array formed as a packed grid of input arrays.\n\n Args:\n arrays: Sequence of input arrays with the same data type and rank. The\n arrays must have the same trailing dimensions arrays[].shape[len(shape):].\n The leading dimensions arrays[].shape[:len(shape)] may be different and\n these are packed together as a grid to form output.shape[:len(shape)].\n shape: Dimensions of the grid used to unravel the arrays before packing. The\n dimensions must be positive, with prod(shape) >= len(arrays). One\n dimension of shape may be -1, in which case it is computed automatically\n as the smallest value such that prod(shape) >= len(arrays).\n background: Broadcastable value used for the unassigned elements of the\n output array.\n align: Relative position ('center', 'start', or 'stop') for each input array\n and for each axis within its output grid cell. The value must be\n broadcastable onto the shape [len(arrays), len(shape)].\n spacing: Extra space between grid elements. The value may be specified\n per-axis, i.e., it must be broadcastable onto the shape [len(shape)].\n round_to_even: If True, ensure that the final output dimension of each axis\n is even. The value must be broadcastable onto the shape [len(shape)].\n\n Returns:\n A numpy output array of the same type as the input 'arrays', with\n output.shape = packed_shape + arrays[0].shape[len(shape):], where\n packed_shape is obtained by packing arrays[:].shape[:len(shape)] into a\n grid of the specified 'shape'.\n\n >>> assemble_arrays(\n ... [np.array([[1, 2, 3]]), np.array([[5], [6]]), np.array([[7]]),\n ... np.array([[8, 9]]), np.array([[3, 4, 5]])],\n ... 
shape=(2, 3))\n array([[1, 2, 3, 0, 5, 0, 7],\n [0, 0, 0, 0, 6, 0, 0],\n [8, 9, 0, 3, 4, 5, 0]])\n \"\"\"\n num = len(arrays)\n if num == 0:\n raise ValueError('There must be at least one input array.')\n shape = fit_shape(shape, num)\n if any(array.dtype != arrays[0].dtype for array in arrays):\n raise ValueError(f'Arrays {arrays} have different types.')\n tail_dims = arrays[0].shape[len(shape):]\n if any(array.shape[len(shape):] != tail_dims for array in arrays):\n raise ValueError(f'Shapes of {arrays} do not all end in {tail_dims}')\n align = np.broadcast_to(align, (num, len(shape)))\n spacing = np.broadcast_to(spacing, (len(shape)))\n round_to_even = np.broadcast_to(round_to_even, (len(shape)))\n\n # [shape] -> leading dimensions [:len(shape)] of each input array.\n head_dims = np.array([list(array.shape[:len(shape)]) for array in arrays] +\n [[0] * len(shape)] * (np.prod(shape) - num)).reshape(\n *shape, len(shape))\n\n # For each axis, find the length and position of each slice of input arrays.\n axis_lengths, axis_origins = [], []\n for axis, shape_axis in enumerate(shape):\n all_lengths = np.moveaxis(head_dims[..., axis], axis, 0)\n # Find the length of each slice along axis as the max over its arrays.\n lengths = all_lengths.max(axis=tuple(range(1, len(shape))))\n # Compute the dimension of the output axis.\n total_length = lengths.sum() + spacing[axis] * (shape_axis - 1)\n if round_to_even[axis] and total_length % 2 == 1:\n lengths[-1] += 1 # Lengthen the last slice so the axis dimension is even.\n axis_lengths.append(lengths)\n # Insert inter-element padding spaces.\n spaced_lengths = np.insert(lengths, 0, 0)\n spaced_lengths[1:-1] += spacing[axis]\n # Compute slice positions along axis as cumulative sums of slice lengths.\n axis_origins.append(spaced_lengths.cumsum())\n\n # [shape] -> smallest corner coords in output array.\n origins = np.moveaxis(np.meshgrid(*axis_origins, indexing='ij'), 0, -1)\n\n # Initialize the output array.\n output_shape = tuple(origins[(-1,) * len(shape)]) + tail_dims\n output_array = np.full(output_shape, background, dtype=arrays[0].dtype)\n\n def offset(length: int, size: int, align: str) -> int:\n \"\"\"Return an offset to align element of given size within cell of length.\"\"\"\n remainder = length - size\n if align not in ('start', 'stop', 'center'):\n raise ValueError(f'Alignment {align} is not recognized.')\n return (0 if align == 'start' else\n remainder if align == 'stop' else remainder // 2)\n\n # Copy each input array to its packed, aligned location in the output array.\n for i, array in enumerate(arrays):\n coords = np.unravel_index(i, shape)\n slices = []\n for axis in range(len(shape)):\n start = origins[coords][axis]\n length = axis_lengths[axis][coords[axis]]\n extent = array.shape[axis]\n aligned_start = start + offset(length, extent, align[i][axis])\n slices.append(slice(aligned_start, aligned_start + extent))\n output_array[tuple(slices)] = array\n\n return output_array\n\n\ndef shift(array: Any, offset: Any, constant_values: Any = 0) -> _NDArray:\n \"\"\"Return a copy of the array shifted by offset, with fill using constant.\n\n >>> array = np.arange(1, 13).reshape(3, 4)\n\n >>> shift(array, (1, 1))\n array([[0, 0, 0, 0],\n [0, 1, 2, 3],\n [0, 5, 6, 7]])\n\n >>> shift(array, (-1, -2), constant_values=-1)\n array([[ 7, 8, -1, -1],\n [11, 12, -1, -1],\n [-1, -1, -1, -1]])\n \"\"\"\n array = np.asarray(array)\n offset = np.atleast_1d(offset)\n assert offset.shape == (array.ndim,)\n new_array = np.empty_like(array)\n\n def slice_axis(o: 
int) -> slice:\n return slice(o, None) if o >= 0 else slice(0, o)\n\n new_array[tuple(slice_axis(o) for o in offset)] = (\n array[tuple(slice_axis(-o) for o in offset)])\n\n for axis, o in enumerate(offset):\n new_array[(slice(None),) * axis +\n (slice(0, o) if o >= 0 else slice(o, None),)] = constant_values\n\n return new_array\n\n\n## Graph algorithms\n\n\nclass UnionFind:\n \"\"\"Union-find is an efficient technique for tracking equivalence classes as\n pairs of elements are incrementally unified into the same class. See\n https://en.wikipedia.org/wiki/Disjoint-set_data_structure .\n The implementation uses path compression but without weight-balancing, so the\n worst case time complexity is O(n*log(n)), but the average case is O(n).\n\n >>> union_find = UnionFind()\n >>> union_find.find(1)\n 1\n >>> union_find.find('hello')\n 'hello'\n >>> union_find.same('hello', 'hello')\n True\n >>> union_find.same('hello', 'different')\n False\n >>> union_find.union('hello', 'there')\n >>> union_find.find('hello')\n 'hello'\n >>> union_find.find('there')\n 'hello'\n >>> union_find.same('hello', 'there')\n True\n >>> union_find.union('there', 'here')\n >>> union_find.same('hello', 'here')\n True\n \"\"\"\n\n def __init__(self) -> None:\n self._rep: Dict[Any, Any] = {}\n\n def union(self, a: Any, b: Any) -> None:\n \"\"\"Merge the equivalence class of b into that of a.\n\n >>> union_find = UnionFind()\n >>> union_find.union(1, 2)\n >>> assert union_find.same(1, 2) and not union_find.same(2, 3)\n \"\"\"\n rep_a, rep_b = self.find(a), self.find(b)\n self._rep[rep_b] = rep_a\n\n def same(self, a: Any, b: Any) -> bool:\n \"\"\"Return whether a and b are in the same equivalence class.\n\n >>> union_find = UnionFind()\n >>> assert not union_find.same((1, 2), (2, 3))\n >>> union_find.union((1, 2), (2, 3))\n >>> assert union_find.same((1, 2), (2, 3))\n \"\"\"\n result: bool = self.find(a) == self.find(b)\n return result\n\n def find(self, a: Any) -> Any:\n \"\"\"Return a representative for the class of a; valid until next union().\n\n >>> union_find = UnionFind()\n >>> union_find.union('a', 'b')\n >>> check_eq(union_find.find('a'), 'a')\n >>> check_eq(union_find.find('b'), 'a')\n >>> check_eq(union_find.find('c'), 'c')\n >>> union_find.union('d', 'a')\n >>> check_eq(union_find.find('b'), 'd')\n \"\"\"\n if a not in self._rep:\n return a\n parents = []\n while True:\n parent = self._rep.setdefault(a, a)\n if parent == a:\n break\n parents.append(a)\n a = parent\n for p in parents:\n self._rep[p] = a\n return a\n\n\ndef topological_sort(graph: Mapping[_T, Sequence[_T]],\n cycle_check: bool = False) -> List[_T]:\n \"\"\"Given a dag (directed acyclic graph), return a list of graph nodes such\n that for every directed edge (u, v) in the graph, u is before v in the list.\n See https://en.wikipedia.org/wiki/Topological_sorting and\n https://stackoverflow.com/a/47234034 .\n\n >>> graph = {2: [3], 3: [4], 1: [2], 4: []}\n >>> topological_sort(graph)\n [1, 2, 3, 4]\n\n >>> topological_sort({2: [3], 3: [4, 5], 1: [2], 4: [5], 5: []})\n [1, 2, 3, 4, 5]\n \"\"\"\n if sys.version_info > (3, 9):\n import graphlib # pylint: disable=import-error\n return list(graphlib.TopologicalSorter(graph).static_order())[::-1]\n\n result = []\n seen = set()\n\n def recurse(node: _T) -> None:\n for dependent in reversed(graph[node]):\n if dependent not in seen:\n seen.add(dependent)\n recurse(dependent)\n result.append(node)\n\n all_dependents: Set[_T] = set()\n all_dependents.update(*graph.values())\n for node in reversed(list(graph)): # 
(reversed(graph) in Python 3.8).\n if node not in all_dependents:\n recurse(node)\n\n if cycle_check:\n position = {node: i for i, node in enumerate(result)}\n for node, dependents in graph.items():\n for dependent in dependents:\n if position[node] < position[dependent]:\n raise ValueError('Graph contains a cycle')\n\n return result[::-1]\n\n\n## Search algorithms\n\n\ndef discrete_binary_search(feval: Callable[[Any], Any], xl: Any, xh: Any,\n y_desired: Any) -> Any:\n \"\"\"Return x such that feval(x) <= y_desired < feval(x + 1),\n\n Parameters must satisfy xl < xh and feval(xl) <= y_desired < feval(xh).\n\n >>> discrete_binary_search(lambda x: x**2, 0, 20, 15)\n 3\n >>> discrete_binary_search(lambda x: x**2, 0, 20, 16)\n 4\n >>> discrete_binary_search(lambda x: x**2, 0, 20, 17)\n 4\n >>> discrete_binary_search(lambda x: x**2, 0, 20, 24)\n 4\n >>> discrete_binary_search(lambda x: x**2, 0, 20, 25)\n 5\n \"\"\"\n assert xl < xh\n while xh - xl > 1:\n xm = (xl + xh) // 2\n ym = feval(xm)\n if y_desired >= ym:\n xl = xm\n else:\n xh = xm\n return xl\n\n\n## General I/O\n\n\ndef write_contents(path: str, data: Union[str, bytes]) -> None:\n \"\"\"Write data (either utf-8 string or bytes) to file.\n\n >>> with tempfile.TemporaryDirectory() as dir:\n ... path = pathlib.Path(dir) / 'file'\n ... write_contents(path, b'hello')\n ... check_eq(path.read_bytes(), b'hello')\n ... write_contents(path, 'hello2')\n ... check_eq(path.read_text(), 'hello2')\n \"\"\"\n bytes_data: bytes = data if isinstance(data, bytes) else data.encode()\n with open(path, 'wb') as f:\n f.write(bytes_data)\n\n\ndef is_executable(path: _Path) -> bool:\n \"\"\"Check if a file is executable.\n\n >>> with tempfile.TemporaryDirectory() as dir:\n ... path = pathlib.Path(dir) / 'file'\n ... _ = path.write_text('test')\n ... check_eq(is_executable(path), False)\n ... if sys.platform != 'cygwin':\n ... # Copy R bits to X bits:\n ... path.chmod(path.stat().st_mode | ((path.stat().st_mode & 0o444) >> 2))\n ... check_eq(is_executable(path), True)\n \"\"\"\n return bool(pathlib.Path(path).stat().st_mode & stat.S_IEXEC)\n\n\n## OS commands\n\n\ndef run(args: Union[str, Sequence[str]]) -> None:\n \"\"\"Execute command, printing output from stdout and stderr.\n\n Args:\n args: Command to execute, which can be either a string or a sequence of word\n strings, as in `subprocess.run()`. If `args` is a string, the shell is\n invoked to interpret it.\n\n Raises:\n RuntimeError: If the command's exit code is nonzero.\n\n >>> with tempfile.TemporaryDirectory() as dir:\n ... path = pathlib.Path(dir) / 'file'\n ... run(f'echo ab >{path}')\n ... assert path.is_file() and 3 <= path.stat().st_size <= 4\n \"\"\"\n proc = subprocess.run(\n args,\n shell=isinstance(args, str),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n check=False,\n universal_newlines=True)\n print(proc.stdout, end='', flush=True)\n if proc.returncode:\n raise RuntimeError(\n f\"Command '{proc.args}' failed with code {proc.returncode}.\")\n\n\nif __name__ == '__main__':\n doctest.testmod()\n" ]
[ [ "numpy.expand_dims", "numpy.asarray", "numpy.issubdtype", "numpy.isneginf", "numpy.broadcast", "numpy.zeros_like", "numpy.any", "numpy.iinfo", "numpy.moveaxis", "numpy.square", "numpy.empty_like", "numpy.full", "numpy.atleast_1d", "numpy.insert", "numpy.unravel_index", "numpy.isnan", "numpy.errstate", "numpy.meshgrid", "numpy.array", "numpy.format_float_positional", "numpy.isfinite", "numpy.isposinf", "numpy.linalg.norm", "numpy.broadcast_to", "numpy.prod", "numpy.empty" ] ]
Knowledge-Precipitation-Tribe/Neural-network
[ "eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8", "eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8" ]
[ "code/singleVariableLinearRegression/HelperClass/TrainingHistory_1_0.py", "code/NonLinearBinaryClassification/HelperClass2/TrainingHistory_2_1.py" ]
[ "# -*- coding: utf-8 -*-#\n'''\n# Name: TrainingHistory_1_0\n# Description: 记录训练过程中的训练历史\n# Author: super\n# Date: 2020/5/8\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\n\n\"\"\"\nVersion 1.0\n- to record the history of training loss and weights/bias value\n- can show the plotting\n\"\"\"\nclass TrainingHistory_1_0(object):\n def __init__(self):\n self.iteration = []\n self.loss_history = []\n self.w_history = []\n self.b_history = []\n\n def AddLossHistory(self, iteration, loss, w, b):\n self.iteration.append(iteration)\n self.loss_history.append(loss)\n self.w_history.append(w)\n self.b_history.append(b)\n\n def ShowLossHistory(self, params, xmin=None, xmax=None, ymin=None, ymax=None):\n plt.plot(self.iteration, self.loss_history)\n title = params.toString()\n plt.title(title)\n plt.xlabel(\"iteration\")\n plt.ylabel(\"loss\")\n if xmin != None and ymin != None:\n plt.axis([xmin, xmax, ymin, ymax])\n plt.show()\n return title\n\n def GetLast(self):\n count = len(self.loss_history)\n return self.loss_history[count-1], self.w_history[count-1], self.b_history[count-1]", "# -*- coding: utf-8 -*-#\n'''\n# Name: TrainingHistory_2_0\n# Description: \n# Author: super\n# Date: 2020/5/24\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\n\n# 帮助类,用于记录损失函数值极其对应的权重/迭代次数\nclass TrainingHistory_2_1(object):\n def __init__(self):\n self.loss_train = []\n self.accuracy_train = []\n self.iteration_seq = []\n self.epoch_seq = []\n self.loss_val = []\n self.accuracy_val = []\n\n def Add(self, epoch, total_iteration, loss_train, accuracy_train, loss_vld, accuracy_vld):\n self.iteration_seq.append(total_iteration)\n self.epoch_seq.append(epoch)\n self.loss_train.append(loss_train)\n self.accuracy_train.append(accuracy_train)\n if loss_vld is not None:\n self.loss_val.append(loss_vld)\n if accuracy_vld is not None:\n self.accuracy_val.append(accuracy_vld)\n\n return False\n\n # 图形显示损失函数值历史记录\n def ShowLossHistory(self, params, xmin=None, xmax=None, ymin=None, ymax=None):\n fig = plt.figure(figsize=(12, 5))\n\n axes = plt.subplot(1, 2, 1)\n # p2, = axes.plot(self.iteration_seq, self.loss_train)\n # p1, = axes.plot(self.iteration_seq, self.loss_val)\n p2, = axes.plot(self.epoch_seq, self.loss_train)\n p1, = axes.plot(self.epoch_seq, self.loss_val)\n axes.legend([p1, p2], [\"validation\", \"train\"])\n axes.set_title(\"Loss\")\n axes.set_ylabel(\"loss\")\n axes.set_xlabel(\"epoch\")\n if xmin != None or xmax != None or ymin != None or ymax != None:\n axes.axis([xmin, xmax, ymin, ymax])\n\n axes = plt.subplot(1, 2, 2)\n # p2, = axes.plot(self.iteration_seq, self.accuracy_train)\n # p1, = axes.plot(self.iteration_seq, self.accuracy_val)\n p2, = axes.plot(self.epoch_seq, self.accuracy_train)\n p1, = axes.plot(self.epoch_seq, self.accuracy_val)\n axes.legend([p1, p2], [\"validation\", \"train\"])\n axes.set_title(\"Accuracy\")\n axes.set_ylabel(\"accuracy\")\n axes.set_xlabel(\"epoch\")\n\n title = params.toString()\n plt.suptitle(title)\n plt.show()\n return title\n\n def ShowLossHistory4(self, axes, params, xmin=None, xmax=None, ymin=None, ymax=None):\n p2, = axes.plot(self.epoch_seq, self.loss_train)\n p1, = axes.plot(self.epoch_seq, self.loss_val)\n title = params.toString()\n axes.set_title(title)\n axes.set_xlabel(\"epoch\")\n axes.set_ylabel(\"loss\")\n if xmin != None and ymin != None:\n axes.axis([xmin, xmax, ymin, ymax])\n return title\n\n def Dump(self, file_name):\n f = open(file_name, 'wb')\n pickle.dump(self, f)\n\n def 
Load(file_name):\n f = open(file_name, 'rb')\n lh = pickle.load(f)\n return lh\n\n def GetEpochNumber(self):\n return self.epoch_seq[-1]" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.suptitle", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
oplatek/merlin
[ "df7e3494b0662fec3a2fbe4d51d056e80cc6b682" ]
[ "src/models/dnn_cm.py" ]
[ "###THEANO_FLAGS='cuda.root=/opt/cuda-5.0.35,mode=FAST_RUN,device=gpu0,floatX=float32,exception_verbosity=high' python dnn.py\n\"\"\"\n\"\"\"\nimport cPickle\nimport os\nimport sys\nimport time\n\nimport numpy# as np\nimport gnumpy as gnp\n\n#cudamat\n\n#import theano\n#import theano.tensor as T\n\nimport logging\n\nclass DNN(object):\n\n def __init__(self, numpy_rng, n_ins=100,\n n_outs=100, l1_reg = None, l2_reg = None, \n hidden_layer_sizes=[500, 500], \n hidden_activation='tanh', output_activation='linear'):\n\n logger = logging.getLogger(\"DNN initialization\")\n\n self.n_layers = len(hidden_layer_sizes)\n self.l1_reg = l1_reg\n self.l2_reg = l2_reg\n\n assert self.n_layers > 0\n\n self.W_params = []\n self.b_params = []\n self.mW_params = []\n self.mb_params = []\n \n for i in xrange(self.n_layers):\n if i == 0: \n input_size = n_ins\n else:\n input_size = hidden_layer_sizes[i-1]\n W_value = gnp.garray(numpy_rng.normal(0.0, 1.0/numpy.sqrt(input_size), size=(input_size, hidden_layer_sizes[i])))\n b_value = gnp.zeros(hidden_layer_sizes[i])\n mW_value = gnp.zeros((input_size, hidden_layer_sizes[i]))\n mb_value = gnp.zeros(hidden_layer_sizes[i])\n self.W_params.append(W_value)\n self.b_params.append(b_value)\n self.mW_params.append(mW_value)\n self.mb_params.append(mb_value)\n \n #output layer\n input_size = hidden_layer_sizes[self.n_layers-1]\n W_value = gnp.garray(numpy_rng.normal(0.0, 1.0/numpy.sqrt(input_size), size=(input_size, n_outs)))\n b_value = gnp.zeros(n_outs)\n mW_value = gnp.zeros((input_size, n_outs))\n mb_value = gnp.zeros(n_outs)\n self.W_params.append(W_value)\n self.b_params.append(b_value)\n self.mW_params.append(mW_value)\n self.mb_params.append(mb_value)\n \n def backpropagation(self, train_set_y):\n# (train_set_x, train_set_y) = train_xy\n \n # assuming linear output and square error cost function\n observation_error = self.final_layer_output - train_set_y\n\n self.W_grads = []\n self.b_grads = []\n current_error = observation_error\n current_activation = self.activations[-1]\n current_W_grad = gnp.dot(current_activation.T, observation_error)\n current_b_grad = gnp.dot(gnp.ones((1, observation_error.shape[0])), observation_error)\n self.W_grads.append(current_W_grad)\n self.b_grads.append(current_b_grad)\n\n propagate_error = gnp.dot(observation_error, self.W_params[self.n_layers].T) # final layer is linear output, gradient is one\n for i in reversed(range(self.n_layers)):\n current_activation = self.activations[i]\n current_gradient = 1.0 - current_activation ** 2\n current_W_grad = gnp.dot(current_activation.T, propagate_error)\n current_b_grad = gnp.dot(gnp.ones((1, propagate_error.shape[0])), propagate_error)\n propagate_error = gnp.dot(propagate_error, self.W_params[i].T) * current_gradient\n \n self.W_grads.insert(0, current_W_grad)\n self.b_grads.insert(0, current_b_grad)\n \n \n def feedforward(self, train_set_x):\n self.activations = []\n \n self.activations.append(train_set_x)\n \n for i in xrange(self.n_layers):\n current_activations = gnp.tanh(gnp.dot(self.activations[i], self.W_params[i]) + self.b_params[i])\n self.activations.append(current_activations)\n \n #output layers\n self.final_layer_output = gnp.dot(self.activations[self.n_layers], self.W_params[self.n_layers]) + self.b_params[self.n_layers]\n \n def gradient_update(self, batch_size, learning_rate, momentum):\n \n multiplier = learning_rate / batch_size;\n for i in xrange(len(self.W_grads)):\n \n if i >= len(self.W_grads) - 2:\n local_multiplier = multiplier * 0.5\n else:\n local_multiplier 
= multiplier\n \n self.W_grads[i] = (self.W_grads[i] + self.W_params[i] * self.l2_reg) * local_multiplier\n self.b_grads[i] = self.b_grads[i] * local_multiplier # + self.b_params[i] * self.l2_reg\n \n #update weights and record momentum weights\n self.mW_params[i] = (self.mW_params[i] * momentum) - self.W_grads[i]\n self.mb_params[i] = (self.mb_params[i] * momentum) - self.b_grads[i]\n self.W_params[i] += self.mW_params[i]\n self.b_params[i] += self.mb_params[i]\n# print self.W_params[0].shape, self.W_params[len(self.W_params)-1].shape\n \n def finetune(self, train_xy, batch_size, learning_rate, momentum):\n (train_set_x, train_set_y) = train_xy\n \n train_set_x = gnp.as_garray(train_set_x)\n train_set_y = gnp.as_garray(train_set_y)\n \n self.feedforward(train_set_x)\n self.backpropagation(train_set_y)\n self.gradient_update(batch_size, learning_rate, momentum)\n \n self.errors = gnp.sum((self.final_layer_output - train_set_y) ** 2, axis=1)\n \n return self.errors.as_numpy_array()\n\n def parameter_prediction(self, test_set_x):\n test_set_x = gnp.as_garray(test_set_x)\n \n current_activations = test_set_x\n \n for i in xrange(self.n_layers):\n current_activations = gnp.tanh(gnp.dot(current_activations, self.W_params[i]) + self.b_params[i])\n \n final_layer_output = gnp.dot(current_activations, self.W_params[self.n_layers]) + self.b_params[self.n_layers]\n \n return final_layer_output.as_numpy_array()\n\n# def parameter_prediction(self, test_set_x): #, batch_size\n\n# n_test_set_x = test_set_x.get_value(borrow=True).shape[0]\n\n# test_out = theano.function([], self.final_layer.output,\n# givens={self.x: test_set_x[0:n_test_set_x]})\n# predict_parameter = test_out()\n# return predict_parameter\n \n\nif __name__ == '__main__':\n\n train_scp = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nn_scp/train.scp'\n valid_scp = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nn_scp/gen.scp'\n\t\n model_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/practice/nnets_model'\n\t\n log_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/practice/log' \n\n finetune_lr=0.01\n pretraining_epochs=100\n pretrain_lr=0.01\n training_epochs=100\n batch_size=32\n\n n_ins = 898\n n_outs = 229\n \n hidden_layer_sizes = [512, 512, 512]\n \n# test_DBN(train_scp, valid_scp, log_dir, model_dir, n_ins, n_outs, hidden_layer_sizes, \n# finetune_lr, pretraining_epochs, pretrain_lr, training_epochs, batch_size)\n\n dnn_generation()\n\n" ]
[ [ "numpy.sqrt" ] ]
kruppt/wtfml
[ "1c13fc3fa850fe30f32707cf75fadc500c872284" ]
[ "wtfml/engine/engine.py" ]
[ "import torch\nfrom tqdm import tqdm\nfrom ..utils import AverageMeter\n\ntry:\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.parallel_loader as pl\n _xla_available = True\nexcept ImportError:\n _xla_available = False\n\ntry:\n from apex import amp\n\n _apex_available = True\nexcept ImportError:\n _apex_available = False\n \n\n\ndef reduce_fn(vals):\n return sum(vals) / len(vals)\n\n\nclass Engine:\n @staticmethod\n def train(\n data_loader,\n model,\n optimizer,\n device,\n scheduler=None,\n accumulation_steps=1,\n use_tpu=False,\n fp16=False,\n ):\n if use_tpu and not _xla_available:\n raise Exception(\n \"You want to use TPUs but you dont have pytorch_xla installed\"\n )\n if fp16 and not _apex_available:\n raise Exception(\"You want to use fp16 but you dont have apex installed\")\n if fp16 and use_tpu:\n raise Exception(\"Apex fp16 is not available when using TPUs\")\n if fp16:\n accumulation_steps = 1\n losses = AverageMeter()\n predictions = []\n model.train()\n if accumulation_steps > 1:\n optimizer.zero_grad()\n if use_tpu:\n para_loader = pl.ParallelLoader(data_loader, [device])\n tk0 = tqdm(\n para_loader.per_device_loader(device), \n total=len(data_loader),\n disable=xm.get_ordinal()==0\n )\n else:\n tk0 = tqdm(data_loader, total=len(data_loader))\n\n for b_idx, data in enumerate(tk0):\n for key, value in data.items():\n data[key] = value.to(device)\n if accumulation_steps == 1 and b_idx == 0:\n optimizer.zero_grad()\n _, loss = model(**data)\n\n if not use_tpu:\n with torch.set_grad_enabled(True):\n if fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n if (b_idx + 1) % accumulation_steps == 0:\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n if b_idx > 0:\n optimizer.zero_grad()\n else:\n loss.backward()\n xm.optimizer_step(optimizer)\n if scheduler is not None:\n scheduler.step()\n if b_idx > 0:\n optimizer.zero_grad()\n if use_tpu:\n reduced_loss = xm.mesh_reduce('loss_reduce', loss, reduce_fn)\n losses.update(reduced_loss.item(), data_loader.batch_size)\n else:\n losses.update(loss.item(), data_loader.batch_size)\n \n tk0.set_postfix(loss=losses.avg)\n return losses.avg\n\n @staticmethod\n def evaluate(data_loader, model, device, use_tpu=False):\n losses = AverageMeter()\n final_predictions = []\n final_targets = []\n model.eval()\n with torch.no_grad():\n if use_tpu:\n para_loader = pl.ParallelLoader(data_loader, [device])\n tk0 = tqdm(\n para_loader.per_device_loader(device), \n total=len(data_loader),\n disable=xm.get_ordinal()==0\n )\n else:\n tk0 = tqdm(data_loader, total=len(data_loader))\n for b_idx, data in enumerate(tk0):\n for key, value in data.items():\n data[key] = value.to(device)\n _, loss = model(**data)\n if use_tpu:\n reduced_loss = xm.mesh_reduce('loss_reduce', loss, reduce_fn)\n losses.update(reduced_loss.item(), data_loader.batch_size)\n else:\n losses.update(loss.item(), data_loader.batch_size)\n tk0.set_postfix(loss=losses.avg)\n return losses.avg\n\n @staticmethod\n def predict(data_loader, model, device, use_tpu=False):\n model.eval()\n final_predictions = []\n if use_tpu:\n raise Exception(\"TPU not available for predict yet!\")\n with torch.no_grad():\n tk0 = tqdm(data_loader, total=len(data_loader))\n for b_idx, data in enumerate(tk0):\n for key, value in data.items():\n data[key] = value.to(device)\n predictions, _ = model(**data)\n predictions = predictions.cpu()\n final_predictions.append(predictions)\n return final_predictions\n" ]
[ [ "torch.set_grad_enabled", "torch.no_grad" ] ]
Erikx3/2D_Pygame_Drone_Simulation
[ "e67fdf9d030a3e68448e521818f557622293284f" ]
[ "tools/line_intersect.py" ]
[ "import numpy as np\n\n# https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines\n\n\ndef line_intersection(line1, line2):\n \"\"\"\n\n :param line1: line defined by two points, e.g. [[1,1], [2,2]]\n :param line2: second line\n :return: point of intersection,\n \"\"\"\n\n # Functions to check, whether line intersect at all:\n def ccw(A, B, C):\n return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])\n\n # Return true if line segments AB and CD intersect\n def intersect(A, B, C, D):\n return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)\n\n if intersect(line1[0], line1[1], line2[0], line2[1]):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n # Fallback, if intersect did not work\n if div == 0:\n return None\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return [x, y]\n else:\n return None\n\n\ndef distance(p1, p2):\n \"\"\"\n\n :param p1: point, e.g. [1,2]\n :param p2: second point\n :return: distance between these two points\n \"\"\"\n return np.sqrt((p2[0] - p1[0])**2 + (p2[1]-p1[1])**2)\n" ]
[ [ "numpy.sqrt" ] ]
ShubhangDesai/nn-micro-framework
[ "1d980e653ed480a17b7a4158de5638ee70b6ae40" ]
[ "krikos/nn/network.py" ]
[ "import numpy as np\n\nfrom krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout\n\n\nclass Network(object):\n def __init__(self):\n super(Network, self).__init__()\n self.diff = (BatchNorm, BatchNorm2d, Dropout)\n\n def train(self, input, target):\n raise NotImplementedError\n\n def eval(self, input):\n raise NotImplementedError\n\n\nclass Sequential(Network):\n def __init__(self, layers, loss, lr, regularization=None):\n super(Sequential, self).__init__()\n self.layers = layers\n self.loss = loss\n self.lr = lr\n self.regularization = regularization\n\n def train(self, input, target):\n layers = self.layers\n loss = self.loss\n regularization = self.regularization\n\n l = 0\n for layer in layers:\n if isinstance(layer, self.diff):\n layer.mode = \"train\"\n\n input = layer.forward(input)\n if regularization is not None:\n for _, param in layer.params.items():\n l += regularization.forward(param)\n\n l += loss.forward(input, target)\n dout = loss.backward()\n\n for layer in reversed(layers):\n dout = layer.backward(dout)\n\n for param, grad in layer.grads.items():\n if regularization is not None:\n grad += regularization.backward(layer.params[param])\n layer.params[param] -= self.lr * grad\n\n return np.argmax(input, axis=1), l\n\n def eval(self, input):\n layers = self.layers\n\n for layer in layers:\n if isinstance(layer, self.diff):\n layer.mode = \"test\"\n\n input = layer.forward(input)\n\n return np.argmax(input, axis=1)" ]
[ [ "numpy.argmax" ] ]
celiolarcher/AUTOCVE
[ "b69e8d0a35d838a3ef5fd36516f9e4c61610ce53", "b69e8d0a35d838a3ef5fd36516f9e4c61610ce53" ]
[ "util/scoring_handler.py", "util/make_pipeline.py" ]
[ "from sklearn import metrics\n\ndef load_scoring(score_call):\n if isinstance(score_call,str):\n if score_call in metrics.SCORERS:\n score_call=metrics.get_scorer(score_call)\n else:\n raise Exception(\"Keyword isn't in metric.SCORES\")\n elif not callable(score_call):\n raise Exception(\"Invalid value for scoring parameter\")\n \n return score_call\n\n", "import sklearn.pipeline as Pipeline\nimport importlib\nimport ast\nfrom sklearn.ensemble import VotingClassifier\n\ndef make_pipeline_str(pipeline_str,verbose=1):\n if pipeline_str is None:\n return None\n\n list_pip_string=split_methods_pip(pipeline_str)\n list_pip_methods=[]\n\n for method_str in list_pip_string:\n method_str=method_str.split(\"(\",1)\n\n method_call_str=method_str[0].split(\"/\")\n\n attr_list_str=[]\n attr_list_str=split_attr(method_str[1][0:len(method_str[1])-1])\n\n try:\n imported_lib=importlib.import_module(method_call_str[0])\n kwargs_method={}\n \n for attr in attr_list_str:\n attr=attr.split(\"=\",1)\n\n if attr[1].find(\"/\")!=-1:\n if attr[1].find(\"(\") != -1:\n pip_attr=make_pipeline_str(attr[1])\n if pip_attr is None:\n return None\n kwargs_method[attr[0].strip()]=pip_attr\n else:\n attr_imported=attr[1].split(\"/\")\n imported_lib_attr=importlib.import_module(attr_imported[0])\n attr_imported=getattr(imported_lib_attr,attr_imported[1])\n kwargs_method[attr[0].strip()]=attr_imported\n\n else:\n kwargs_method[attr[0].strip()]=ast.literal_eval(attr[1])\n\n method=getattr(imported_lib,method_call_str[1])(**kwargs_method)\n\n if hasattr(method, 'n_jobs'):\n method.n_jobs=1\n\n if hasattr(method, 'nthread'):\n method.nthread=1\n\n list_pip_methods.append(method)\n except Exception as e:\n if verbose>0:\n print(\"Load method error: \"+str(method_str))\n print(str(e))\n return None\n\n #If there is justs one method, return it outside a pipeline (RFE methods doesn't work otherwise)\n if len(list_pip_methods)==1:\n return list_pip_methods[0]\n\n try:\n pipeline=Pipeline.make_pipeline(*list_pip_methods)\n except Exception as e:\n if verbose>0:\n print(\"Pipeline definition error: \"+str(list_pip_string))\n print(str(e))\n return None\n\n return pipeline\n\n\n\ndef split_attr(string_attr):\n split_string=[]\n count_nested=0\n index_start=0\n index_end=0\n for index_end,c in enumerate(string_attr):\n if c in ['(','[']:\n count_nested+=1\n if c in [')',']']:\n count_nested-=1\n\n if c==',' and count_nested==0:\n split_string.append(string_attr[index_start:index_end])\n index_start=index_end+1\n\n if count_nested<0:\n index_end-=1\n break\n \n if(string_attr.strip()!=\"\"):\n split_string.append(string_attr[index_start:index_end+1])\n\n return split_string\n\ndef split_methods_pip(string_pip):\n split_string=[]\n count_nested=0\n index_start=0\n index_end=0\n for index_end,c in enumerate(string_pip):\n if c in ['(','[']:\n count_nested+=1\n if c in [')',']']:\n count_nested-=1\n\n if string_pip[index_end]=='-' and string_pip[index_end+1]=='>' and count_nested==0:\n split_string.append(string_pip[index_start:index_end])\n index_start=index_end+2\n \n if count_nested<0:\n index_end-=1\n break\n \n split_string.append(string_pip[index_start:index_end+1])\n\n return split_string\n \n\ndef make_voting_ensemble(pipelines_population):\n if pipelines_population is None:\n return None\n\n pipelines_population=pipelines_population.split(\"|\")\n pipeline_list=[]\n\n for id_pip, pipeline_str in enumerate(pipelines_population):\n pipeline=make_pipeline_str(pipeline_str)\n if pipeline is not None:\n 
pipeline_list.append((\"Pipe_\"+str(id_pip),pipeline))\n\n return VotingClassifier(estimators=pipeline_list) \n\n\n\n\n" ]
[ [ "sklearn.metrics.get_scorer" ], [ "sklearn.pipeline.make_pipeline", "sklearn.ensemble.VotingClassifier" ] ]
luziqing/CNN-for-ASI
[ "90b29f91c9ed98323d76628922d34cf94ea6a1bd" ]
[ "train.py" ]
[ "# Compatability Imports\nfrom __future__ import print_function\nfrom os.path import join\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom data import readSEGY, readLabels, get_slice\nfrom batch import get_random_batch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, Dataset\nimport tb_logger\n\n\n\n\nimport numpy as np\nfrom utils import *\n#This is the network definition proposed in the paper\n\n#Parameters\ndataset_name = 'F3'\nim_size = 65\nbatch_size = 32 #If you have a GPU with little memory, try reducing this to 16 (may degrade results)\nuse_gpu = True #Switch to toggle the use of GPU or not\nlog_tensorboard = True #Log progress on tensor board\nif log_tensorboard: logger = tb_logger.TBLogger('log', 'Train')\n\n#See the texture_net.py file for the network configuration\nfrom texture_net import TextureNet\nnetwork = TextureNet(n_classes=2)\n\n#Loss function\ncross_entropy = nn.CrossEntropyLoss() #Softmax function is included\n\n#Optimizer to control step size in gradient descent\noptimizer = torch.optim.Adam(network.parameters())\n\n#Transfer model to gpu\nif use_gpu:\n network = network.cuda()\n\n#Load the data cube and labels\ndata, data_info = readSEGY(join(dataset_name,'data.segy'))\ntrain_class_imgs, train_coordinates = readLabels(join(dataset_name,'train'), data_info)\nval_class_imgs, _ = readLabels(join(dataset_name,'val'), data_info)\n\n#Plot training/validation data with labels\nif log_tensorboard:\n for class_img in train_class_imgs + val_class_imgs:\n logger.log_images(class_img[1] + '_' + str(class_img[2] ), get_slice(data, data_info, class_img[1], class_img[2]), cm='gray')\n logger.log_images(class_img[1] + '_' + str(class_img[2]) + '_true_class', class_img[0])\n\n\n# Training loop\nfor i in range(2000):\n\n # Get random training batch with augmentation\n # This is the bottle-neck for training and could be done more efficient on the GPU...\n [batch, labels] = get_random_batch(data, train_coordinates, im_size, batch_size,\n random_flip=True,\n random_stretch=.2,\n random_rot_xy=180,\n random_rot_z=15)\n\n\n #Format data to torch-variable\n batch = Variable( torch.Tensor(batch).float() )\n labels = Variable( torch.Tensor(labels).long() )\n\n # Transfer data to gpu\n if use_gpu:\n batch = batch.cuda()\n labels = labels.cuda()\n\n #Set network to training phase\n network.train()\n\n #Run the samples through the network\n output = network(batch)\n\n #Compute loss\n loss = cross_entropy( torch.squeeze(output) , labels)\n\n # Do back-propagation to get gradients of weights w.r.t. 
loss\n loss.backward()\n\n # Ask the optimizer to adjust the parameters in the direction of lower loss\n optimizer.step()\n\n # Every 10th iteration - print training loss\n if i % 10 == 0:\n network.eval()\n\n #Log to training loss/acc\n print('Iteration:', i, 'Training loss:', var_to_np(loss))\n if log_tensorboard:\n logger.log_scalar('training_loss', var_to_np(loss).tolist(),i)\n for k,v in computeAccuracy(torch.argmax(output,1), labels).items():\n if log_tensorboard:\n logger.log_scalar('training_' + k, v, i)\n print(' -',k,v,'%')\n\n #every 100th iteration\n if i % 100 == 0 and log_tensorboard:\n network.eval()\n\n # Output predicted train/validation class/probability images\n for class_img in train_class_imgs + val_class_imgs:\n\n slice = class_img[1]\n slice_no = class_img[2]\n\n class_img = interpret(network.classify, data, data_info, slice, slice_no, im_size, 16, return_full_size=True, use_gpu=use_gpu)\n logger.log_images( slice + '_' + str(slice_no)+ '_pred_class', class_img, i)\n\n class_img = interpret(network, data, data_info, slice, slice_no, im_size, 16, return_full_size=True, use_gpu=use_gpu)\n logger.log_images( slice + '_' + str(slice_no) + '_pred_prob', class_img, i)\n\n #Store trained network\n torch.save(network.state_dict(), join(dataset_name, 'saved_model.pt'))\n\n\n\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.Tensor", "torch.squeeze", "torch.argmax" ] ]
xpersky/whatsthat
[ "2857a0e829d39fd591f90a3f7001f7ea78593556" ]
[ "app/views.py" ]
[ "import os\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nimport tensorflow as tf\nimport datetime\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.db.models import Count\nfrom django.db.models.functions import TruncMonth\n\nfrom .models import TrainImagesOfCancer, TrainImagesNotCancer, Usage\n\ngraph = tf.get_default_graph()\nmodel = load_model(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'whatsthat.h5'))\n\n# Render template\n\ndef index(request):\n return render(request,'app/home.html')\n\n# Respond to user request\n\ndef check(request):\n if request.method == 'POST':\n if request.is_ajax():\n img = prepare_image(request)\n with graph.as_default():\n result = model.predict(img)\n output = fetch_result(result,request)\n return JsonResponse(output, safe=False)\n\ndef prepare_image(req):\n img = cv2.imdecode(np.fromstring(req.FILES['image'].read(), np.uint8), cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img,(224,224), interpolation = cv2.INTER_CUBIC)\n img = np.expand_dims(img,axis=0)/255\n return img\n\ndef fetch_result(res,req):\n pred_yes = res[0][0]\n pred_not = res[0][1]\n if pred_yes > pred_not:\n diag = 'cancer'\n prob = str(round(pred_yes,4))\n instance = TrainImagesOfCancer()\n instance.image = req.FILES['image']\n instance.save()\n else:\n diag = 'not cancer'\n prob = str(round(pred_not,4))\n instance = TrainImagesNotCancer()\n instance.image = req.FILES['image']\n instance.save()\n usage = Usage()\n usage.result = diag\n usage.save()\n return [diag,prob]\n\ndef stats(request):\n if request.method == 'GET':\n if request.is_ajax():\n usage = Usage.objects.annotate(month=TruncMonth('date')).values('month').annotate(total=Count('id'))\n usages = []\n today = datetime.date.today()\n for monthly in usage:\n delta = today - monthly['month']\n if delta.days < 365:\n usages.append((monthly['month'].month,monthly['total']))\n result = []\n for i in range(12):\n ct = 0\n for item in usages:\n m , t = item\n if i == m:\n result.append(t)\n else: \n ct += 1\n if ct == len(usages):\n result.append(0)\n return JsonResponse(result, safe=False) \n" ]
[ [ "tensorflow.get_default_graph", "numpy.expand_dims" ] ]
amarufd/MLflowAndAWSSagemaker
[ "c90cfea48acb1d42422fc810b61063e01721e21a" ]
[ "cargando_dataset.py" ]
[ "import mlflow\nimport pandas as pd\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split\n\n\ndef load_dataset_to_mlflow(path, sep, test_size=0.2, random_state=123):\n # carga el dataset\n data = pd.read_csv(path, sep=sep)\n\n # se procesa el dataset\n x,y = procesando_dataset(data)\n \n # almacena caracteristicas del dataset en MLflow\n mlflow.log_param(\"dataset_path\", path)\n mlflow.log_param(\"dataset_shape\", data.shape)\n mlflow.log_param(\"test_size\", test_size)\n mlflow.log_param(\"random_state\", random_state)\n mlflow.log_param(\"one_hot_encoding\", True)\n return train_test_split(x, y, test_size=test_size, random_state=random_state)\n\ndef load_dataset(path, sep, test_size=0.2, random_state=123):\n # carga el dataset\n data = pd.read_csv(path, sep=sep)\n\n # se procesa el dataset\n x,y = procesando_dataset(data)\n\n return train_test_split(x, y, test_size=test_size, random_state=random_state)\n\ndef procesando_dataset(datos):\n # Transformar string a date para hacer resta\n X = datos.columns[1:].to_series().apply(lambda x: datetime.strptime(x, \"%Y-%m-%d\"))\n # Restar a todos los días la fecha inicial para obtener el transcurso de dias\n X = X - X[0]\n # Obtener los dias de diferencia\n X = X.apply(lambda x: x.days)\n X = X.to_frame()\n X.columns = [\"dias_transcurridos\"]\n X[\"Dias2\"] = X.apply(lambda x: x**2)\n \n Y_aux = datos.iloc[1, 1:]\n y = []\n for i in range(0, len(Y_aux)):\n y.append(Y_aux[i])\n y = pd.DataFrame(y)\n y.columns = [\"total_contagiados\"]\n y.index = X.index\n\n return X,y\n\nif __name__ == '__main__':\n x_train, x_test, y_train, y_test = load_dataset_to_mlflow(\n 'datos/TotalesNacionales.csv', ','\n )\n print(x_train.head())\n print(y_train.head())\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split", "pandas.DataFrame" ] ]
yukiTakezawa/GraphIsomorphismNetwork
[ "df6c907f90fb558062a619bd367f1cd863d7f96a" ]
[ "src/gnn.py" ]
[ "import numpy as np\nimport torch\nfrom molecule import *\nimport copy\n\nclass GraphIsomorphismNetwork:\n def __init__(self, node_dim, update_loop_size):\n self.node_dim = node_dim\n self.update_loop_size = update_loop_size\n self.eps = 0.0 #np.random.normal() # learnable parameter \n\n # function to update nodes\n def mlp(self, molecule):\n next_nodes = torch.zeros_like(molecule.nodes)\n #for i in range(next_nodes.shape[0]):\n # next_nodes[i] = (torch.t(molecule.graph[i]) * molecule.nodes).sum(dim=1)\n #return (1.0 + eps)*molecule.nodes + next_nodes\n return molecule.nodes + torch.mm(molecule.graph, molecule.nodes)\n \n def readout(self, molecule):\n return molecule.nodes.sum(dim=0)\n\n def predict(self, molecule):\n tmp_molecule = copy.deepcopy(molecule)\n # CONCAT(READOUT(molecule.nodes at k) k < update_loop_size)\n sum_of_nodes = torch.zeros(self.node_dim).to('cuda') \n for i in range(self.update_loop_size):\n tmp_molecule.nodes = self.mlp(tmp_molecule) \n sum_of_nodes += self.readout(tmp_molecule)\n\n return sum_of_nodes\n" ]
[ [ "torch.mm", "torch.zeros_like", "torch.zeros" ] ]
AgentMaker/PAPC
[ "c9a0a284208a4cb508467ee3c9e683d802b868cc" ]
[ "PAPC/models/detect/pointpillars/libs/ops/sample_ops.py" ]
[ "import pathlib\nimport pickle\nimport time\nfrom functools import partial, reduce\n\nimport numpy as np\nfrom skimage import io as imgio\n\nfrom libs import preprocess as prep\nfrom libs.ops import box_np_ops\nfrom data import kitti_common as kitti\nimport copy\n\nfrom libs.tools.check import shape_mergeable\n\nclass DataBaseSamplerV2:\n def __init__(self, db_infos, groups, db_prepor=None,\n rate=1.0, global_rot_range=None):\n for k, v in db_infos.items():\n print(f\"load {len(v)} {k} database infos\")\n\n if db_prepor is not None:\n db_infos = db_prepor(db_infos)\n print(\"After filter database:\")\n for k, v in db_infos.items():\n print(f\"load {len(v)} {k} database infos\")\n\n self.db_infos = db_infos\n self._rate = rate\n self._groups = groups\n self._group_db_infos = {}\n self._group_name_to_names = []\n self._sample_classes = []\n self._sample_max_nums = []\n self._use_group_sampling = False # slower\n if any([len(g) > 1 for g in groups]):\n self._use_group_sampling = True\n if not self._use_group_sampling:\n self._group_db_infos = self.db_infos # just use db_infos\n for group_info in groups:\n group_names = list(group_info.keys())\n self._sample_classes += group_names\n self._sample_max_nums += list(group_info.values())\n else:\n for group_info in groups:\n group_dict = {}\n group_names = list(group_info.keys())\n group_name = \", \".join(group_names)\n self._sample_classes += group_names\n self._sample_max_nums += list(group_info.values())\n self._group_name_to_names.append((group_name, group_names))\n # self._group_name_to_names[group_name] = group_names\n for name in group_names:\n for item in db_infos[name]:\n gid = item[\"group_id\"]\n if gid not in group_dict:\n group_dict[gid] = [item]\n else:\n group_dict[gid] += [item]\n if group_name in self._group_db_infos:\n raise ValueError(\"group must be unique\")\n group_data = list(group_dict.values())\n self._group_db_infos[group_name] = group_data\n info_dict = {}\n if len(group_info) > 1:\n for group in group_data:\n names = [item[\"name\"] for item in group]\n names = sorted(names)\n group_name = \", \".join(names)\n if group_name in info_dict:\n info_dict[group_name] += 1\n else:\n info_dict[group_name] = 1\n print(info_dict)\n\n\n self._sampler_dict = {}\n for k, v in self._group_db_infos.items():\n self._sampler_dict[k] = prep.BatchSampler(v, k)\n self._enable_global_rot = False\n if global_rot_range is not None:\n if not isinstance(global_rot_range, (list, tuple, np.ndarray)):\n global_rot_range = [-global_rot_range, global_rot_range]\n else:\n assert shape_mergeable(global_rot_range, [2])\n if np.abs(global_rot_range[0] -\n global_rot_range[1]) >= 1e-3:\n self._enable_global_rot = True\n self._global_rot_range = global_rot_range\n\n @property\n def use_group_sampling(self):\n return self._use_group_sampling\n\n def sample_all(self,\n root_path,\n gt_boxes,\n gt_names,\n num_point_features,\n random_crop=False,\n gt_group_ids=None,\n rect=None,\n Trv2c=None,\n P2=None):\n sampled_num_dict = {}\n sample_num_per_class = []\n for class_name, max_sample_num in zip(self._sample_classes,\n self._sample_max_nums):\n sampled_num = int(max_sample_num -\n np.sum([n == class_name for n in gt_names]))\n sampled_num = np.round(self._rate * sampled_num).astype(np.int64)\n sampled_num_dict[class_name] = sampled_num\n sample_num_per_class.append(sampled_num)\n\n sampled_groups = self._sample_classes\n if self._use_group_sampling:\n assert gt_group_ids is not None\n sampled_groups = []\n sample_num_per_class = []\n for group_name, 
class_names in self._group_name_to_names:\n sampled_nums_group = [sampled_num_dict[n] for n in class_names]\n sampled_num = np.max(sampled_nums_group)\n sample_num_per_class.append(sampled_num)\n sampled_groups.append(group_name)\n total_group_ids = gt_group_ids\n sampled = []\n sampled_gt_boxes = []\n avoid_coll_boxes = gt_boxes\n \n for class_name, sampled_num in zip(sampled_groups,\n sample_num_per_class):\n if sampled_num > 0:\n if self._use_group_sampling:\n sampled_cls = self.sample_group(class_name, sampled_num,\n avoid_coll_boxes, total_group_ids)\n else:\n sampled_cls = self.sample_class_v2(class_name, sampled_num,\n avoid_coll_boxes)\n\n sampled += sampled_cls\n if len(sampled_cls) > 0:\n if len(sampled_cls) == 1:\n sampled_gt_box = sampled_cls[0][\"box3d_lidar\"][\n np.newaxis, ...]\n else:\n sampled_gt_box = np.stack(\n [s[\"box3d_lidar\"] for s in sampled_cls], axis=0)\n\n sampled_gt_boxes += [sampled_gt_box]\n avoid_coll_boxes = np.concatenate(\n [avoid_coll_boxes, sampled_gt_box], axis=0)\n if self._use_group_sampling:\n if len(sampled_cls) == 1:\n sampled_group_ids = np.array(sampled_cls[0][\"group_id\"])[np.newaxis, ...]\n else:\n sampled_group_ids = np.stack(\n [s[\"group_id\"] for s in sampled_cls], axis=0)\n total_group_ids = np.concatenate(\n [total_group_ids, sampled_group_ids], axis=0)\n\n if len(sampled) > 0:\n sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)\n num_sampled = len(sampled)\n s_points_list = []\n for info in sampled:\n s_points = np.fromfile(\n str(pathlib.Path(root_path) / info[\"path\"]),\n dtype=np.float32)\n s_points = s_points.reshape([-1, num_point_features])\n # if not add_rgb_to_points:\n # s_points = s_points[:, :4]\n if \"rot_transform\" in info:\n rot = info[\"rot_transform\"]\n s_points[:, :3] = box_np_ops.rotation_points_single_angle(\n s_points[:, :3], rot, axis=2)\n s_points[:, :3] += info[\"box3d_lidar\"][:3]\n s_points_list.append(s_points)\n # print(pathlib.Path(info[\"path\"]).stem)\n # gt_bboxes = np.stack([s[\"bbox\"] for s in sampled], axis=0)\n # if np.random.choice([False, True], replace=False, p=[0.3, 0.7]):\n # do random crop.\n if random_crop:\n s_points_list_new = []\n gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect,\n Trv2c, P2)\n crop_frustums = prep.random_crop_frustum(\n gt_bboxes, rect, Trv2c, P2)\n for i in range(crop_frustums.shape[0]):\n s_points = s_points_list[i]\n mask = prep.mask_points_in_corners(\n s_points, crop_frustums[i:i + 1]).reshape(-1)\n num_remove = np.sum(mask)\n if num_remove > 0 and (\n s_points.shape[0] - num_remove) > 15:\n s_points = s_points[np.logical_not(mask)]\n s_points_list_new.append(s_points)\n s_points_list = s_points_list_new\n ret = {\n \"gt_names\": np.array([s[\"name\"] for s in sampled]),\n \"difficulty\": np.array([s[\"difficulty\"] for s in sampled]),\n \"gt_boxes\": sampled_gt_boxes,\n \"points\": np.concatenate(s_points_list, axis=0),\n \"gt_masks\": np.ones((num_sampled, ), dtype=np.bool_)\n }\n if self._use_group_sampling:\n ret[\"group_ids\"] = np.array([s[\"group_id\"] for s in sampled])\n else:\n ret[\"group_ids\"] = np.arange(gt_boxes.shape[0], gt_boxes.shape[0] + len(sampled))\n else:\n ret = None\n return ret\n\n def sample(self, name, num):\n if self._use_group_sampling:\n group_name = name\n ret = self._sampler_dict[group_name].sample(num)\n groups_num = [len(l) for l in ret]\n return reduce(lambda x, y: x + y, ret), groups_num\n else:\n ret = self._sampler_dict[name].sample(num)\n return ret, np.ones((len(ret), ), dtype=np.int64)\n\n def 
sample_v1(self, name, num):\n if isinstance(name, (list, tuple)):\n group_name = \", \".join(name)\n ret = self._sampler_dict[group_name].sample(num)\n groups_num = [len(l) for l in ret]\n return reduce(lambda x, y: x + y, ret), groups_num\n else:\n ret = self._sampler_dict[name].sample(num)\n return ret, np.ones((len(ret), ), dtype=np.int64)\n\n\n def sample_class_v2(self, name, num, gt_boxes):\n sampled = self._sampler_dict[name].sample(num)\n sampled = copy.deepcopy(sampled)\n num_gt = gt_boxes.shape[0]\n num_sampled = len(sampled)\n gt_boxes_bv = box_np_ops.center_to_corner_box2d(\n gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, 6])\n\n sp_boxes = np.stack([i[\"box3d_lidar\"] for i in sampled], axis=0)\n\n valid_mask = np.zeros([gt_boxes.shape[0]], dtype=np.bool_)\n valid_mask = np.concatenate(\n [valid_mask,\n np.ones([sp_boxes.shape[0]], dtype=np.bool_)], axis=0)\n boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()\n if self._enable_global_rot:\n # place samples to any place in a circle.\n prep.noise_per_object_v3_(\n boxes,\n None,\n valid_mask,\n 0,\n 0,\n self._global_rot_range,\n num_try=100)\n sp_boxes_new = boxes[gt_boxes.shape[0]:]\n sp_boxes_bv = box_np_ops.center_to_corner_box2d(\n sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, 6])\n\n total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)\n # coll_mat = collision_test_allbox(total_bv)\n coll_mat = prep.box_collision_test(total_bv, total_bv)\n diag = np.arange(total_bv.shape[0])\n coll_mat[diag, diag] = False\n\n valid_samples = []\n for i in range(num_gt, num_gt + num_sampled):\n if coll_mat[i].any():\n coll_mat[i] = False\n coll_mat[:, i] = False\n else:\n if self._enable_global_rot:\n sampled[i - num_gt][\"box3d_lidar\"][:2] = boxes[i, :2]\n sampled[i - num_gt][\"box3d_lidar\"][-1] = boxes[i, -1]\n sampled[i - num_gt][\"rot_transform\"] = (\n boxes[i, -1] - sp_boxes[i - num_gt, -1])\n valid_samples.append(sampled[i - num_gt])\n return valid_samples\n\n def sample_group(self, name, num, gt_boxes, gt_group_ids):\n sampled, group_num = self.sample(name, num)\n sampled = copy.deepcopy(sampled)\n # rewrite sampled group id to avoid duplicated with gt group ids\n gid_map = {}\n max_gt_gid = np.max(gt_group_ids)\n sampled_gid = max_gt_gid + 1\n for s in sampled:\n gid = s[\"group_id\"]\n if gid in gid_map:\n s[\"group_id\"] = gid_map[gid]\n else:\n gid_map[gid] = sampled_gid\n s[\"group_id\"] = sampled_gid\n sampled_gid += 1\n \n num_gt = gt_boxes.shape[0]\n gt_boxes_bv = box_np_ops.center_to_corner_box2d(\n gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, 6])\n\n sp_boxes = np.stack([i[\"box3d_lidar\"] for i in sampled], axis=0)\n sp_group_ids = np.stack([i[\"group_id\"] for i in sampled], axis=0)\n valid_mask = np.zeros([gt_boxes.shape[0]], dtype=np.bool_)\n valid_mask = np.concatenate(\n [valid_mask,\n np.ones([sp_boxes.shape[0]], dtype=np.bool_)], axis=0)\n boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()\n group_ids = np.concatenate([gt_group_ids, sp_group_ids], axis=0)\n if self._enable_global_rot:\n # place samples to any place in a circle.\n prep.noise_per_object_v3_(\n boxes,\n None,\n valid_mask,\n 0,\n 0,\n self._global_rot_range,\n group_ids=group_ids,\n num_try=100)\n sp_boxes_new = boxes[gt_boxes.shape[0]:]\n sp_boxes_bv = box_np_ops.center_to_corner_box2d(\n sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, 6])\n total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)\n # coll_mat = collision_test_allbox(total_bv)\n coll_mat = 
prep.box_collision_test(total_bv, total_bv)\n diag = np.arange(total_bv.shape[0])\n coll_mat[diag, diag] = False\n valid_samples = []\n idx = num_gt\n for num in group_num:\n if coll_mat[idx:idx + num].any():\n coll_mat[idx:idx + num] = False\n coll_mat[:, idx:idx + num] = False\n else:\n for i in range(num):\n if self._enable_global_rot:\n sampled[idx - num_gt + i][\"box3d_lidar\"][:2] = boxes[idx + i, :2]\n sampled[idx - num_gt + i][\"box3d_lidar\"][-1] = boxes[idx + i, -1]\n sampled[idx - num_gt + i][\"rot_transform\"] = (\n boxes[idx + i, -1] - sp_boxes[idx + i - num_gt, -1])\n\n valid_samples.append(sampled[idx - num_gt + i])\n idx += num\n return valid_samples" ]
[ [ "numpy.logical_not", "numpy.abs", "numpy.arange", "numpy.stack", "numpy.ones", "numpy.concatenate", "numpy.max", "numpy.round", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
wilsonyhlee/datasets
[ "0e87e1d053220e8ecddfa679bcd89a4c7bc5af62" ]
[ "src/datasets/search.py" ]
[ "import importlib.util\nimport os\nimport tempfile\nfrom pathlib import PurePath\nfrom typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union\n\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nfrom .utils.logging import WARNING, get_logger\n\n\nif TYPE_CHECKING:\n from .arrow_dataset import Dataset # noqa: F401\n\n try:\n from elasticsearch import Elasticsearch # noqa: F401\n\n except ImportError:\n pass\n try:\n import faiss # noqa: F401\n\n except ImportError:\n pass\n\n_has_elasticsearch = importlib.util.find_spec(\"elasticsearch\") is not None\n_has_faiss = importlib.util.find_spec(\"faiss\") is not None\n\n\nlogger = get_logger(__name__)\n\n\nclass MissingIndex(Exception):\n pass\n\n\nSearchResults = NamedTuple(\"SearchResults\", [(\"scores\", List[float]), (\"indices\", List[int])])\nBatchedSearchResults = NamedTuple(\n \"BatchedSearchResults\", [(\"total_scores\", List[List[float]]), (\"total_indices\", List[List[int]])]\n)\n\nNearestExamplesResults = NamedTuple(\"NearestExamplesResults\", [(\"scores\", List[float]), (\"examples\", dict)])\nBatchedNearestExamplesResults = NamedTuple(\n \"BatchedNearestExamplesResults\", [(\"total_scores\", List[List[float]]), (\"total_examples\", List[dict])]\n)\n\n\nclass BaseIndex:\n \"\"\"Base class for indexing\"\"\"\n\n def search(self, query, k: int = 10) -> SearchResults:\n \"\"\"\n To implement.\n This method has to return the scores and the indices of the retrieved examples given a certain query.\n \"\"\"\n raise NotImplementedError\n\n def search_batch(self, queries, k: int = 10) -> BatchedSearchResults:\n \"\"\"Find the nearest examples indices to the query.\n\n Args:\n queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.\n k (`int`): The number of examples to retrieve per query.\n\n Ouput:\n total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_indices (`List[List[int]]`): The indices of the retrieved examples per query.\n \"\"\"\n total_scores, total_indices = [], []\n for query in queries:\n scores, indices = self.search(query, k)\n total_scores.append(scores)\n total_indices.append(indices)\n return BatchedSearchResults(total_scores, total_indices)\n\n def save(self, file: Union[str, PurePath]):\n \"\"\"Serialize the index on disk\"\"\"\n raise NotImplementedError\n\n @classmethod\n def load(cls, file: Union[str, PurePath]) -> \"BaseIndex\":\n \"\"\"Deserialize the index from disk\"\"\"\n raise NotImplementedError\n\n\nclass ElasticSearchIndex(BaseIndex):\n \"\"\"\n Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.\n An Elasticsearch server needs to be accessible, and a python client is declared with\n ```\n es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])\n ```\n for example.\n \"\"\"\n\n def __init__(\n self,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"Elasticsearch\"] = None,\n es_index_name: Optional[str] = None,\n es_index_config: Optional[dict] = None,\n ):\n assert (\n _has_elasticsearch\n ), \"You must install ElasticSearch to use ElasticSearchIndex. 
To do so you can run `pip install elasticsearch==7.7.1 for example`\"\n assert es_client is None or (\n host is None and port is None\n ), \"Please specify either `es_client` or `(host, port)`, but not both.\"\n host = host or \"localhost\"\n port = port or 9200\n\n import elasticsearch.helpers # noqa: need this to properly load all the es features\n from elasticsearch import Elasticsearch # noqa: F811\n\n self.es_client = es_client if es_client is not None else Elasticsearch([{\"host\": host, \"port\": str(port)}])\n self.es_index_name = (\n es_index_name\n if es_index_name is not None\n else \"huggingface_datasets_\" + os.path.basename(tempfile.NamedTemporaryFile().name)\n )\n self.es_index_config = (\n es_index_config\n if es_index_config is not None\n else {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \" stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\"properties\": {\"text\": {\"type\": \"text\", \"analyzer\": \"standard\", \"similarity\": \"BM25\"}}},\n }\n )\n\n def add_documents(self, documents: Union[List[str], \"Dataset\"], column: Optional[str] = None):\n \"\"\"\n Add documents to the index.\n If the documents are inside a certain column, you can specify it using the `column` argument.\n \"\"\"\n index_name = self.es_index_name\n index_config = self.es_index_config\n self.es_client.indices.create(index=index_name, body=index_config)\n number_of_docs = len(documents)\n not_verbose = bool(logger.getEffectiveLevel() > WARNING)\n progress = tqdm(unit=\"docs\", total=number_of_docs, disable=not_verbose)\n successes = 0\n\n def passage_generator():\n if column is not None:\n for i, example in enumerate(documents):\n yield {\"text\": example[column], \"_id\": i}\n else:\n for i, example in enumerate(documents):\n yield {\"text\": example, \"_id\": i}\n\n # create the ES index\n import elasticsearch as es\n\n for ok, action in es.helpers.streaming_bulk(\n client=self.es_client,\n index=index_name,\n actions=passage_generator(),\n ):\n progress.update(1)\n successes += ok\n if successes != len(documents):\n logger.warning(\n f\"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}\"\n )\n logger.info(\"Indexed %d documents\" % (successes,))\n\n def search(self, query: str, k=10) -> SearchResults:\n \"\"\"Find the nearest examples indices to the query.\n\n Args:\n query (`str`): The query as a string.\n k (`int`): The number of examples to retrieve.\n\n Ouput:\n scores (`List[List[float]`): The retrieval scores of the retrieved examples.\n indices (`List[List[int]]`): The indices of the retrieved examples.\n \"\"\"\n response = self.es_client.search(\n index=self.es_index_name,\n body={\"query\": {\"multi_match\": {\"query\": query, \"fields\": [\"text\"], \"type\": \"cross_fields\"}}, \"size\": k},\n )\n hits = response[\"hits\"][\"hits\"]\n return SearchResults([hit[\"_score\"] for hit in hits], [int(hit[\"_id\"]) for hit in hits])\n\n\nclass FaissIndex(BaseIndex):\n \"\"\"\n Dense index using Faiss. 
It is used to index vectors.\n Faiss is a library for efficient similarity search and clustering of dense vectors.\n It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.\n You can find more information about Faiss here:\n - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU\n \"\"\"\n\n def __init__(\n self,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None,\n ):\n \"\"\"\n Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n \"\"\"\n assert not (\n string_factory is not None and custom_index is not None\n ), \"Please specify either `string_factory` or `custom_index` but not both.\"\n self.device = device\n self.string_factory = string_factory\n self.metric_type = metric_type\n self.faiss_index = custom_index\n if not _has_faiss:\n raise ImportError(\n \"You must install Faiss to use FaissIndex. To do so you can run `pip install faiss-cpu` or `pip install faiss-gpu`\"\n )\n\n def add_vectors(\n self,\n vectors: Union[np.array, \"Dataset\"],\n column: Optional[str] = None,\n batch_size: int = 1000,\n train_size: Optional[int] = None,\n faiss_verbose: Optional[bool] = None,\n ):\n \"\"\"\n Add vectors to the index.\n If the arrays are inside a certain column, you can specify it using the `column` argument.\n \"\"\"\n import faiss # noqa: F811\n\n # Create index\n if self.faiss_index is None:\n size = len(vectors[0]) if column is None else len(vectors[0][column])\n if self.string_factory is not None:\n if self.metric_type is None:\n index = faiss.index_factory(size, self.string_factory)\n else:\n index = faiss.index_factory(size, self.string_factory, self.metric_type)\n else:\n if self.metric_type is None:\n index = faiss.IndexFlat(size)\n else:\n index = faiss.IndexFlat(size, self.metric_type)\n if self.device is not None and self.device > -1:\n self.faiss_res = faiss.StandardGpuResources()\n index = faiss.index_cpu_to_gpu(self.faiss_res, self.device, index)\n self.faiss_index = index\n logger.info(\"Created faiss index of type {}\".format(type(self.faiss_index)))\n\n # Set verbosity level\n if faiss_verbose is not None:\n self.faiss_index.verbose = faiss_verbose\n if hasattr(self.faiss_index, \"index\") and self.faiss_index.index is not None:\n self.faiss_index.index.verbose = faiss_verbose\n if hasattr(self.faiss_index, \"quantizer\") and self.faiss_index.quantizer is not None:\n self.faiss_index.quantizer.verbose = faiss_verbose\n if hasattr(self.faiss_index, \"clustering_index\") and self.faiss_index.clustering_index is not None:\n self.faiss_index.clustering_index.verbose = faiss_verbose\n\n # Train\n if train_size is not None:\n train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]\n logger.info(\"Training the index with the first {} vectors\".format(len(train_vecs)))\n self.faiss_index.train(train_vecs)\n else:\n logger.info(\"Ignored the training step of the faiss index as `train_size` is None.\")\n\n # Add vectors\n logger.info(\"Adding {} vectors to the faiss index\".format(len(vectors)))\n not_verbose = bool(logger.getEffectiveLevel() > 
WARNING)\n for i in tqdm(range(0, len(vectors), batch_size), disable=not_verbose):\n vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]\n self.faiss_index.add(vecs)\n\n def search(self, query: np.array, k=10) -> SearchResults:\n \"\"\"Find the nearest examples indices to the query.\n\n Args:\n query (`np.array`): The query as a numpy array.\n k (`int`): The number of examples to retrieve.\n\n Ouput:\n scores (`List[List[float]`): The retrieval scores of the retrieved examples.\n indices (`List[List[int]]`): The indices of the retrieved examples.\n \"\"\"\n assert len(query.shape) == 1 or (len(query.shape) == 2 and query.shape[0] == 1)\n queries = query.reshape(1, -1)\n if not queries.flags.c_contiguous:\n queries = np.asarray(queries, order=\"C\")\n scores, indices = self.faiss_index.search(queries, k)\n return SearchResults(scores[0], indices[0].astype(int))\n\n def search_batch(self, queries: np.array, k=10) -> BatchedSearchResults:\n \"\"\"Find the nearest examples indices to the queries.\n\n Args:\n queries (`np.array`): The queries as a numpy array.\n k (`int`): The number of examples to retrieve.\n\n Ouput:\n total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_indices (`List[List[int]]`): The indices of the retrieved examples per query.\n \"\"\"\n assert len(queries.shape) == 2\n if not queries.flags.c_contiguous:\n queries = np.asarray(queries, order=\"C\")\n scores, indices = self.faiss_index.search(queries, k)\n return BatchedSearchResults(scores, indices.astype(int))\n\n def save(self, file: Union[str, PurePath]):\n \"\"\"Serialize the FaissIndex on disk\"\"\"\n import faiss # noqa: F811\n\n if self.device is not None and self.device > -1:\n index = faiss.index_gpu_to_cpu(self.faiss_index)\n else:\n index = self.faiss_index\n\n faiss.write_index(index, str(file))\n\n @classmethod\n def load(\n cls,\n file: Union[str, PurePath],\n device: Optional[int] = None,\n ) -> \"FaissIndex\":\n \"\"\"Deserialize the FaissIndex from disk\"\"\"\n import faiss # noqa: F811\n\n faiss_index = cls(device=device)\n index = faiss.read_index(str(file))\n if faiss_index.device is not None and faiss_index.device > -1:\n faiss_index.faiss_res = faiss.StandardGpuResources()\n index = faiss.index_cpu_to_gpu(faiss_index.faiss_res, faiss_index.device, index)\n faiss_index.faiss_index = index\n return faiss_index\n\n\nclass IndexableMixin:\n \"\"\"Add indexing features to `datasets.Dataset`\"\"\"\n\n def __init__(self):\n self._indexes: Dict[str, BaseIndex] = {}\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, key):\n raise NotImplementedError\n\n def is_index_initialized(self, index_name: str) -> bool:\n return index_name in self._indexes\n\n def _check_index_is_initialized(self, index_name: str):\n if not self.is_index_initialized(index_name):\n raise MissingIndex(\n f\"Index with index_name '{index_name}' not initialized yet. 
Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first.\"\n )\n\n def list_indexes(self) -> List[str]:\n \"\"\"List the colindex_nameumns/identifiers of all the attached indexes.\"\"\"\n return list(self._indexes)\n\n def get_index(self, index_name: str) -> BaseIndex:\n \"\"\"List the index_name/identifiers of all the attached indexes.\n\n Args:\n index_name (:obj:`str`): Index name.\n\n Returns:\n :class:`BaseIndex`\n \"\"\"\n self._check_index_is_initialized(index_name)\n return self._indexes[index_name]\n\n def add_faiss_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None,\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n The index is created using the vectors of the specified column.\n You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n\n Args:\n column (:obj:`str`): The column of the vectors to add to the index.\n index_name (Optional :obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.\n By defaul it corresponds to `column`.\n device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU.\n string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.\n metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index.\n \"\"\"\n index_name = index_name if index_name is not None else column\n faiss_index = FaissIndex(\n device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index\n )\n faiss_index.add_vectors(self, column=column, train_size=train_size, faiss_verbose=faiss_verbose)\n self._indexes[index_name] = faiss_index\n\n def add_faiss_index_from_external_arrays(\n self,\n external_arrays: np.array,\n index_name: str,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None,\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n The index is created using the vectors of `external_arrays`.\n You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n\n Args:\n external_arrays (:obj:`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.\n It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.\n index_name (:obj:`str`): The index_name/identifier of the index. 
This is the index_name that is used to call `.get_nearest` or `.search`.\n device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU.\n string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.\n metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index.\n \"\"\"\n faiss_index = FaissIndex(\n device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index\n )\n faiss_index.add_vectors(external_arrays, column=None, train_size=train_size, faiss_verbose=faiss_verbose)\n self._indexes[index_name] = faiss_index\n\n def save_faiss_index(self, index_name: str, file: Union[str, PurePath]):\n \"\"\"Save a FaissIndex on disk.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.\n file (:obj:`str`): The path to the serialized faiss index on disk.\n \"\"\"\n index = self.get_index(index_name)\n if not isinstance(index, FaissIndex):\n raise ValueError(\"Index '{}' is not a FaissIndex but a '{}'\".format(index_name, type(index)))\n index.save(file)\n logger.info(\"Saved FaissIndex {} at {}\".format(index_name, file))\n\n def load_faiss_index(\n self,\n index_name: str,\n file: Union[str, PurePath],\n device: Optional[int] = None,\n ):\n \"\"\"Load a FaissIndex from disk.\n\n If you want to do additional configurations, you can have access to the faiss index object by doing\n `.get_index(index_name).faiss_index` to make it fit your needs.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index. This is the index_name that is used to\n call `.get_nearest` or `.search`.\n file (:obj:`str`): The path to the serialized faiss index on disk.\n device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU.\n \"\"\"\n index = FaissIndex.load(file, device=device)\n assert index.faiss_index.ntotal == len(\n self\n ), \"Index size should match Dataset size, but Index '{}' at {} has {} elements while the dataset has {} examples.\".format(\n index_name, file, index.faiss_index.ntotal, len(self)\n )\n self._indexes[index_name] = index\n logger.info(\"Loaded FaissIndex {} from {}\".format(index_name, file))\n\n def add_elasticsearch_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"Elasticsearch\"] = None,\n es_index_name: Optional[str] = None,\n es_index_config: Optional[dict] = None,\n ):\n \"\"\"Add a text index using ElasticSearch for fast retrieval.\n\n Args:\n column (:obj:`str`): The column of the documents to add to the index.\n index_name (Optional :obj:`str`): The index_name/identifier of the index. 
This is the index name that is used to call `.get_nearest` or `.search`.\n By defaul it corresponds to `column`.\n host (Optional :obj:`str`, defaults to localhost):\n host of where ElasticSearch is running\n port (Optional :obj:`str`, defaults to 9200):\n port of where ElasticSearch is running\n es_client (Optional :obj:`elasticsearch.Elasticsearch`):\n The elasticsearch client used to create the index if host and port are None.\n es_index_name (Optional :obj:`str`): The elasticsearch index name used to create the index.\n es_index_config (Optional :obj:`dict`):\n The configuration of the elasticsearch index.\n Default config is:\n\n Config::\n\n {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \" stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"analyzer\": \"standard\",\n \"similarity\": \"BM25\"\n },\n }\n },\n }\n \"\"\"\n index_name = index_name if index_name is not None else column\n es_index = ElasticSearchIndex(\n host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config\n )\n es_index.add_documents(self, column=column)\n self._indexes[index_name] = es_index\n\n def load_elasticsearch_index(\n self,\n index_name: str,\n es_index_name: str,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"Elasticsearch\"] = None,\n es_index_config: Optional[dict] = None,\n ):\n \"\"\"Load an existing text index using ElasticSearch for fast retrieval.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.\n es_index_name (:obj:`str`): The name of elasticsearch index to load.\n host (Optional :obj:`str`, defaults to localhost):\n host of where ElasticSearch is running\n port (Optional :obj:`str`, defaults to 9200):\n port of where ElasticSearch is running\n es_client (Optional :obj:`elasticsearch.Elasticsearch`):\n The elasticsearch client used to create the index if host and port are None.\n es_index_config (Optional :obj:`dict`):\n The configuration of the elasticsearch index.\n Default config is::\n\n {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \" stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"analyzer\": \"standard\",\n \"similarity\": \"BM25\"\n },\n }\n },\n }\n \"\"\"\n self._indexes[index_name] = ElasticSearchIndex(\n host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config\n )\n\n def drop_index(self, index_name: str):\n \"\"\"Drop the index with the specified column.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n \"\"\"\n del self._indexes[index_name]\n\n def search(self, index_name: str, query: Union[str, np.array], k: int = 10) -> SearchResults:\n \"\"\"Find the nearest examples indices in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The name/identifier of the index.\n query (:obj:`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve.\n\n Returns:\n scores (:obj:`List[List[float]`): The retrieval scores of the retrieved examples.\n indices (:obj:`List[List[int]]`): The indices of the retrieved examples.\n 
\"\"\"\n self._check_index_is_initialized(index_name)\n return self._indexes[index_name].search(query, k)\n\n def search_batch(self, index_name: str, queries: Union[List[str], np.array], k: int = 10) -> BatchedSearchResults:\n \"\"\"Find the nearest examples indices in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n queries (:obj:`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve per query.\n\n Returns:\n total_scores (:obj:`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_indices (:obj:`List[List[int]]`): The indices of the retrieved examples per query.\n \"\"\"\n self._check_index_is_initialized(index_name)\n return self._indexes[index_name].search_batch(queries, k)\n\n def get_nearest_examples(\n self, index_name: str, query: Union[str, np.array], k: int = 10\n ) -> NearestExamplesResults:\n \"\"\"Find the nearest examples in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n query (:obj:`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve.\n\n Returns:\n scores (:obj:`List[float]`): The retrieval scores of the retrieved examples.\n examples (:obj:`dict`): The retrieved examples.\n \"\"\"\n self._check_index_is_initialized(index_name)\n scores, indices = self.search(index_name, query, k)\n return NearestExamplesResults(scores, self[[i for i in indices if i >= 0]])\n\n def get_nearest_examples_batch(\n self, index_name: str, queries: Union[List[str], np.array], k: int = 10\n ) -> BatchedNearestExamplesResults:\n \"\"\"Find the nearest examples in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n queries (:obj:`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve per query.\n\n Returns:\n total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_examples (`List[dict]`): The retrieved examples per query.\n \"\"\"\n self._check_index_is_initialized(index_name)\n total_scores, total_indices = self.search_batch(index_name, queries, k)\n return BatchedNearestExamplesResults(\n total_scores, [self[[i for i in indices if i >= 0]] for indices in total_indices]\n )\n" ]
[ [ "numpy.asarray" ] ]
fainall1618/transformer-xl-chinese
[ "d28fc131ed8d5453bbb107e4cb23073edc998e58" ]
[ "tf/data_utils_chinese.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nfrom functools import partial\n\nfrom collections import Counter, OrderedDict\nimport pickle\nimport json\nimport multiprocessing as mp\n\nimport numpy as np\n\nfrom absl import flags\nimport tensorflow as tf\nfrom vocabulary import Vocab\n\nfrom tensorflow.io.gfile import exists as exists\nfrom tensorflow.io.gfile import makedirs as makedirs\nfrom tensorflow.io.gfile import glob as glob\n\n\ndef _preprocess(shard, train, vocab, save_dir, cutoffs, bin_sizes, bsz, tgt_len,\n num_core_per_host, use_tpu, num_shuffle):\n file_names = []\n num_batch = 0\n\n path = train[shard]\n data_shard = vocab.encode_file(path, ordered=False, add_double_eos=True)\n\n for shuffle in range(num_shuffle):\n basename = \"train-{:03d}-{:02d}\".format(shard, shuffle)\n print(\"Processing shard {} shuffle {}\".format(shard, shuffle))\n\n np.random.shuffle(data_shard)\n file_name, num_batch_shuffle = create_ordered_tfrecords(\n save_dir, basename, np.concatenate(data_shard), bsz, tgt_len,\n num_core_per_host, cutoffs, bin_sizes, use_tpu=use_tpu)\n file_names.append(file_name)\n num_batch += num_batch_shuffle\n\n return file_names, num_batch\n\n\nclass Corpus(object):\n def __init__(self, path, dataset, *args, **kwargs):\n self.dataset = dataset\n self.vocab = Vocab(*args, **kwargs)\n\n self.vocab.count_file(os.path.join(path, \"train.txt\"))\n self.vocab.build_vocab()\n\n self.train = self.vocab.encode_file(\n os.path.join(path, \"train.txt\"), ordered=True)\n self.valid = self.vocab.encode_file(\n os.path.join(path, \"valid.txt\"), ordered=True)\n self.test = self.vocab.encode_file(\n os.path.join(path, \"train.txt\"), ordered=True)\n\n self.cutoffs = []\n\n def convert_to_tfrecords(self, split, save_dir, bsz, tgt_len,\n num_core_per_host, **kwargs):\n FLAGS = kwargs.get('FLAGS')\n\n file_names = []\n use_tpu = FLAGS.use_tpu and not (split == \"test\" and num_core_per_host == 1)\n\n if use_tpu:\n record_name = \"record_info-{}.bsz-{}.tlen-{}.core-{}.json\".format(\n split, bsz, tgt_len, num_core_per_host)\n else:\n record_name = \"record_info-{}.bsz-{}.tlen-{}.json\".format(\n split, bsz, tgt_len)\n\n record_info_path = os.path.join(save_dir, record_name)\n\n if self.dataset in [\"ptb\", \"wt2\", \"wt103\", \"enwik8\", \"tangshi\", \"doupo\", \"test\", \"zhihu\", \"poetry\"]:\n data = getattr(self, split)\n\n bin_sizes = get_bin_sizes(\n data, bsz // num_core_per_host, tgt_len, self.cutoffs)\n file_name, num_batch = create_ordered_tfrecords(\n save_dir, split, data, bsz, tgt_len, num_core_per_host,\n self.cutoffs, bin_sizes,\n num_passes=FLAGS.num_passes if split == 'train' and use_tpu else 1,\n use_tpu=use_tpu)\n file_names.append(file_name)\n\n with open(record_info_path, \"w\") as fp:\n record_info = {\n \"filenames\": file_names,\n \"bin_sizes\": bin_sizes,\n \"num_batch\": num_batch\n }\n json.dump(record_info, fp)\n\n\ndef get_bin_sizes(data, batch_size, tgt_len, cutoffs, std_mult=[2.5, 2.5, 2.5]):\n \"\"\"\n Note: the `batch_size` here should be per-core batch size\n \"\"\"\n bin_sizes = []\n\n def _nearest_to_eight(x): # so that it's faster on TPUs\n y = x - x % 8\n return y + 8 if x % 8 >= 4 else max(8, y)\n\n if cutoffs:\n num_batch = len(data) // batch_size // tgt_len\n\n data = data[:batch_size * num_batch * tgt_len]\n data = data.reshape(batch_size, num_batch, tgt_len)\n\n tot = batch_size * tgt_len\n for b, (left, right) in enumerate(zip(cutoffs[1:-1], 
cutoffs[2:])):\n mask = (data >= left) * (data < right)\n percents = mask.astype(np.float64).sum(2).sum(0) / tot\n mean = np.mean(percents)\n std = np.std(percents)\n\n bin_size = int(math.ceil(tgt_len * batch_size * (mean + std_mult[b] * std)))\n bin_size = _nearest_to_eight(bin_size)\n bin_sizes.append(bin_size)\n\n return bin_sizes\n\n\ndef _int64_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n\ndef _float_feature(values):\n return tf.train.Feature(float_list=tf.train.FloatList(value=values))\n\n\ndef batchify(data, batch_size, num_passes):\n \"\"\"\n if use_tpu = True: num_passes > 1\n\n Since TPU training requires entire [bsz x tgt_len] chunks, it can discard\n as many as `bsz * tgt_len` tokens in training. When `bsz` and `tgt_len` are\n both large, as in the case of TPU training for Transformer-XL, the problem\n may lead to detectable performance drop.\n\n Here, we use multiple randomly shifted copies to deal with this problem.\n \"\"\"\n if num_passes > 1:\n data_len = len(data)\n double_data = np.concatenate([data, data])\n data_list = []\n for i in range(num_passes):\n start = np.random.randint(0, data_len)\n data_list.append(double_data[start:start + data_len])\n data = np.concatenate(data_list)\n\n num_step = len(data) // batch_size\n data = data[:batch_size * num_step]\n data = data.reshape(batch_size, num_step)\n\n return data\n\n\ndef create_ordered_tfrecords(save_dir, basename, data, batch_size, tgt_len,\n num_core_per_host, cutoffs=[], bin_sizes=[],\n num_passes=1, use_tpu=False):\n # save_dir 就是tfrecord的路径\n if use_tpu:\n file_name = \"{}.bsz-{}.tlen-{}.core-{}.tfrecords\".format(\n basename, batch_size, tgt_len, num_core_per_host)\n else:\n file_name = \"{}.bsz-{}.tlen-{}.tfrecords\".format(\n basename, batch_size, tgt_len)\n\n save_path = os.path.join(save_dir, file_name)\n record_writer = tf.compat.v1.python_io.TFRecordWriter(save_path)\n\n batched_data = batchify(data, batch_size, num_passes)\n\n num_batch = 0\n for t in range(0, batched_data.shape[1] - 1, tgt_len):\n cur_tgt_len = min(batched_data.shape[1] - 1 - t, tgt_len)\n # drop the remainder if use tpu\n if use_tpu and cur_tgt_len < tgt_len:\n break\n if num_batch % 500 == 0:\n print(\" processing batch {}\".format(num_batch))\n for idx in range(batch_size):\n inputs = batched_data[idx, t:t + cur_tgt_len]\n labels = batched_data[idx, t + 1:t + cur_tgt_len + 1]\n\n # features dict\n feature = {\n \"inputs\": _int64_feature(inputs),\n \"labels\": _int64_feature(labels),\n }\n\n if len(cutoffs) > 0 and use_tpu:\n # validate `bin_sizes` and `cutoffs`\n assert len(cutoffs) - len(bin_sizes) == 2, \\\n \"len(cutoffs) - len(bin_sizes) != 2\"\n\n # mask for bin 0\n left, right = cutoffs[:2]\n inp_mask = ((inputs >= left) * (inputs < right)).astype(np.float32)\n tgt_mask = ((labels >= left) * (labels < right)).astype(np.float32)\n\n feature[\"inp_mask\"] = _float_feature(inp_mask)\n feature[\"tgt_mask\"] = _float_feature(tgt_mask)\n\n # refresh `inp_cnts` and `tgt_cnts` for each TPU core\n if idx % (batch_size // num_core_per_host) == 0:\n inp_cnts = [0] * len(bin_sizes)\n tgt_cnts = [0] * len(bin_sizes)\n\n head_labels = np.copy(labels)\n inp_pos_per_bin, tgt_pos_per_bin = [], []\n for b, (left, right) in enumerate(zip(cutoffs[1:-1], cutoffs[2:])):\n inp_pos = np.where((inputs >= left) * (inputs < right))[0]\n tgt_pos = np.where((labels >= left) * (labels < right))[0]\n inp_pos_per_bin.append(inp_pos)\n tgt_pos_per_bin.append(tgt_pos)\n\n head_labels[tgt_pos] = cutoffs[1] + 
b\n\n feature[\"head_labels\"] = _int64_feature(head_labels)\n\n # permutation feature\n def _add_perm_feature(feature, pos_per_bin, cnts, prefix):\n for b, pos in enumerate(pos_per_bin):\n idx_tuple = []\n for p in pos:\n if cnts[b] < bin_sizes[b]:\n idx_tuple.append([p, cnts[b]])\n cnts[b] += 1\n else:\n break\n\n n_tup = len(idx_tuple)\n tup = np.array(idx_tuple).reshape(n_tup * 2)\n\n feature[\"{}_cnt_{}\".format(prefix, b)] = _int64_feature([n_tup])\n feature[\"{}_tup_{}\".format(prefix, b)] = _int64_feature(tup)\n\n _add_perm_feature(feature, inp_pos_per_bin, inp_cnts, \"inp\")\n _add_perm_feature(feature, tgt_pos_per_bin, tgt_cnts, \"tgt\")\n\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n record_writer.write(example.SerializeToString())\n\n num_batch += 1\n\n record_writer.close()\n print(\"Done writing {}. batches: {}\".format(file_name, num_batch))\n\n return file_name, num_batch\n\n\ndef get_lm_corpus(data_dir, dataset):\n fn = os.path.join(data_dir, \"cache.pkl\")\n\n if exists(fn):\n print(\"Loading cached dataset...\")\n with open(fn, \"rb\") as fp:\n corpus = pickle.load(fp)\n else:\n print(\"Producing dataset...\")\n kwargs = {}\n if dataset in [\"doupo\", \"test\", \"wt103\", \"zhihu\", \"poetry\", \"tangshi\"]:\n kwargs[\"special\"] = [\"<eos>\"]\n kwargs[\"lower_case\"] = False\n\n corpus = Corpus(data_dir, dataset, **kwargs)\n\n print(\"Saving dataset...\")\n with open(fn, \"wb\") as fp:\n pickle.dump(corpus, fp, protocol=2)\n\n corpus_info = {\n \"vocab_size\": len(corpus.vocab),\n \"cutoffs\": corpus.cutoffs,\n \"dataset\": corpus.dataset\n }\n with open(os.path.join(data_dir, \"corpus-info.json\"), \"w\") as fp:\n json.dump(corpus_info, fp)\n\n return corpus\n\n\ndef main(unused_argv):\n del unused_argv # Unused\n\n corpus = get_lm_corpus(FLAGS.data_dir, FLAGS.dataset) #\n\n save_dir = os.path.join(FLAGS.data_dir, \"tfrecords\")\n if not exists(save_dir):\n makedirs(save_dir)\n\n # test mode\n if FLAGS.per_host_test_bsz > 0:\n corpus.convert_to_tfrecords(\"test\", save_dir, FLAGS.per_host_test_bsz,\n FLAGS.tgt_len, FLAGS.num_core_per_host,\n FLAGS=FLAGS)\n return\n\n for split, batch_size in zip(\n [\"train\", \"valid\"],\n [FLAGS.per_host_train_bsz, FLAGS.per_host_valid_bsz]):\n\n if batch_size <= 0: continue\n print(\"Converting {} set...\".format(split))\n corpus.convert_to_tfrecords(split, save_dir, batch_size, FLAGS.tgt_len,\n FLAGS.num_core_per_host, FLAGS=FLAGS)\n\n\ndef load_record_info(record_info_dir, split, per_host_bsz, tgt_len,\n num_core_per_host, use_tpu):\n if use_tpu:\n record_name = \"record_info-{}.bsz-{}.tlen-{}.core-{}.json\".format(\n split, per_host_bsz, tgt_len, num_core_per_host)\n else:\n record_name = \"record_info-{}.bsz-{}.tlen-{}.json\".format(\n split, per_host_bsz, tgt_len)\n\n record_info_path = os.path.join(record_info_dir, record_name)\n with open(record_info_path, \"r\") as fp:\n record_info = json.load(fp)\n\n return record_info\n\n\ndef get_input_fn(record_info_dir, split, per_host_bsz, tgt_len,\n num_core_per_host, num_hosts=1, use_tpu=False):\n \"\"\"Creates input function.\"\"\"\n record_info = load_record_info(record_info_dir, split, per_host_bsz, tgt_len,\n num_core_per_host, use_tpu=use_tpu)\n\n # 读取一些batch size的信息 冗余\n file_names = record_info[\"filenames\"]\n bin_sizes = record_info[\"bin_sizes\"]\n num_batch = record_info[\"num_batch\"]\n\n tf.compat.v1.logging.info(\"[{}] File names {}\".format(split, file_names))\n\n def input_fn(params):\n # per-core batch size\n per_core_bsz = 
params[\"batch_size\"]\n\n # data_dir could be a remote path, e.g., a google storage url\n data_dir = params[\"data_dir\"]\n\n def parser(record):\n # preprocess \"inp_perm\" and \"tgt_perm\"\n def _process_perm_feature(example, prefix):\n for b in range(len(bin_sizes)):\n cnt = example.pop(\"{}_cnt_{}\".format(prefix, b))[0]\n tup = example.pop(\"{}_tup_{}\".format(prefix, b))\n\n tup = tf.reshape(\n tf.sparse_tensor_to_dense(tup),\n shape=[cnt, 2])\n\n # tf.float32\n perm = tf.sparse_to_dense(\n sparse_indices=tup,\n output_shape=[tgt_len, bin_sizes[b]],\n sparse_values=1.0,\n default_value=0.0)\n\n example[\"{}_perm_{}\".format(prefix, b)] = perm\n\n # whether allow the last batch with a potentially shorter length\n if use_tpu:\n record_spec = {\n \"inputs\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n }\n else:\n record_spec = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"labels\": tf.VarLenFeature(tf.int64),\n }\n\n # permutation related features\n if bin_sizes and use_tpu:\n # tf.float32\n record_spec[\"inp_mask\"] = tf.FixedLenFeature([tgt_len], tf.float32)\n record_spec[\"tgt_mask\"] = tf.FixedLenFeature([tgt_len], tf.float32)\n\n record_spec[\"head_labels\"] = tf.FixedLenFeature([tgt_len], tf.int64)\n\n for b in range(len(bin_sizes)):\n record_spec[\"inp_cnt_{}\".format(b)] = tf.FixedLenFeature([1], tf.int64)\n record_spec[\"inp_tup_{}\".format(b)] = tf.VarLenFeature(tf.int64)\n record_spec[\"tgt_cnt_{}\".format(b)] = tf.FixedLenFeature([1], tf.int64)\n record_spec[\"tgt_tup_{}\".format(b)] = tf.VarLenFeature(tf.int64)\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n # transform permutation tuples to permutation matrices\n if bin_sizes and use_tpu:\n _process_perm_feature(example, \"inp\")\n _process_perm_feature(example, \"tgt\")\n\n # cast int64 into int32\n # cast sparse to dense\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.to_int32(val)\n example[key] = val\n\n if use_tpu:\n return example\n else:\n return example[\"inputs\"], example[\"labels\"]\n\n file_paths = []\n for file_name in file_names:\n file_path = os.path.join(data_dir, file_name)\n file_paths.append(file_path)\n\n if split == \"train\":\n dataset = tf.data.Dataset.from_tensor_slices(file_paths)\n if len(file_paths) > 1:\n dataset = dataset.shuffle(len(file_paths)).repeat()\n dataset = tf.data.TFRecordDataset(dataset)\n elif num_hosts > 1:\n host_id = params[\"context\"].current_host\n # drop the remaining batches\n num_batch_per_host = num_batch // num_hosts\n\n my_start_sample_id = (host_id * num_batch_per_host * num_core_per_host *\n per_core_bsz)\n my_sample_num = num_batch_per_host * num_core_per_host * per_core_bsz\n dataset = tf.data.TFRecordDataset(dataset).skip(\n my_start_sample_id).take(my_sample_num)\n else:\n dataset = tf.data.TFRecordDataset(dataset)\n\n dataset = dataset.map(parser).cache().repeat()\n dataset = dataset.batch(per_core_bsz, drop_remainder=True)\n dataset = dataset.prefetch(num_core_per_host * per_core_bsz)\n else:\n # do not shuffle, repeat or cache in evaluation\n dataset = tf.data.Dataset.from_tensor_slices(file_paths)\n dataset = tf.data.TFRecordDataset(dataset)\n dataset = dataset.map(parser)\n dataset = dataset.batch(per_core_bsz, drop_remainder=True)\n\n return dataset\n\n if split == \"train\" and num_hosts > 1:\n 
record_info[\"num_batch\"] = num_batch // num_hosts\n\n return input_fn, record_info\n\n\ndef get_corpus_info(corpus_info_path):\n with open(corpus_info_path, \"r\") as fp:\n corpus_info = json.load(fp)\n return corpus_info\n\n\nif __name__ == \"__main__\":\n FLAGS = flags.FLAGS\n flags.DEFINE_string(\"data_dir\", None,\n help=\"Location of the data corpus\")\n flags.DEFINE_enum(\"dataset\", \"poetry\",\n [\"ptb\", \"wt2\", \"wt103\", \"lm1b\", \"enwik8\", \"text8\", \"doupo\", \"test\", \"zhihu\", \"poetry\",\"tangshi\"],\n help=\"Dataset name.\")\n flags.DEFINE_integer(\"per_host_train_bsz\", 60,\n help=\"train batch size each host\")\n flags.DEFINE_integer(\"per_host_valid_bsz\", 60,\n help=\"valid batch size each host\")\n flags.DEFINE_integer(\"per_host_test_bsz\", 0,\n help=\"If > 0, enter test mode and process test set only.\"\n \"Otherwise, process train and dev sets only.\")\n flags.DEFINE_integer(\"tgt_len\", 70,\n help=\"number of tokens to predict\")\n flags.DEFINE_integer(\"max_batch\", -1,\n help=\"run in debug mode\")\n flags.DEFINE_integer(\"num_core_per_host\", 2,\n help=\"8 for TPU v2.\")\n flags.DEFINE_bool(\"debug\", default=False,\n help=\"Process only the first batch without shuffle for lm1b.\")\n flags.DEFINE_integer(\"num_procs\", 1,\n help=\"number of processes\")\n flags.DEFINE_integer(\"num_passes\", 10,\n help=\"number of passes when use_tpu=True\")\n flags.DEFINE_integer(\"num_shuffle\", 4,\n help=\"number of shuffles for lm1b\")\n flags.DEFINE_bool(\"use_tpu\", True,\n help=\"use tpu\")\n\n tf.compat.v1.app.run(main)\n" ]
[ [ "tensorflow.FixedLenFeature", "numpy.concatenate", "numpy.mean", "tensorflow.to_int32", "numpy.where", "tensorflow.train.Int64List", "numpy.random.randint", "tensorflow.compat.v1.app.run", "tensorflow.sparse_to_dense", "tensorflow.data.TFRecordDataset", "numpy.std", "numpy.copy", "tensorflow.parse_single_example", "tensorflow.keras.backend.is_sparse", "tensorflow.io.gfile.exists", "tensorflow.io.gfile.makedirs", "tensorflow.sparse_tensor_to_dense", "tensorflow.train.FloatList", "tensorflow.train.Features", "tensorflow.VarLenFeature", "numpy.array", "tensorflow.sparse.to_dense", "tensorflow.data.Dataset.from_tensor_slices", "numpy.random.shuffle", "tensorflow.compat.v1.python_io.TFRecordWriter" ] ]
igorvlnascimento/open-nre-plus
[ "6c994fe4e5d19734a34592fc630cedd0547ed989" ]
[ "opennre/encoder/distilbert_encoder.py" ]
[ "import logging\nimport torch\nimport torch.nn as nn\nfrom transformers import DistilBertTokenizer, DistilBertModel\nfrom .base_encoder import BaseEncoder\n\nclass DistilBertEncoder(nn.Module):\n def __init__(self, max_length, pretrain_path, blank_padding=True, mask_entity=False):\n \"\"\"\n Args:\n max_length: max length of sentence\n pretrain_path: path of pretrain model\n \"\"\"\n super().__init__()\n self.max_length = max_length\n self.blank_padding = blank_padding\n self.hidden_size = 768\n self.mask_entity = mask_entity\n logging.info('Loading BERT pre-trained checkpoint.')\n self.bert = DistilBertModel.from_pretrained(pretrain_path)\n self.tokenizer = DistilBertTokenizer.from_pretrained(pretrain_path)\n\n def forward(self, token, att_mask):\n \"\"\"\n Args:\n token: (B, L), index of tokens\n att_mask: (B, L), attention mask (1 for contents and 0 for padding)\n Return:\n (B, H), representations for sentences\n \"\"\"\n _, x = self.bert(token, attention_mask=att_mask)\n return x\n\n def tokenize(self, item):\n \"\"\"\n Args:\n item: data instance containing 'text' / 'token', 'h' and 't'\n Return:\n Name of the relation of the sentence\n \"\"\"\n # Sentence -> token\n if 'text' in item:\n sentence = item['text']\n is_token = False\n else:\n sentence = item['token']\n is_token = True\n pos_head = item['h']['pos']\n pos_tail = item['t']['pos']\n\n pos_min = pos_head\n pos_max = pos_tail\n if pos_head[0] > pos_tail[0]:\n pos_min = pos_tail\n pos_max = pos_head\n rev = True\n else:\n rev = False\n \n if not is_token:\n sent0 = self.tokenizer.tokenize(sentence[:pos_min[0]])\n ent0 = self.tokenizer.tokenize(sentence[pos_min[0]:pos_min[1]])\n sent1 = self.tokenizer.tokenize(sentence[pos_min[1]:pos_max[0]])\n ent1 = self.tokenizer.tokenize(sentence[pos_max[0]:pos_max[1]])\n sent2 = self.tokenizer.tokenize(sentence[pos_max[1]:])\n else:\n sent0 = self.tokenizer.tokenize(' '.join(sentence[:pos_min[0]]))\n ent0 = self.tokenizer.tokenize(' '.join(sentence[pos_min[0]:pos_min[1]]))\n sent1 = self.tokenizer.tokenize(' '.join(sentence[pos_min[1]:pos_max[0]]))\n ent1 = self.tokenizer.tokenize(' '.join(sentence[pos_max[0]:pos_max[1]]))\n sent2 = self.tokenizer.tokenize(' '.join(sentence[pos_max[1]:]))\n\n if self.mask_entity:\n ent0 = ['[unused4]'] if not rev else ['[unused5]']\n ent1 = ['[unused5]'] if not rev else ['[unused4]']\n else:\n ent0 = ['[unused0]'] + ent0 + ['[unused1]'] if not rev else ['[unused2]'] + ent0 + ['[unused3]']\n ent1 = ['[unused2]'] + ent1 + ['[unused3]'] if not rev else ['[unused0]'] + ent1 + ['[unused1]']\n\n re_tokens = ['[CLS]'] + sent0 + ent0 + sent1 + ent1 + sent2 + ['[SEP]']\n \n indexed_tokens = self.tokenizer.convert_tokens_to_ids(re_tokens)\n avai_len = len(indexed_tokens)\n\n # Padding\n if self.blank_padding:\n while len(indexed_tokens) < self.max_length:\n indexed_tokens.append(0) # 0 is id for [PAD]\n indexed_tokens = indexed_tokens[:self.max_length]\n indexed_tokens = torch.tensor(indexed_tokens).long().unsqueeze(0) # (1, L)\n\n # Attention mask\n att_mask = torch.zeros(indexed_tokens.size()).long() # (1, L)\n att_mask[0, :avai_len] = 1\n\n return indexed_tokens, att_mask\n\n\nclass DistilBertEntityEncoder(nn.Module):\n def __init__(self, max_length, pretrain_path, blank_padding=True, mask_entity=False):\n \"\"\"\n Args:\n max_length: max length of sentence\n pretrain_path: path of pretrain model\n \"\"\"\n super().__init__()\n self.max_length = max_length\n self.blank_padding = blank_padding\n self.hidden_size = 768 * 2\n self.mask_entity = mask_entity\n 
logging.info('Loading BERT pre-trained checkpoint.')\n self.bert = DistilBertModel.from_pretrained(pretrain_path)\n self.tokenizer = DistilBertTokenizer.from_pretrained(pretrain_path)\n self.linear = nn.Linear(self.hidden_size, self.hidden_size)\n\n def forward(self, token, att_mask, pos1, pos2):\n \"\"\"\n Args:\n token: (B, L), index of tokens\n att_mask: (B, L), attention mask (1 for contents and 0 for padding)\n pos1: (B, 1), position of the head entity starter\n pos2: (B, 1), position of the tail entity starter\n Return:\n (B, 2H), representations for sentences\n \"\"\"\n hidden = self.bert(token, attention_mask=att_mask)[0]\n # Get entity start hidden state\n onehot_head = torch.zeros(hidden.size()[:2]).float().to(hidden.device) # (B, L)\n onehot_tail = torch.zeros(hidden.size()[:2]).float().to(hidden.device) # (B, L)\n onehot_head = onehot_head.scatter_(1, pos1, 1)\n onehot_tail = onehot_tail.scatter_(1, pos2, 1)\n head_hidden = (onehot_head.unsqueeze(2) * hidden).sum(1) # (B, H)\n tail_hidden = (onehot_tail.unsqueeze(2) * hidden).sum(1) # (B, H)\n x = torch.cat([head_hidden, tail_hidden], 1) # (B, 2H)\n x = self.linear(x)\n return x\n\n def tokenize(self, item):\n \"\"\"\n Args:\n item: data instance containing 'text' / 'token', 'h' and 't'\n Return:\n Name of the relation of the sentence\n \"\"\"\n # Sentence -> token\n if 'text' in item:\n sentence = item['text']\n is_token = False\n else:\n sentence = item['token']\n is_token = True\n pos_head = item['h']['pos']\n pos_tail = item['t']['pos']\n\n pos_min = pos_head\n pos_max = pos_tail\n if pos_head[0] > pos_tail[0]:\n pos_min = pos_tail\n pos_max = pos_head\n rev = True\n else:\n rev = False\n \n if not is_token:\n sent0 = self.tokenizer.tokenize(sentence[:pos_min[0]])\n ent0 = self.tokenizer.tokenize(sentence[pos_min[0]:pos_min[1]])\n sent1 = self.tokenizer.tokenize(sentence[pos_min[1]:pos_max[0]])\n ent1 = self.tokenizer.tokenize(sentence[pos_max[0]:pos_max[1]])\n sent2 = self.tokenizer.tokenize(sentence[pos_max[1]:])\n else:\n sent0 = self.tokenizer.tokenize(' '.join(sentence[:pos_min[0]]))\n ent0 = self.tokenizer.tokenize(' '.join(sentence[pos_min[0]:pos_min[1]]))\n sent1 = self.tokenizer.tokenize(' '.join(sentence[pos_min[1]:pos_max[0]]))\n ent1 = self.tokenizer.tokenize(' '.join(sentence[pos_max[0]:pos_max[1]]))\n sent2 = self.tokenizer.tokenize(' '.join(sentence[pos_max[1]:]))\n\n if self.mask_entity:\n ent0 = ['[unused4]'] if not rev else ['[unused5]']\n ent1 = ['[unused5]'] if not rev else ['[unused4]']\n else:\n ent0 = ['[unused0]'] + ent0 + ['[unused1]'] if not rev else ['[unused2]'] + ent0 + ['[unused3]']\n ent1 = ['[unused2]'] + ent1 + ['[unused3]'] if not rev else ['[unused0]'] + ent1 + ['[unused1]']\n\n re_tokens = ['[CLS]'] + sent0 + ent0 + sent1 + ent1 + sent2 + ['[SEP]']\n pos1 = 1 + len(sent0) if not rev else 1 + len(sent0 + ent0 + sent1)\n pos2 = 1 + len(sent0 + ent0 + sent1) if not rev else 1 + len(sent0)\n pos1 = min(self.max_length - 1, pos1)\n pos2 = min(self.max_length - 1, pos2)\n \n indexed_tokens = self.tokenizer.convert_tokens_to_ids(re_tokens)\n avai_len = len(indexed_tokens)\n\n # Position\n pos1 = torch.tensor([[pos1]]).long()\n pos2 = torch.tensor([[pos2]]).long()\n\n # Padding\n if self.blank_padding:\n while len(indexed_tokens) < self.max_length:\n indexed_tokens.append(0) # 0 is id for [PAD]\n indexed_tokens = indexed_tokens[:self.max_length]\n indexed_tokens = torch.tensor(indexed_tokens).long().unsqueeze(0) # (1, L)\n\n # Attention mask\n att_mask = torch.zeros(indexed_tokens.size()).long() 
# (1, L)\n att_mask[0, :avai_len] = 1\n\n return indexed_tokens, att_mask, pos1, pos2\n" ]
[ [ "torch.tensor", "torch.nn.Linear", "torch.cat" ] ]
jinxixiang/PC-TMB
[ "c6f2fc62629c7f026865774cdfb9d826464397ea" ]
[ "construct_graph.py" ]
[ "### System\nimport os, sys\nimport h5py\nfrom tqdm import tqdm\nimport numpy as np\nimport nmslib\nimport torch\n\n\nclass Hnsw:\n def __init__(self, space='cosinesimil', index_params=None,\n query_params=None, print_progress=True):\n self.space = space\n self.index_params = index_params\n self.query_params = query_params\n self.print_progress = print_progress\n\n def fit(self, X):\n index_params = self.index_params\n if index_params is None:\n index_params = {'M': 16, 'post': 0, 'efConstruction': 400}\n\n query_params = self.query_params\n if query_params is None:\n query_params = {'ef': 90}\n\n # this is the actual nmslib part, hopefully the syntax should\n # be pretty readable, the documentation also has a more verbiage\n # introduction: https://nmslib.github.io/nmslib/quickstart.html\n index = nmslib.init(space=self.space, method='hnsw')\n index.addDataPointBatch(X)\n index.createIndex(index_params, print_progress=self.print_progress)\n index.setQueryTimeParams(query_params)\n\n self.index_ = index\n self.index_params_ = index_params\n self.query_params_ = query_params\n return self\n\n def query(self, vector, topn):\n # the knnQuery returns indices and corresponding distance\n # we will throw the distance away for now\n indices, dist = self.index_.knnQuery(vector, k=topn)\n return indices\n\n\ndef pt2graph(wsi_h5, radius=9):\n from torch_geometric.data import Data as geomData\n from itertools import chain\n coords, features = np.array(wsi_h5['coords']), np.array(wsi_h5['features'])\n assert coords.shape[0] == features.shape[0]\n num_patches = coords.shape[0]\n\n model = Hnsw(space='l2')\n model.fit(coords)\n a = np.repeat(range(num_patches), radius - 1)\n b = np.fromiter(chain(*[model.query(coords[v_idx], topn=radius)[1:] for v_idx in range(num_patches)]), dtype=int)\n\n if a.shape[0] != b.shape[0]:\n print(f\"a shape: {a.shape} b shape: {b.shape}\")\n return None\n\n edge_spatial = torch.Tensor(np.stack([a, b])).type(torch.LongTensor)\n\n model = Hnsw(space='l2')\n model.fit(features)\n a = np.repeat(range(num_patches), radius - 1)\n b = np.fromiter(chain(*[model.query(coords[v_idx], topn=radius)[1:] for v_idx in range(num_patches)]), dtype=int)\n edge_latent = torch.Tensor(np.stack([a, b])).type(torch.LongTensor)\n\n G = geomData(x=torch.Tensor(features),\n edge_index=edge_spatial,\n edge_latent=edge_latent,\n centroid=torch.Tensor(coords))\n return G\n\n\nif __name__ == \"__main__\":\n\n def createDir_h5toPyG(h5_path, save_path):\n pbar = tqdm(os.listdir(h5_path))\n for h5_fname in pbar:\n pbar.set_description('%s - Creating Graph' % (h5_fname))\n\n wsi_h5 = h5py.File(os.path.join(h5_path, h5_fname), \"r\")\n G = pt2graph(wsi_h5)\n if G is None:\n continue\n\n torch.save(G, os.path.join(save_path, h5_fname[:-3] + '.pt'))\n wsi_h5.close()\n\n\n h5_path = '/PATH/TO//patch_coord_feature'\n save_path = ''\n os.makedirs(save_path, exist_ok=True)\n\n createDir_h5toPyG(h5_path, save_path)\n" ]
[ [ "numpy.array", "numpy.stack", "torch.Tensor" ] ]
ekayen/rnng-pytorch
[ "4cdfcb62f18a214011a8ea4c034fbf9041ac6012" ]
[ "beam_search.py" ]
[ "import sys\nimport os\n\nimport argparse\nimport json\nimport random\nimport shutil\nimport copy\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import cuda\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nfrom tqdm import tqdm\n\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nimport logging\nfrom data import Dataset\nfrom utils import *\nfrom in_order_models import InOrderRNNG\nfrom fixed_stack_in_order_models import FixedStackInOrderRNNG\nfrom train import get_sp_feats\n\ntorch.cuda.empty_cache()\nimport gc\n\ngc.collect()\n \n\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser()\n\n# Data path options\nparser.add_argument('--test_file', default='data/ptb-test.raw.txt')\nparser.add_argument('--lm_output_file', default='surprisals.txt')\nparser.add_argument('--model_file', default='rnng.pt')\nparser.add_argument('--beam_size', type=int, default=200)\nparser.add_argument('--word_beam_size', type=int, default=20)\nparser.add_argument('--shift_size', type=int, default=5)\nparser.add_argument('--batch_size', type=int, default=10, help='Please decrease this if memory error occurs.')\nparser.add_argument('--block_size', type=int, default=100)\nparser.add_argument('--batch_token_size', type=int, default=300,\n help='Number of tokens in a batch (batch_size*sentence_length) does not exceed this. This value could be large value (e.g., 10000) safely when --stack_size_bound is set to some value > 0. Otherwise, we need to control (decrease) the batch size for longer sentences using this option, because then stack_size_bound will grow by sentence length.')\nparser.add_argument('--stack_size_bound', type=int, default=-1,\n help='Stack size during search is bounded by this size. If <= 0, the maximum size grows by sentence length (set by `sentence_length+10`). 100 looks sufficient for most of the grammars. Bounding to some value (e.g., 100) is useful to reduce memory usage while increasing beam size.')\nparser.add_argument('--delay_word_ll', action='store_true',\n help='Adding shift word probability is delayed at word-synchronizing step')\nparser.add_argument('--particle_filter', action='store_true', help='search with particle filter')\nparser.add_argument('--particle_size', type=int, default=10000)\nparser.add_argument('--original_reweight', action='store_true',\n help='If True, use the original reweighting (Eq. 4 in Crabbé et al. 
2019) for particle filtering.')\nparser.add_argument('--dump_beam', action='store_true',\n help='(For debug and model development) print out all states in the beam at each step')\nparser.add_argument('--gpu', default=0, type=int, help='which gpu to use')\nparser.add_argument('--device', default='cuda', choices=['cuda', 'cpu'],\n help='If \"cuda\", GPU number --gpu is used.')\nparser.add_argument('--seed', default=3435, type=int)\nparser.add_argument('--fp16', action='store_true')\nparser.add_argument('--max_length_diff', default=20, type=int,\n help='Maximum sentence length difference in a single batch does not exceed this.')\nparser.add_argument('--back_context',type=int,default=0,help='num tokens of backward PROSODIC context to include')\nparser.add_argument('--for_context',type=int,default=0,help='num tokens of forward PROSODIC context to include')\nparser.add_argument('--context_strat',type=str,default='all',help='Strategy for processing extra context: all = keep all frames, pool = mean pool frames, leading = keep leading n frames')\n\n\ndef load_model(checkpoint, action_dict, vocab):\n if 'model_state_dict' in checkpoint:\n from train import create_model\n model = create_model(checkpoint['args'], action_dict, vocab)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model\n else:\n return checkpoint['model']\n\ndef main(args):\n \"\"\"\n if args.device == 'cuda':\n device = 'cuda:{}'.format(args.gpu)\n else:\n device = 'cpu'\n \"\"\"\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n checkpoint = torch.load(args.model_file)\n vocab = checkpoint['vocab']\n action_dict = checkpoint['action_dict']\n prepro_args = checkpoint['prepro_args']\n model = load_model(checkpoint, action_dict, vocab).to(device)\n\n if args.fp16:\n model.half()\n\n # todo: add tagger.\n if args.test_file.endswith('json'):\n dataset = Dataset.from_json(args.test_file, args.batch_size, vocab, action_dict,\n batch_token_size = args.batch_token_size,\n batch_group = 'similar_length',test=True\n )\n else:\n dataset = Dataset.from_text_file(args.test_file, args.batch_size, vocab, action_dict,\n prepro_args = prepro_args,\n batch_token_size = args.batch_token_size,\n batch_group = 'similar_length'\n )\n logger.info(\"model architecture\")\n logger.info(model)\n model.eval()\n\n if isinstance(model, InOrderRNNG) or isinstance(model, FixedStackInOrderRNNG):\n # A crude way to modify this parameter, which is set at training, and was previosuly defaulted to 8.\n # But I noticed that for in-order models, 8 is too much, and it considerably slows down the search.\n # In practice, for in-order models, max_cons_nts equals max length of unary chains, which, in PTB,\n # does not exceed 3, though this may be corpus or language specific. 
Here, I reset it to 3 manually.\n # For future, if all models are trained on the modified default values (now, in 3), this line could\n # be deleted.\n model.max_cons_nts = 3\n\n cur_block_size = 0\n\n all_parses = []\n all_surprisals = []\n\n def sort_and_print_trees(block_idxs, block_parses, block_surprisals):\n parse_idx_to_sent_idx = sorted(list(enumerate(block_idxs)), key=lambda x:x[1])\n orig_order_parses = [block_parses[parse_idx] for (parse_idx, _) in parse_idx_to_sent_idx]\n orig_order_surps = [block_surprisals[parse_idx] for (parse_idx, _) in parse_idx_to_sent_idx]\n\n all_parses.extend(orig_order_parses)\n all_surprisals.extend(orig_order_surps)\n\n for parse in orig_order_parses:\n print(parse)\n\n def dump_history(beam_history, sents):\n for pointer, bucket_i, beam, word_completed in beam_history:\n print('pointer: {}, i: {}'.format(pointer, bucket_i))\n for batch_i in range(len(beam)):\n for b in beam[batch_i]:\n print('beam: b_i: {}, {}'.format(batch_i, b.dump(action_dict, sents[batch_i])))\n for b in word_completed[batch_i]:\n print('word_comp: b_i: {}, {}'.format(batch_i, b.dump(action_dict, sents[batch_i])))\n print()\n\n if args.particle_filter:\n def parse(tokens, subword_end_mask, return_beam_history = False, stack_size_bound = -1,speech_feats = None, device=None):\n if model.speech_feat_types:\n pause,dur,frames = get_sp_feats(args,speech_feats,device,model.speech_feat_types,model.tok_frame_len)\n else:\n pause = dur = frames = None\n return model.variable_beam_search(tokens, subword_end_mask, args.particle_size,\n args.original_reweight,\n stack_size_bound=stack_size_bound,\n pause=pause,dur=dur,frames=frames)\n else:\n def parse(tokens, subword_end_mask, return_beam_history=False, stack_size_bound = -1,speech_feats = None,device=None):\n if model.speech_feat_types:\n pause,dur,frames = get_sp_feats(args,speech_feats,device,model.speech_feat_types,model.tok_frame_len)\n else:\n pause = dur = frames = None\n\n return model.word_sync_beam_search(\n tokens, subword_end_mask, args.beam_size, args.word_beam_size, args.shift_size,\n return_beam_history=return_beam_history,\n stack_size_bound=stack_size_bound,\n pause=pause,dur=dur,frames=frames)\n\n def try_parse(tokens, subword_end_mask, stack_size_bound = None,speech_feats = None,device=None):\n\n stack_size_bound = args.stack_size_bound if stack_size_bound is None else -1\n if args.dump_beam:\n parses, surprisals, beam_history = parse(tokens, subword_end_mask, True, stack_size_bound,speech_feats,device)\n dump_history(beam_history, [dataset.sents[idx] for idx in batch_idx])\n else:\n parses, surprisals = parse(tokens, subword_end_mask, False, stack_size_bound,speech_feats,device)\n beam_history = None\n return parses, surprisals, beam_history\n\n start_time = time.time()\n with torch.no_grad():\n\n block_idxs = []\n block_parses = []\n block_surprisals = [] # This is subword-based surprisal in general. 
Conversion to word-level surprisal is done at output phase.\n batches = [batch for batch in dataset.test_batches(\n args.block_size, max_length_diff=args.max_length_diff)]\n\n for batch in tqdm(batches): \n\n #tokens, subword_end_mask, batch_idx = batch\n tokens, speech_feats, subword_end_mask, batch_idx = batch\n tokens = tokens.to(device)\n subword_end_mask = subword_end_mask.to(device)\n\n parses, surprisals, beam_history = try_parse( tokens, subword_end_mask,speech_feats=speech_feats,device=device)\n if any(len(p) == 0 for p in parses):\n # parse failure (on some tree in a batch)\n failed_sents = [(idx, \" \".join(dataset.sents[idx].orig_tokens)) for p, idx\n in zip(parses, batch_idx) if len(p) == 0]\n failed_sents_str = \"\\n\".join(\"{}: {}\".format(i, s) for i, s in failed_sents)\n logger.warning(\"Parse failure occurs on the following \"\n \"sentence(s):\\n{}\".format(failed_sents_str))\n if args.stack_size_bound < tokens.size(1):\n # one reason of parse failure is stack is filled with many tokens with only\n # a very little nonterminals at the beginning of stack\n # (e.g., (NP x x x x x x x ... ). Reduce is impossible and thus the parser is\n # stuck.\n # Here, we try to reparse the sentence with increased stack size\n # by setting it -1, which will be (sent_len+10) internally in beam search.\n logger.warning(\"Current sack size bound {} is smaller than \"\n \"sentence length {}.\".format(args.stack_size_bound, tokens.size(1)))\n logger.warning(\"Try to reparse with increased stack size bound...\")\n parses, surprisals, beam_history = try_parse(tokens, subword_end_mask,\n stack_size_bound = -1,\n speech_feats=speech_feats,\n device=device)\n\n best_actions = [p[0][0] for p in parses] # p[0][1] is likelihood\n subword_end_mask = subword_end_mask.cpu().numpy()\n for i in range(len(batch_idx)):\n trees = [action_dict.build_tree_str(best_actions[i],\n dataset.sents[batch_idx[i]].orig_tokens,\n dataset.sents[batch_idx[i]].tags,\n subword_end_mask[i]) for i in range(len(batch_idx))]\n block_idxs.extend(batch_idx)\n block_parses.extend(trees)\n block_surprisals.extend(surprisals)\n cur_block_size += tokens.size(0)\n\n if cur_block_size >= args.block_size:\n assert cur_block_size == args.block_size\n sort_and_print_trees(block_idxs, block_parses, block_surprisals)\n block_idxs = []\n block_parses = []\n block_surprisals = []\n cur_block_size = 0\n\n sort_and_print_trees(block_idxs, block_parses, block_surprisals)\n end_time = time.time()\n\n with open(args.lm_output_file, 'wt') as o:\n all_word_surps = []\n for sent_i, (sent, surp) in enumerate(zip(dataset.sents, all_surprisals)):\n orig_tokens = sent.orig_tokens\n input_tokens = [vocab.id_to_word(t_id) for t_id in sent.token_ids]\n subword_spans = sent.get_subword_span_index()\n piece_surp = [[surp[j] for j in span] for span in subword_spans]\n word_surp = [sum(s) for s in piece_surp]\n all_word_surps.append(word_surp)\n pieces = [[input_tokens[j] for j in span] for span in subword_spans]\n assert len(orig_tokens) == len(word_surp)\n for t_i in range(len(orig_tokens)):\n orig_t = orig_tokens[t_i]\n mod_t = \" \".join(pieces[t_i])\n s = word_surp[t_i]\n ps = \" \".join([str(x) for x in piece_surp[t_i]])\n o.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(sent_i, t_i, orig_t, mod_t, s, ps))\n o.write('-----------------------------------\\n')\n\n ll = -sum([sum(surp) for surp in all_word_surps])\n num_words = sum([len(surp) for surp in all_word_surps])\n ppl = np.exp(-ll / num_words)\n o.write('perplexity: {} Time: {} Throughput: 
{}'.format(\n ppl, end_time - start_time, (len(dataset.sents)) / (end_time-start_time)))\n\nif __name__ == '__main__':\n args = parser.parse_args()\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s:%(name)s:%(levelname)s: %(message)s',\n )\n\n main(args)\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.cuda.empty_cache", "torch.no_grad", "torch.cuda.is_available", "torch.device", "numpy.exp" ] ]
ripon1763/hdss_chakaria
[ "0b8484c14c3596defab6133860c598b23bb07a34" ]
[ "python/sav.py" ]
[ "import sys \nimport pandas as pd \nimport json\nimport os\nimport numpy as np\nimport pyreadstat\n\n\n#opening the json file\nf = open(sys.argv[1]+\".json\", \"r\")\nx=f.read()\n\n# parse x:\ndata = json.loads(x)\n\n# Creates DataFrame. \ndf = pd.DataFrame(data)\n\ndf = df.fillna(value=np.nan)\n\nf.close()\n \n#print(df)\n\npyreadstat.write_sav(df,sys.argv[2]+'.sav')\n\n#deleting the json file\n\nos.remove(sys.argv[1]+\".json\") \n" ]
[ [ "pandas.DataFrame" ] ]
EvenOldridge/NVTabular
[ "85333ae754c0512f7b213a4e98117a1501500dda" ]
[ "tests/unit/test_ops.py" ]
[ "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport math\nimport os\nimport string\n\nimport cudf\nimport dask_cudf\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom cudf.tests.utils import assert_eq\nfrom pandas.api.types import is_integer_dtype\n\nimport nvtabular as nvt\nimport nvtabular.io\nfrom nvtabular import ops as ops\nfrom tests.conftest import get_cats, mycols_csv, mycols_pq\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\n# TODO: dask workflow doesn't support min/max on string columns, so won't work\n# with op_columns=None\[email protected](\"op_columns\", [[\"x\"]])\ndef test_minmax(tmpdir, client, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\"]\n label_name = [\"label\"]\n\n config = nvtabular.workflow.get_new_config()\n config[\"PP\"][\"all\"] = [ops.MinMax(columns=op_columns)]\n\n processor = nvtabular.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n processor.update_stats(dataset)\n x_min = df[\"x\"].min()\n\n assert x_min == pytest.approx(processor.stats[\"mins\"][\"x\"], 1e-2)\n x_max = df[\"x\"].max()\n assert x_max == pytest.approx(processor.stats[\"maxs\"][\"x\"], 1e-2)\n if not op_columns:\n name_min = min(df[\"name-string\"].tolist())\n name_max = max(df[\"name-string\"].tolist())\n assert name_min == processor.stats[\"mins\"][\"name-string\"]\n y_max = df[\"y\"].max()\n y_min = df[\"y\"].min()\n assert y_max == processor.stats[\"maxs\"][\"y\"]\n assert name_max == processor.stats[\"maxs\"][\"name-string\"]\n assert y_min == processor.stats[\"mins\"][\"y\"]\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"x\"], None])\ndef test_moments(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\", \"id\"]\n label_name = [\"label\"]\n\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"continuous\"] = [ops.Moments(columns=op_columns)]\n\n processor = nvtabular.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n\n processor.update_stats(dataset)\n\n assert df.x.count() == processor.stats[\"counts\"][\"x\"]\n assert df.x.count() == 4321\n\n # Check mean and std\n assert math.isclose(df.x.mean(), processor.stats[\"means\"][\"x\"], rel_tol=1e-4)\n assert math.isclose(df.x.std(), processor.stats[\"stds\"][\"x\"], rel_tol=1e-3)\n if not op_columns:\n assert math.isclose(df.y.mean(), processor.stats[\"means\"][\"y\"], rel_tol=1e-4)\n assert math.isclose(df.id.mean(), processor.stats[\"means\"][\"id\"], rel_tol=1e-4)\n\n assert math.isclose(df.y.std(), 
processor.stats[\"stds\"][\"y\"], rel_tol=1e-3)\n assert math.isclose(df.id.std(), processor.stats[\"stds\"][\"id\"], rel_tol=1e-3)\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"name-string\"], None])\ndef test_encoder(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\", \"id\"]\n label_name = [\"label\"]\n\n encoder = ops.GroupbyStatistics(columns=op_columns)\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"categorical\"] = [encoder]\n\n processor = nvt.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n processor.update_stats(dataset)\n\n if engine == \"parquet\" and not op_columns:\n cats_expected0 = df[\"name-cat\"].unique().values_host\n cats0 = get_cats(processor, \"name-cat\")\n assert cats0.tolist() == [None] + cats_expected0.tolist()\n\n cats_expected1 = df[\"name-string\"].unique().values_host\n cats1 = get_cats(processor, \"name-string\")\n assert cats1.tolist() == [None] + cats_expected1.tolist()\n\n\[email protected](\"engine\", [\"parquet\"])\[email protected](\"groups\", [[[\"name-cat\", \"name-string\"], \"name-cat\"], \"name-string\"])\[email protected](\"concat_groups\", [True, False])\ndef test_multicolumn_cats(tmpdir, df, dataset, engine, groups, concat_groups):\n cat_names = [\"name-cat\", \"name-string\"]\n cont_names = [\"x\", \"y\", \"id\"]\n label_name = [\"label\"]\n\n encoder = ops.GroupbyStatistics(\n columns=groups,\n cont_names=None if concat_groups else [\"x\"],\n stats=None if concat_groups else [\"count\", \"mean\"],\n out_path=str(tmpdir),\n concat_groups=concat_groups,\n )\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"categorical\"] = [encoder]\n\n processor = nvt.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n processor.update_stats(dataset)\n\n groups = [groups] if isinstance(groups, str) else groups\n for group in groups:\n group = [group] if isinstance(group, str) else group\n prefix = \"unique.\" if concat_groups else \"cat_stats.\"\n fn = prefix + \"_\".join(group) + \".parquet\"\n cudf.read_parquet(os.path.join(tmpdir, \"categories\", fn))\n\n\[email protected](\"engine\", [\"parquet\"])\[email protected](\"groups\", [[[\"name-cat\", \"name-string\"]], \"name-string\"])\[email protected](\"kfold\", [3])\ndef test_groupby_folds(tmpdir, df, dataset, engine, groups, kfold):\n cat_names = [\"name-cat\", \"name-string\"]\n cont_names = [\"x\", \"y\", \"id\"]\n label_name = [\"label\"]\n\n gb_stats = ops.GroupbyStatistics(\n columns=None,\n out_path=str(tmpdir),\n kfold=kfold,\n fold_groups=groups,\n stats=[\"count\", \"sum\"],\n cont_names=[\"y\"],\n )\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"categorical\"] = [gb_stats]\n\n processor = nvt.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n processor.update_stats(dataset)\n for group, path in processor.stats[\"categories\"].items():\n df = cudf.read_parquet(path)\n assert \"__fold__\" in df.columns\n\n\[email protected](\"cat_groups\", [\"Author\", [[\"Author\", \"Engaging-User\"]]])\[email protected](\"kfold\", [1, 3])\[email protected](\"fold_seed\", [None, 42])\ndef test_target_encode(tmpdir, cat_groups, kfold, fold_seed):\n df = cudf.DataFrame(\n {\n \"Author\": 
list(string.ascii_uppercase),\n \"Engaging-User\": list(string.ascii_lowercase),\n \"Cost\": range(26),\n \"Post\": [0, 1] * 13,\n }\n )\n df = dask_cudf.from_cudf(df, npartitions=3)\n\n cat_names = [\"Author\", \"Engaging-User\"]\n cont_names = [\"Cost\"]\n label_name = [\"Post\"]\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n ops.TargetEncoding(\n cat_groups,\n \"Cost\", # cont_target\n out_path=str(tmpdir),\n kfold=kfold,\n out_col=\"test_name\",\n out_dtype=\"float32\",\n fold_seed=fold_seed,\n drop_folds=False, # Keep folds to validate\n )\n )\n processor.finalize()\n processor.apply(nvt.Dataset(df), output_format=None)\n df_out = processor.get_ddf().compute(scheduler=\"synchronous\")\n\n assert \"test_name\" in df_out.columns\n assert df_out[\"test_name\"].dtype == \"float32\"\n\n if kfold > 1:\n # Cat columns are unique.\n # Make sure __fold__ mapping is correct\n if cat_groups == \"Author\":\n name = \"__fold___Author\"\n cols = [\"__fold__\", \"Author\"]\n else:\n name = \"__fold___Author_Engaging-User\"\n cols = [\"__fold__\", \"Author\", \"Engaging-User\"]\n check = cudf.io.read_parquet(processor.stats[\"te_stats\"][name])\n check = check[cols].sort_values(cols).reset_index(drop=True)\n df_out_check = df_out[cols].sort_values(cols).reset_index(drop=True)\n assert_eq(check, df_out_check)\n\n\[email protected](\"npartitions\", [1, 2])\ndef test_target_encode_multi(tmpdir, npartitions):\n\n cat_1 = np.asarray([\"baaaa\"] * 12)\n cat_2 = np.asarray([\"baaaa\"] * 6 + [\"bbaaa\"] * 3 + [\"bcaaa\"] * 3)\n num_1 = np.asarray([1, 1, 2, 2, 2, 1, 1, 5, 4, 4, 4, 4])\n df = cudf.DataFrame({\"cat\": cat_1, \"cat2\": cat_2, \"num\": num_1})\n df = dask_cudf.from_cudf(df, npartitions=npartitions)\n\n cat_names = [\"cat\", \"cat2\"]\n cont_names = [\"num\"]\n label_name = []\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n cat_groups = [\"cat\", \"cat2\", [\"cat\", \"cat2\"]]\n\n processor.add_preprocess(\n ops.TargetEncoding(\n cat_groups,\n \"num\", # cont_target\n out_path=str(tmpdir),\n kfold=1,\n p_smooth=5,\n out_dtype=\"float32\",\n )\n )\n processor.finalize()\n processor.apply(nvt.Dataset(df), output_format=None)\n df_out = processor.get_ddf().compute(scheduler=\"synchronous\")\n\n assert \"TE_cat_cat2_num\" in df_out.columns\n assert \"TE_cat_num\" in df_out.columns\n assert \"TE_cat2_num\" in df_out.columns\n\n assert_eq(df_out[\"TE_cat2_num\"].values, df_out[\"TE_cat_cat2_num\"].values)\n assert df_out[\"TE_cat_num\"].iloc[0] != df_out[\"TE_cat2_num\"].iloc[0]\n assert math.isclose(df_out[\"TE_cat_num\"].iloc[0], num_1.mean(), abs_tol=1e-4)\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"x\"], None])\ndef test_median(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\", \"id\"]\n label_name = [\"label\"]\n\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"continuous\"] = [ops.Median(columns=op_columns)]\n\n processor = nvt.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n\n processor.update_stats(dataset)\n\n # Check median (TODO: Improve the accuracy)\n x_median = df.x.dropna().quantile(0.5, interpolation=\"linear\")\n assert math.isclose(x_median, 
processor.stats[\"medians\"][\"x\"], rel_tol=1e1)\n if not op_columns:\n y_median = df.y.dropna().quantile(0.5, interpolation=\"linear\")\n id_median = df.id.dropna().quantile(0.5, interpolation=\"linear\")\n assert math.isclose(y_median, processor.stats[\"medians\"][\"y\"], rel_tol=1e1)\n assert math.isclose(id_median, processor.stats[\"medians\"][\"id\"], rel_tol=1e1)\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"x\"], None])\ndef test_log(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cont_names = [\"x\", \"y\", \"id\"]\n log_op = ops.LogOp(columns=op_columns)\n\n columns_ctx = {}\n columns_ctx[\"continuous\"] = {}\n columns_ctx[\"continuous\"][\"base\"] = cont_names\n\n for gdf in dataset.to_iter():\n new_gdf = log_op.apply_op(gdf, columns_ctx, \"continuous\")\n assert new_gdf[cont_names] == np.log(gdf[cont_names].astype(np.float32))\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"name-string\"], None])\ndef test_hash_bucket(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-string\"]\n\n if op_columns is None:\n num_buckets = 10\n else:\n num_buckets = {column: 10 for column in op_columns}\n hash_bucket_op = ops.HashBucket(num_buckets)\n\n columns_ctx = {}\n columns_ctx[\"categorical\"] = {}\n columns_ctx[\"categorical\"][\"base\"] = cat_names\n\n # check sums for determinancy\n checksums = []\n for gdf in dataset.to_iter():\n new_gdf = hash_bucket_op.apply_op(gdf, columns_ctx, \"categorical\")\n assert np.all(new_gdf[cat_names].values >= 0)\n assert np.all(new_gdf[cat_names].values <= 9)\n checksums.append(new_gdf[cat_names].sum().values)\n\n for checksum, gdf in zip(checksums, dataset.to_iter()):\n new_gdf = hash_bucket_op.apply_op(gdf, columns_ctx, \"categorical\")\n assert np.all(new_gdf[cat_names].sum().values == checksum)\n\n\[email protected](\"engine\", [\"parquet\"])\ndef test_fill_missing(tmpdir, df, dataset, engine):\n op = nvt.ops.FillMissing(42)\n\n cont_names = [\"x\", \"y\"]\n columns_ctx = {}\n columns_ctx[\"continuous\"] = {}\n columns_ctx[\"continuous\"][\"base\"] = cont_names\n for col in cont_names:\n idx = np.random.choice(df.shape[0] - 1, int(df.shape[0] * 0.2))\n df[col].iloc[idx] = None\n\n transformed = cudf.concat([op.apply_op(df, columns_ctx, \"continuous\")])\n assert_eq(transformed[cont_names], df[cont_names].fillna(42))\n\n\[email protected](\"engine\", [\"parquet\"])\ndef test_dropna(tmpdir, df, dataset, engine):\n dropna = ops.Dropna()\n columns = mycols_pq if engine == \"parquet\" else mycols_csv\n\n columns_ctx = {}\n columns_ctx[\"all\"] = {}\n columns_ctx[\"all\"][\"base\"] = columns\n\n for gdf in dataset.to_iter():\n new_gdf = dropna.apply_op(gdf, columns_ctx, \"all\")\n assert new_gdf.columns.all() == gdf.columns.all()\n assert new_gdf.isnull().all().sum() < 1, \"null values exist\"\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"x\"], None])\ndef test_normalize(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\"]\n label_name = [\"label\"]\n\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"continuous\"] = 
[ops.Moments(columns=op_columns)]\n\n processor = nvtabular.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n\n processor.update_stats(dataset)\n\n op = ops.Normalize()\n\n columns_ctx = {}\n columns_ctx[\"continuous\"] = {}\n columns_ctx[\"continuous\"][\"base\"] = op_columns or cont_names\n\n new_gdf = op.apply_op(df, columns_ctx, \"continuous\", stats_context=processor.stats)\n df[\"x\"] = (df[\"x\"] - processor.stats[\"means\"][\"x\"]) / processor.stats[\"stds\"][\"x\"]\n assert new_gdf[\"x\"].equals(df[\"x\"])\n\n\[email protected](\"gpu_memory_frac\", [0.01, 0.1])\[email protected](\"engine\", [\"parquet\", \"csv\", \"csv-no-header\"])\[email protected](\"op_columns\", [[\"x\"], None])\ndef test_normalize_minmax(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):\n cat_names = [\"name-cat\", \"name-string\"] if engine == \"parquet\" else [\"name-string\"]\n cont_names = [\"x\", \"y\"]\n label_name = [\"label\"]\n\n config = nvt.workflow.get_new_config()\n config[\"PP\"][\"continuous\"] = [ops.MinMax()]\n\n processor = nvtabular.Workflow(\n cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config\n )\n\n processor.update_stats(dataset)\n\n op = ops.NormalizeMinMax()\n\n columns_ctx = {}\n columns_ctx[\"continuous\"] = {}\n columns_ctx[\"continuous\"][\"base\"] = cont_names\n\n new_gdf = op.apply_op(df, columns_ctx, \"continuous\", stats_context=processor.stats)\n df[\"x\"] = (df[\"x\"] - processor.stats[\"mins\"][\"x\"]) / (\n processor.stats[\"maxs\"][\"x\"] - processor.stats[\"mins\"][\"x\"]\n )\n assert new_gdf[\"x\"].equals(df[\"x\"])\n\n\[email protected](\"gpu_memory_frac\", [0.1])\[email protected](\"engine\", [\"parquet\"])\ndef test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):\n cat_names = [\"name-cat\", \"name-string\"]\n cont_names = [\"x\", \"y\"]\n label_name = [\"label\"]\n columns = mycols_pq if engine == \"parquet\" else mycols_csv\n\n df_copy = df.copy()\n\n config = nvt.workflow.get_new_config()\n\n processor = nvtabular.Workflow(\n cat_names=cat_names,\n cont_names=cont_names,\n label_name=label_name,\n config=config,\n client=client,\n )\n\n columns_ctx = {}\n columns_ctx[\"continuous\"] = {}\n columns_ctx[\"continuous\"][\"base\"] = cont_names\n columns_ctx[\"all\"] = {}\n columns_ctx[\"all\"][\"base\"] = columns\n\n # Substring\n # Replacement\n op = ops.LambdaOp(\n op_name=\"slice\",\n f=lambda col, gdf: col.str.slice(1, 3),\n columns=[\"name-cat\", \"name-string\"],\n replace=True,\n )\n\n new_gdf = op.apply_op(df, columns_ctx, \"all\", stats_context=None)\n assert new_gdf[\"name-cat\"].equals(df_copy[\"name-cat\"].str.slice(1, 3))\n assert new_gdf[\"name-string\"].equals(df_copy[\"name-string\"].str.slice(1, 3))\n\n # No Replacement\n df = df_copy.copy()\n op = ops.LambdaOp(\n op_name=\"slice\",\n f=lambda col, gdf: col.str.slice(1, 3),\n columns=[\"name-cat\", \"name-string\"],\n replace=False,\n )\n new_gdf = op.apply_op(df, columns_ctx, \"all\", stats_context=None)\n assert new_gdf[\"name-cat_slice\"].equals(df_copy[\"name-cat\"].str.slice(1, 3))\n assert new_gdf[\"name-string_slice\"].equals(df_copy[\"name-string\"].str.slice(1, 3))\n assert new_gdf[\"name-cat\"].equals(df_copy[\"name-cat\"])\n assert new_gdf[\"name-string\"].equals(df_copy[\"name-string\"])\n\n # Replace\n # Replacement\n df = df_copy.copy()\n op = ops.LambdaOp(\n op_name=\"replace\",\n f=lambda col, gdf: col.str.replace(\"e\", \"XX\"),\n columns=[\"name-cat\", \"name-string\"],\n 
replace=True,\n )\n\n new_gdf = op.apply_op(df, columns_ctx, \"all\", stats_context=None)\n assert new_gdf[\"name-cat\"].equals(df_copy[\"name-cat\"].str.replace(\"e\", \"XX\"))\n assert new_gdf[\"name-string\"].equals(df_copy[\"name-string\"].str.replace(\"e\", \"XX\"))\n\n # No Replacement\n df = df_copy.copy()\n op = ops.LambdaOp(\n op_name=\"replace\",\n f=lambda col, gdf: col.str.replace(\"e\", \"XX\"),\n columns=[\"name-cat\", \"name-string\"],\n replace=False,\n )\n new_gdf = op.apply_op(df, columns_ctx, \"all\", stats_context=None)\n assert new_gdf[\"name-cat_replace\"].equals(df_copy[\"name-cat\"].str.replace(\"e\", \"XX\"))\n assert new_gdf[\"name-string_replace\"].equals(df_copy[\"name-string\"].str.replace(\"e\", \"XX\"))\n assert new_gdf[\"name-cat\"].equals(df_copy[\"name-cat\"])\n assert new_gdf[\"name-string\"].equals(df_copy[\"name-string\"])\n\n # astype\n # Replacement\n df = df_copy.copy()\n op = ops.LambdaOp(\n op_name=\"astype\", f=lambda col, gdf: col.astype(float), columns=[\"id\"], replace=True\n )\n new_gdf = op.apply_op(df, columns_ctx, \"all\", stats_context=None)\n assert new_gdf[\"id\"].dtype == \"float64\"\n\n # Workflow\n # Replacement\n import glob\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n [\n ops.LambdaOp(\n op_name=\"slice\",\n f=lambda col, gdf: col.astype(str).str.slice(0, 1),\n columns=[\"name-cat\"],\n replace=True,\n ),\n ops.Categorify(),\n ]\n )\n processor.finalize()\n processor.update_stats(dataset)\n outdir = tmpdir.mkdir(\"out1\")\n processor.write_to_dataset(\n outdir, dataset, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION, apply_ops=True\n )\n\n dataset_2 = nvtabular.io.Dataset(\n glob.glob(str(outdir) + \"/*.parquet\"), part_mem_fraction=gpu_memory_frac\n )\n df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)\n assert is_integer_dtype(df_pp[\"name-cat\"].dtype)\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n [\n ops.Categorify(),\n ops.LambdaOp(op_name=\"add100\", f=lambda col, gdf: col + 100, replace=True),\n ]\n )\n processor.finalize()\n processor.update_stats(dataset)\n outdir = tmpdir.mkdir(\"out2\")\n processor.write_to_dataset(\n outdir, dataset, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION, apply_ops=True\n )\n\n dataset_2 = nvtabular.io.Dataset(\n glob.glob(str(outdir) + \"/*.parquet\"), part_mem_fraction=gpu_memory_frac\n )\n df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)\n assert is_integer_dtype(df_pp[\"name-cat\"].dtype)\n assert np.sum(df_pp[\"name-cat\"] < 100) == 0\n\n # Workflow\n # No Replacement\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n [\n ops.LambdaOp(\n op_name=\"slice\",\n f=lambda col, gdf: col.astype(str).str.slice(0, 1),\n columns=[\"name-cat\"],\n replace=False,\n ),\n ops.Categorify(),\n ]\n )\n processor.finalize()\n processor.update_stats(dataset)\n outdir = tmpdir.mkdir(\"out3\")\n processor.write_to_dataset(\n outdir, dataset, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION, apply_ops=True\n )\n dataset_2 = nvtabular.io.Dataset(\n glob.glob(str(outdir) + \"/*.parquet\"), part_mem_fraction=gpu_memory_frac\n )\n df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)\n\n assert df_pp[\"name-cat\"].dtype == \"O\"\n print(df_pp)\n assert is_integer_dtype(df_pp[\"name-cat_slice\"].dtype)\n assert 
np.sum(df_pp[\"name-cat_slice\"] == 0) == 0\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n [\n ops.Categorify(),\n ops.LambdaOp(op_name=\"add100\", f=lambda col, gdf: col + 100, replace=False),\n ]\n )\n processor.finalize()\n processor.update_stats(dataset)\n outdir = tmpdir.mkdir(\"out4\")\n processor.write_to_dataset(\n outdir, dataset, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION, apply_ops=True\n )\n\n dataset_2 = nvtabular.io.Dataset(\n glob.glob(str(outdir) + \"/*.parquet\"), part_mem_fraction=gpu_memory_frac\n )\n df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)\n assert is_integer_dtype(df_pp[\"name-cat_add100\"].dtype)\n assert np.sum(df_pp[\"name-cat_add100\"] < 100) == 0\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n [\n ops.LambdaOp(op_name=\"mul0\", f=lambda col, gdf: col * 0, columns=[\"x\"], replace=False),\n ops.LambdaOp(op_name=\"add100\", f=lambda col, gdf: col + 100, replace=False),\n ]\n )\n processor.finalize()\n processor.update_stats(dataset)\n outdir = tmpdir.mkdir(\"out5\")\n processor.write_to_dataset(\n outdir, dataset, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION, apply_ops=True\n )\n\n dataset_2 = nvtabular.io.Dataset(\n glob.glob(str(outdir) + \"/*.parquet\"), part_mem_fraction=gpu_memory_frac\n )\n df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)\n assert np.sum(df_pp[\"x_mul0_add100\"] < 100) == 0\n\n\[email protected](\"groups\", [[[\"Author\", \"Engaging User\"]], None])\[email protected](\"kind\", [\"joint\", \"combo\"])\ndef test_categorify_multi(tmpdir, groups, kind):\n\n df = pd.DataFrame(\n {\n \"Author\": [\"User_A\", \"User_E\", \"User_B\", \"User_C\"],\n \"Engaging User\": [\"User_B\", \"User_B\", \"User_A\", \"User_D\"],\n \"Post\": [1, 2, 3, 4],\n }\n )\n\n cat_names = [\"Author\", \"Engaging User\"]\n cont_names = []\n label_name = [\"Post\"]\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(ops.Categorify(columns=groups, out_path=str(tmpdir), encode_type=kind))\n processor.finalize()\n processor.apply(nvt.Dataset(df), output_format=None)\n df_out = processor.get_ddf().compute(scheduler=\"synchronous\")\n\n if groups:\n if kind == \"joint\":\n # Columns are encoded jointly\n assert df_out[\"Author\"].to_arrow().to_pylist() == [1, 5, 2, 3]\n assert df_out[\"Engaging User\"].to_arrow().to_pylist() == [2, 2, 1, 4]\n else:\n # Column combinations are encoded\n assert df_out[\"Author_Engaging User\"].to_arrow().to_pylist() == [1, 4, 2, 3]\n else:\n # Columns are encoded independently\n assert df_out[\"Author\"].to_arrow().to_pylist() == [1, 4, 2, 3]\n assert df_out[\"Engaging User\"].to_arrow().to_pylist() == [2, 2, 1, 3]\n\n\ndef test_categorify_multi_combo(tmpdir):\n\n groups = [[\"Author\", \"Engaging User\"], [\"Author\"], \"Engaging User\"]\n kind = \"combo\"\n df = pd.DataFrame(\n {\n \"Author\": [\"User_A\", \"User_E\", \"User_B\", \"User_C\"],\n \"Engaging User\": [\"User_B\", \"User_B\", \"User_A\", \"User_D\"],\n \"Post\": [1, 2, 3, 4],\n }\n )\n\n cat_names = [\"Author\", \"Engaging User\"]\n cont_names = []\n label_name = [\"Post\"]\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(ops.Categorify(columns=groups, out_path=str(tmpdir), encode_type=kind))\n processor.finalize()\n 
processor.apply(nvt.Dataset(df), output_format=None)\n df_out = processor.get_ddf().compute(scheduler=\"synchronous\")\n\n # Column combinations are encoded\n assert df_out[\"Author\"].to_arrow().to_pylist() == [1, 4, 2, 3]\n assert df_out[\"Engaging User\"].to_arrow().to_pylist() == [2, 2, 1, 3]\n assert df_out[\"Author_Engaging User\"].to_arrow().to_pylist() == [1, 4, 2, 3]\n\n\[email protected](\"freq_limit\", [None, 0, {\"Author\": 3, \"Engaging User\": 4}])\ndef test_categorify_freq_limit(tmpdir, freq_limit):\n df = pd.DataFrame(\n {\n \"Author\": [\n \"User_A\",\n \"User_E\",\n \"User_B\",\n \"User_C\",\n \"User_A\",\n \"User_E\",\n \"User_B\",\n \"User_C\",\n \"User_B\",\n \"User_C\",\n ],\n \"Engaging User\": [\n \"User_B\",\n \"User_B\",\n \"User_A\",\n \"User_D\",\n \"User_B\",\n \"User_c\",\n \"User_A\",\n \"User_D\",\n \"User_D\",\n \"User_D\",\n ],\n }\n )\n\n cat_names = [\"Author\", \"Engaging User\"]\n cont_names = []\n label_name = []\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n ops.Categorify(columns=cat_names, freq_threshold=freq_limit, out_path=str(tmpdir))\n )\n processor.finalize()\n processor.apply(nvt.Dataset(df), output_format=None)\n df_out = processor.get_ddf().compute(scheduler=\"synchronous\")\n\n # Column combinations are encoded\n if isinstance(freq_limit, dict):\n assert df_out[\"Author\"].max() == 2\n assert df_out[\"Engaging User\"].max() == 1\n else:\n assert len(df[\"Author\"].unique()) == df_out[\"Author\"].max()\n assert len(df[\"Engaging User\"].unique()) == df_out[\"Engaging User\"].max()\n\n\[email protected](\"groups\", [[[\"Author\", \"Engaging-User\"]], \"Author\"])\ndef test_joingroupby_multi(tmpdir, groups):\n\n df = pd.DataFrame(\n {\n \"Author\": [\"User_A\", \"User_A\", \"User_A\", \"User_B\"],\n \"Engaging-User\": [\"User_B\", \"User_B\", \"User_C\", \"User_C\"],\n \"Cost\": [100.0, 200.0, 300.0, 400.0],\n \"Post\": [1, 2, 3, 4],\n }\n )\n\n cat_names = [\"Author\", \"Engaging-User\"]\n cont_names = [\"Cost\"]\n label_name = [\"Post\"]\n\n processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)\n\n processor.add_preprocess(\n ops.JoinGroupby(columns=groups, out_path=str(tmpdir), stats=[\"sum\"], cont_names=[\"Cost\"])\n )\n processor.finalize()\n processor.apply(nvt.Dataset(df), output_format=None)\n df_out = processor.get_ddf().compute(scheduler=\"synchronous\")\n\n if isinstance(groups, list):\n # Join on [\"Author\", \"Engaging-User\"]\n assert df_out[\"Author_Engaging-User_Cost_sum\"].to_arrow().to_pylist() == [\n 300.0,\n 300.0,\n 300.0,\n 400.0,\n ]\n else:\n # Join on [\"Author\"]\n assert df_out[\"Author_Cost_sum\"].to_arrow().to_pylist() == [600.0, 600.0, 600.0, 400.0]\n\n\[email protected](\"engine\", [\"parquet\"])\[email protected](\"kind_ext\", [\"cudf\", \"pandas\", \"arrow\", \"parquet\", \"csv\"])\[email protected](\"cache\", [\"host\", \"device\"])\[email protected](\"how\", [\"left\", \"inner\"])\[email protected](\"drop_duplicates\", [True, False])\ndef test_join_external(tmpdir, df, dataset, engine, kind_ext, cache, how, drop_duplicates):\n\n # Define \"external\" table\n shift = 100\n df_ext = df[[\"id\"]].copy().sort_values(\"id\")\n df_ext[\"new_col\"] = df_ext[\"id\"] + shift\n df_ext[\"new_col_2\"] = \"keep\"\n df_ext[\"new_col_3\"] = \"ignore\"\n df_ext_check = df_ext.copy()\n if kind_ext == \"pandas\":\n df_ext = df_ext.to_pandas()\n elif kind_ext == \"arrow\":\n df_ext = df_ext.to_arrow()\n elif 
kind_ext == \"parquet\":\n path = tmpdir.join(\"external.parquet\")\n df_ext.to_parquet(path)\n df_ext = path\n elif kind_ext == \"csv\":\n path = tmpdir.join(\"external.csv\")\n df_ext.to_csv(path)\n df_ext = path\n\n # Define Op\n on = \"id\"\n columns_ext = [\"id\", \"new_col\", \"new_col_2\"]\n df_ext_check = df_ext_check[columns_ext]\n if drop_duplicates:\n df_ext_check.drop_duplicates(ignore_index=True, inplace=True)\n merge_op = ops.JoinExternal(\n df_ext,\n on,\n how=how,\n columns_ext=columns_ext,\n cache=cache,\n drop_duplicates_ext=drop_duplicates,\n )\n columns = mycols_pq if engine == \"parquet\" else mycols_csv\n columns_ctx = {}\n columns_ctx[\"all\"] = {}\n columns_ctx[\"all\"][\"base\"] = columns\n\n # Iterate, apply op, and check result\n for gdf in dataset.to_iter():\n new_gdf = merge_op.apply_op(gdf, columns_ctx, \"all\")\n check_gdf = gdf.merge(df_ext_check, how=how, on=on)\n assert len(check_gdf) == len(new_gdf)\n assert (new_gdf[\"id\"] + shift).all() == new_gdf[\"new_col\"].all()\n assert gdf[\"id\"].all() == new_gdf[\"id\"].all()\n assert \"new_col_2\" in new_gdf.columns\n assert \"new_col_3\" not in new_gdf.columns\n\n\[email protected](\"gpu_memory_frac\", [0.1])\[email protected](\"engine\", [\"parquet\"])\ndef test_filter(tmpdir, df, dataset, gpu_memory_frac, engine, client):\n\n cont_names = [\"x\", \"y\"]\n\n columns = mycols_pq if engine == \"parquet\" else mycols_csv\n columns_ctx = {}\n columns_ctx[\"all\"] = {}\n columns_ctx[\"all\"][\"base\"] = columns\n\n filter_op = ops.Filter(f=lambda df: df[df[\"y\"] > 0.5])\n new_gdf = filter_op.apply_op(df, columns_ctx, \"all\", target_cols=columns)\n assert new_gdf.columns.all() == df.columns.all()\n\n # return isnull() rows\n columns_ctx[\"continuous\"] = {}\n columns_ctx[\"continuous\"][\"base\"] = cont_names\n\n for col in cont_names:\n idx = np.random.choice(df.shape[0] - 1, int(df.shape[0] * 0.2))\n df[col].iloc[idx] = None\n\n filter_op = ops.Filter(f=lambda df: df[df.x.isnull()])\n new_gdf = filter_op.apply_op(df, columns_ctx, \"all\", target_cols=columns)\n assert new_gdf.columns.all() == df.columns.all()\n assert new_gdf.shape[0] < df.shape[0], \"null values do not exist\"\n\n\ndef test_difference_lag():\n df = cudf.DataFrame(\n {\"userid\": [0, 0, 0, 1, 1, 2], \"timestamp\": [1000, 1005, 1100, 2000, 2001, 3000]}\n )\n\n columns = [\"userid\", \"timestamp\"]\n columns_ctx = {}\n columns_ctx[\"all\"] = {}\n columns_ctx[\"all\"][\"base\"] = columns\n\n op = ops.DifferenceLag(\"userid\", columns=[\"timestamp\"])\n new_gdf = op.apply_op(df, columns_ctx, \"all\", target_cols=[\"timestamp\"])\n\n assert new_gdf[\"timestamp_DifferenceLag\"][0] is None\n assert new_gdf[\"timestamp_DifferenceLag\"][1] == 5\n assert new_gdf[\"timestamp_DifferenceLag\"][2] == 95\n assert new_gdf[\"timestamp_DifferenceLag\"][3] is None\n" ]
[ [ "numpy.asarray", "pandas.DataFrame", "numpy.all", "pandas.api.types.is_integer_dtype", "numpy.sum" ] ]
wanghh2000/yolov4-tf2
[ "d91707f9ae79e04d62a630501a70b72e2fc99e0f" ]
[ "video.py" ]
[ "#-------------------------------------#\r\n# 调用摄像头检测\r\n#-------------------------------------#\r\nfrom yolo import YOLO\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\nimport tensorflow as tf\r\n\r\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\r\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\r\n\r\nyolo = YOLO()\r\n# 调用摄像头\r\ncapture=cv2.VideoCapture(0) # capture=cv2.VideoCapture(\"1.mp4\")\r\nfps = 0.0\r\nwhile(True):\r\n t1 = time.time()\r\n # 读取某一帧\r\n ref,frame=capture.read()\r\n # 格式转变,BGRtoRGB\r\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\r\n # 转变成Image\r\n frame = Image.fromarray(np.uint8(frame))\r\n\r\n # 进行检测\r\n frame = np.array(yolo.detect_image(frame))\r\n\r\n # RGBtoBGR满足opencv显示格式\r\n frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)\r\n \r\n fps = ( fps + (1./(time.time()-t1)) ) / 2\r\n print(\"fps= %.2f\"%(fps))\r\n frame = cv2.putText(frame, \"fps= %.2f\"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n \r\n cv2.imshow(\"video\",frame)\r\n c= cv2.waitKey(1) & 0xff \r\n if c==27:\r\n capture.release()\r\n break\r\n" ]
[ [ "numpy.uint8", "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.set_memory_growth" ] ]
dhaulagiri0/Mask_RCNN
[ "f352dcfdbc84cbb4266997f8781b91e84dff7f2b", "f352dcfdbc84cbb4266997f8781b91e84dff7f2b" ]
[ "angio2020/data_processing.py", "angio2020/interpolation.py" ]
[ "import cv2\nimport numpy as np\nimport matplotlib.cm as cm\nfrom matplotlib import pyplot as plt\nfrom pycocotools import mask as maskUtils\nfrom pathlib import Path\nimport json\n\n'''\nconverts a given mask image to run length encoding format (rle)\nrle itself is a dictionary containing:\n size: size of original binary map\n counts: the encoded binary map (bytes)\n\nrle is shorter than raw binary map array\n\nparameter threshold determines the minimum pixel value (ranges from 0 to 255) to be converted to 1 in bin map\nif no img is given image will be loaded from given path\n'''\n\ndef toBinMaskRle(img=None, path=None, threshold=10):\n if path:\n # read mask in grayscale format\n img = cv2.imread(path, 0)\n\n # create binary map by thresholding\n ret, binMap = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)\n\n #convert bin map to rle\n binMap = np.asfortranarray(binMap)\n rle = maskUtils.encode(binMap)\n rle['counts'] = str(rle['counts'], 'utf-8')\n return rle\n\ndef createJsonAnnotations(data_path, subset):\n # list of categories for the dataset\n categories = [{'id' : 1, 'name' : 'lad'}, {'id' : 2, 'name' : 'diagonal'}, {'id' : 3, 'name' : 'lcx1'}, {'id' : 4, 'name' : 'lcx2'}, {'id' : 5, 'name' : 'distal'}]\n category_to_id = {\n 'lad' : 1,\n 'diagonal' : 2,\n 'lcx1' : 3,\n 'lcx2' : 4,\n 'distal' : 5\n }\n # list of annotations\n annotations = []\n #list of images\n images = []\n\n p = Path(data_path)\n id_cnt = 0\n # navigates the items found in data folder\n for item in p.iterdir():\n # if item is a folder containing masks\n if item.is_dir():\n # image_id is the folder name\n image_id = item.name\n # iterate through all the masks\n for f in item.iterdir():\n # get rle and add to list of annotations\n print(f.name)\n category = f.name.split('_')[-1][:-4]\n if category in ['mask1', 'mask2', 'mask3', 'mask']:\n continue\n category_id = category_to_id[category]\n id_cnt += 1\n rle = toBinMaskRle(path=str(f))\n area = int(maskUtils.area(rle))\n bbox = maskUtils.toBbox(rle).tolist()\n annotations.append({\n 'id': id_cnt,\n 'iscrowd': 0,\n 'segmentation': rle,\n 'area': area,\n 'bbox': bbox,\n 'category_id' : category_id,\n 'image_id' : image_id,\n })\n \n # item is a image\n else:\n image_id = item.name.split('.')[0]\n img = cv2.imread(str(item), 0)\n height = img.shape[0]\n width = img.shape[1]\n # add image info to json\n images.append({\n 'filename': image_id + '.png',\n 'height':height,\n 'width':width,\n 'id': image_id\n })\n \n\n #create json\n jsonFile = {\n 'categories' : categories,\n 'annotations' : annotations,\n 'images' : images\n }\n\n with open(data_path + '/data_{}.json'.format(subset), 'w', encoding='utf-8') as f:\n json.dump(jsonFile, f, ensure_ascii=False, indent=4)\n\n\n\ncreateJsonAnnotations('B:/train', 'train')\ncreateJsonAnnotations('B:/val', 'val')\ncreateJsonAnnotations('B:/test', 'test')\n\n# rle = jpgToBinMaskRle(image_path)\n# print(rle)\n", "import numpy as np\nfrom scipy.interpolate import RectBivariateSpline\nimport cv2\nfrom decimal import Decimal\nimport math\nfrom math import sqrt\n\n# gets a point thats a certain unit (whatever value increment is)\n# away from the first the coordinates (x, y) along the given line\n# the point given will have a smaller x value than the given x\n# x, y should always be a point on the right of the next point on the x-axis\ndef getNextPoint(x, y, m, increment):\n if m == math.inf:\n x1 = x\n y1 = y + increment\n else:\n x1 = x - increment/sqrt(m**2 + 1)\n y1 = -m*(x - x1) + y\n return x1, y1\n\n# given two points, 
find points that are along the line\n# each point is one increment in absolute distance away from the previous point along the line\n# the sequence of the points given does not matter\ndef getPtsAlongLine(srcPt, dstPt, increment=0.1):\n x1, y1 = srcPt\n x2, y2 = dstPt\n xcoords = []\n ycoords = []\n # find out which point is on the right along the x-axis\n if x1 > x2:\n xcoords.append(x1)\n ycoords.append(y1)\n else:\n xcoords.append(x2)\n ycoords.append(y2)\n if x2 - x1 != 0:\n gradient = (y2 - y1) / (x2 - x1)\n distance = sqrt((x2 - x1)**2 + (y2 - y1)**2)\n steps = int(distance / increment)\n for _ in range(steps):\n # get next point to the left of the current one along the line\n x, y = getNextPoint(xcoords[-1], ycoords[-1], gradient, increment)\n xcoords.append(x)\n ycoords.append(y)\n else: \n # it's a straight line parallel to the y-axis\n ycoords = np.arange(min(y1, y2), max(y1, y2) + increment, increment)\n xcoords = [x1] * len(ycoords)\n\n return np.asarray(xcoords), np.asarray(ycoords)\n\n\n# x and y are lists of coordinates along the normal line\n# returns all the interpolated pixel values of the image along the given line\ndef getInterpolatedPtsVal(x, y, img):\n xmax = img.shape[0]\n ymax = img.shape[1]\n xcoords = np.arange(xmax)\n ycoords = np.arange(ymax)\n sp = RectBivariateSpline(xcoords, ycoords, img)\n return sp(x, y, grid=False)\n\n\n# legacy code \n# def getPtsAlongLineOld(srcPt, dstPt, increment=0.1):\n# x1, y1 = srcPt\n# x2, y2 = dstPt\n# if x2 - x1 != 0:\n# gradient = (y2 - y1) / (x2 - x1)\n# xcoords = np.arange(min(x1, x2), max(x1, x2) + increment, increment)\n# ycoords = []\n# for x in xcoords:\n# y = gradient*(x - x1) + y1\n# ycoords.append(y)\n# else: \n# ycoords = np.arange(min(y1, y2), max(y1, y2) + increment, increment)\n# xcoords = [x1] * len(ycoords)\n\n# return np.asarray(xcoords), np.asarray(ycoords)\n\n" ]
[ [ "numpy.asfortranarray" ], [ "numpy.asarray", "numpy.arange", "scipy.interpolate.RectBivariateSpline" ] ]
PPOLYpubki/EF-Net
[ "f5ab2cb131fe38c2f21fadcd6f6ef0190d3d81d9" ]
[ "model/resnet.py" ]
[ "import math\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, inchannel, outchannel, stride=1):\n super(ResidualBlock, self).__init__()\n self.left = nn.Sequential(\n nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(outchannel),\n nn.ReLU(inplace=True),\n nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(outchannel)\n )\n self.shortcut = nn.Sequential()\n if stride != 1 or inchannel != outchannel:\n self.shortcut = nn.Sequential(\n nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(outchannel)\n )\n\n def forward(self, x):\n out = self.left(x)\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet18(nn.Module):\n def __init__(self):\n self.inchannel = 64\n super(ResNet18, self).__init__()\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)\n self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)\n self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)\n self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def make_layer(self, block, channels, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.inchannel, channels, stride))\n self.inchannel = channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n\nclass ResNet50(nn.Module):\n def __init__(self):\n self.inplanes = 64\n super(ResNet50, self).__init__()\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(Bottleneck, 64, 3)\n self.layer2 = self._make_layer(Bottleneck, 128, 4, stride=2)\n self.layer3 = self._make_layer(Bottleneck, 256, 6, stride=2)\n self.layer4 = self._make_layer(Bottleneck, 512, 3, stride=2)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
ankitbarai507/Face-Recognition-
[ "f3b0179642aba7d9e27613ab80c4c73026c35869" ]
[ "train.py" ]
[ "import platform\nprint(platform.python_version())\nimport os\nfrom os.path import join, isdir\nfrom PIL import Image\n\nimport cv2\nimport numpy as np\n\nfrom detect import detect_faces, level_face\nimport config\n\ndef train_recognizer(db_folder, train_size=config.DEFAULT_FACE_SIZE, show_faces=False, force_train=False):\n \"\"\" Train and return face recognier.\n\n db_folder -- the image folder that group faces in sub-folders\n train_size -- tuple of x and y size for resizing faces found before training\n show_faces -- display images of faces found and used for training\n force_train -- force re-training even when previous training result is found\n \"\"\"\n print(cv2.__version__)\n # recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer = cv2.face.FisherFaceRecognizer_create()\n # recognizer = cv2.face.EigenFaceRecognizer_create()\n\n if (not force_train) and _load_recognizer(recognizer):\n return recognizer\n\n folders = _get_labels(db_folder)\n images = []\n labels = []\n\n label_count = 0\n label_map = {}\n\n for folder in folders:\n faces = _extract_faces(db_folder, folder, True)\n\n # resize all faces to same size for some recognizers\n images.extend([cv2.resize(face, train_size) for face in faces])\n\n labels.extend([label_count] * len(faces))\n label_map[label_count] = folder\n label_count += 1\n\n if show_faces:\n cv2.namedWindow(\"faces\", 1)\n cv2.imshow(\"faces\", _combine_faces(faces))\n cv2.waitKey(0)\n\n if show_faces:\n cv2.destroyWindow(\"faces\")\n\n recognizer.train(images, np.array(labels))\n for key in label_map:\n recognizer.setLabelInfo(key, label_map[key])\n\n _save_recognizer(recognizer)\n\n return recognizer\n\ndef _get_labels(a_dir):\n return [name for name in os.listdir(a_dir) if isdir(join(a_dir, name))]\n\ndef _supported_img(name):\n return name.lower().endswith('.png') or name.lower().endswith('.jpg') or name.lower().endswith('.jpeg')\n\ndef _combine_faces(faces, w=100, h=100, num_per_row=5):\n small_img = []\n row_img = []\n count = 0\n for img in faces:\n small_img.append(cv2.resize(img, (w, h)))\n count += 1\n if count % num_per_row == 0:\n count = 0\n row_img.append(np.concatenate(small_img, axis=1))\n small_img = []\n if len(small_img) > 0:\n for x in range (0, num_per_row-len(small_img)):\n small_img.append(np.zeros((h,w), np.uint8))\n row_img.append(np.concatenate(small_img, axis=1))\n\n return np.concatenate(row_img, axis=0)\n\ndef _extract_faces(a_dir, folder, do_level_face=False):\n faceCascade = cv2.CascadeClassifier(config.FACE_CASCADE_FILE)\n eyeCascade = cv2.CascadeClassifier(config.EYE_CASCADE_FILE)\n\n the_path = join(a_dir, folder)\n result = []\n\n for img in [f for f in os.listdir(the_path) if _supported_img(f)]:\n img_path = join(the_path, img)\n image, faces = detect_faces(cv2.imread(img_path), faceCascade, eyeCascade, True)\n if len(faces) == 0:\n print(\"No face found in \" + img_path)\n for ((x, y, w, h), eyedim) in faces:\n if not do_level_face:\n result.append(image[y:y+h, x:x+w])\n else:\n result.append(level_face(image, ((x, y, w, h), eyedim)))\n #result.append(image[y:y+h, x:x+w])\n return result\n\ndef _save_recognizer(recognizer, filename=config.RECOGNIZER_OUTPUT_FILE):\n recognizer.save(filename)\n\ndef _load_recognizer(recognizer, filename=config.RECOGNIZER_OUTPUT_FILE):\n try:\n recognizer.read(filename)\n return True\n except (cv2.error):\n return False\n\nif __name__ == '__main__':\n recognizer = train_recognizer('imgdb', show_faces=False, force_train=True)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros" ] ]
anCoderr/basicSelfDrivingCar
[ "faf91c5375f1a067bf00b785b6a5ce86c0a74b36" ]
[ "src/ai.py" ]
[ "# AI for Self Driving Car\n\n\nimport numpy as np\nimport random\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\n\n\n# Creating the basic architecture of the ANN\n\nclass Network(nn.Module):\n\n def __init__(self, input_size, nb_action):\n super(Network, self).__init__()\n self.input_size = input_size\n self.nb_action = nb_action\n self.fc1 = nn.Linear(input_size, 30)\n self.fc2 = nn.Linear(30, nb_action)\n\n def forward(self, state):\n x = F.relu(self.fc1(state))\n q_values = self.fc2(x)\n return q_values\n\n\n# Implementing Experience Replay for better performance\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n\n def push(self, event):\n self.memory.append(event)\n if len(self.memory) > self.capacity:\n del self.memory[0]\n\n def sample(self, batch_size):\n samples = zip(*random.sample(self.memory, batch_size))\n return map(lambda x: Variable(torch.cat(x, 0)), samples)\n\n\n# Implementing Deep Q Learning | \"The fun stuff begins\" ~ anCoderr\n\nclass Dqn:\n\n def __init__(self, input_size, nb_action, gamma):\n self.gamma = gamma\n self.reward_window = []\n self.model = Network(input_size, nb_action)\n self.memory = ReplayMemory(100000)\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)\n self.last_state = torch.Tensor(input_size).unsqueeze(0)\n self.last_action = 0\n self.last_reward = 0\n\n def select_action(self, state):\n probs = F.softmax(self.model(Variable(state, volatile=True)) * 75) \n # T=75 it defines the cars confidence in its actions. How biased are we for the exploitation of data rather than exploration.\n action = probs.multinomial(num_samples=1)\n return action.data[0, 0]\n\n def learn(self, batch_state, batch_next_state, batch_reward, batch_action):\n outputs = self.model(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1)\n next_outputs = self.model(batch_next_state).detach().max(1)[0]\n target = self.gamma * next_outputs + batch_reward\n td_loss = F.smooth_l1_loss(outputs, target)\n self.optimizer.zero_grad()\n td_loss.backward(retain_graph=True)\n self.optimizer.step()\n\n def update(self, reward, new_signal):\n new_state = torch.Tensor(new_signal).float().unsqueeze(0)\n self.memory.push((self.last_state, new_state, torch.LongTensor([int(self.last_action)]), torch.Tensor([self.last_reward])))\n action = self.select_action(new_state)\n if len(self.memory.memory) > 100:\n batch_state, batch_next_state, batch_action, batch_reward = self.memory.sample(100)\n self.learn(batch_state, batch_next_state, batch_reward, batch_action)\n self.last_action = action\n self.last_state = new_state\n self.last_reward = reward\n self.reward_window.append(reward)\n if len(self.reward_window) > 1000:\n del self.reward_window[0]\n return action\n\n def score(self):\n return sum(self.reward_window) / (len(self.reward_window) + 1.)\n\n def save(self):\n torch.save({'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n }, 'last_brain.pth')\n\n def load(self):\n if os.path.isfile('last_brain.pth'):\n print(\"=> loading checkpoint... \")\n checkpoint = torch.load('last_brain.pth')\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"done !\")\n else:\n print(\"no checkpoint found...\")\n" ]
[ [ "torch.Tensor", "torch.cat", "torch.load", "torch.nn.Linear", "torch.nn.functional.smooth_l1_loss", "torch.autograd.Variable" ] ]
zjdcts/CSAM-U-Net
[ "91fae3c6b4fc7247ba9ee2dc6e64b51da569bf2e" ]
[ "unet3d/models/pytorch/segmentation/unet.py" ]
[ "import torch\nfrom ..classification.myronenko import MyronenkoEncoder\nfrom ..classification.decoder import MirroredDecoder\nfrom ..autoencoder.variational import ConvolutionalAutoEncoder\n\n\nclass UNetEncoder(MyronenkoEncoder):\n def forward(self, x, y=None):\n outputs = list()\n for layer, downsampling in zip(self.layers[:-1], self.downsampling_convolutions):\n x = layer(x)\n outputs.insert(0, x)\n x = downsampling(x)\n x = self.layers[-1](x)\n # print(\"encoder output:\", x.size())\n outputs.insert(0, x)\n return outputs\n\n\nclass UNetDecoder(MirroredDecoder):\n def calculate_layer_widths(self, depth):\n in_width, out_width = super().calculate_layer_widths(depth=depth)\n if depth != len(self.layer_blocks) - 1:\n in_width *= 2\n print(\"Decoder {}:\".format(depth), in_width, out_width)\n return in_width, out_width\n\n def forward(self, inputs, y=None):\n x = inputs[0]\n # print(\"x:\", x.size())\n for i, (pre, up, lay) in enumerate(zip(self.pre_upsampling_blocks, self.upsampling_blocks, self.layers[:-1])):\n x = lay(x)\n # print(\"lay(x):\", x.size())\n x = pre(x)\n x = up(x)\n # print(\"up(x):\", x.size())\n x = torch.cat((x, inputs[i + 1]), 1)\n # print(\"cat(x):\", x.size())\n x = self.layers[-1](x)\n return x\n\n\nclass UNet(ConvolutionalAutoEncoder):\n def __init__(self, *args, encoder_class=UNetEncoder, decoder_class=UNetDecoder, n_outputs=1, is_training, is_dsv, is_half, **kwargs):\n super().__init__(*args, encoder_class=encoder_class, decoder_class=decoder_class, n_outputs=n_outputs, **kwargs)\n self.set_final_convolution(n_outputs=n_outputs)\n\n\nclass AutoImplantUNet(UNet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def forward(self, x):\n y = super(AutoImplantUNet, self).forward(x)\n return y - x\n\n def test(self, x):\n return super(AutoImplantUNet, self).forward(x)\n" ]
[ [ "torch.cat" ] ]
atentas/ennemi
[ "0f27c429139bc836e935583c0f9d4b8efafd7718" ]
[ "tests/integration/test_entropy_identities.py" ]
[ "# MIT License - Copyright Petri Laarne and contributors\n# See the LICENSE.md file included in this source code package\n\n\"\"\"Mathematical identities for entropy and mutual information.\"\"\"\n\nfrom ennemi import estimate_entropy, estimate_mi\nimport numpy as np\nimport unittest\n\nclass TestEntropyIdentities(unittest.TestCase):\n \n def test_mi_as_conditional_entropy_difference(self) -> None:\n # Make up some kind of distribution\n rng = np.random.default_rng(0)\n x = rng.gamma(shape=2.0, scale=1.0, size=2000)\n y = rng.normal(x, scale=1.0, size=x.shape)\n\n # We should have I(X;Y) = H(X) - H(X|Y)\n mi = estimate_mi(y, x)\n ent_x = estimate_entropy(x)\n cond_ent = estimate_entropy(x, cond=y)\n\n self.assertAlmostEqual(ent_x - cond_ent, mi, delta=0.02)\n \n def test_mi_as_sum_of_entropies(self) -> None:\n # Make up another distribution\n rng = np.random.default_rng(1)\n x = rng.chisquare(5, size=8000)\n y = rng.gamma(x, scale=1.0, size=x.shape)\n\n # We should have I(X;Y) = H(X) + H(Y) - H(X,Y)\n mi = estimate_mi(y, x)\n marginal = estimate_entropy(np.column_stack((x, y)))\n joint = estimate_entropy(np.column_stack((x,y)), multidim=True)\n\n self.assertAlmostEqual(np.sum(marginal) - joint, mi, delta=0.02)\n" ]
[ [ "numpy.sum", "numpy.random.default_rng", "numpy.column_stack" ] ]
amazon-research/BartGraphSumm
[ "162456fddaf32b3f9f44083b1cf180869b9db920" ]
[ "src/fairseq/fairseq/tasks/translation.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom argparse import Namespace\nimport json\nimport itertools\nimport logging\nimport os\n\nimport numpy as np\n\nfrom fairseq import metrics, options, utils\nfrom fairseq.data import (\n AppendTokenDataset,\n ConcatDataset,\n data_utils,\n encoders,\n indexed_dataset,\n LanguagePairDataset,\n PrependTokenDataset,\n StripTokenDataset,\n TruncateDataset,\n)\n\nfrom fairseq.tasks import FairseqTask, register_task\n\nEVAL_BLEU_ORDER = 4\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_langpair_dataset(\n data_path, split,\n src, src_dict,\n tgt, tgt_dict,\n combine, dataset_impl, upsample_primary,\n left_pad_source, left_pad_target, max_source_positions,\n max_target_positions, prepend_bos=False, load_alignments=False,\n truncate_source=False, append_source_id=False,\n extra_input=None,\n enable_graph_encoder=False,\n graph_split_index=1\n):\n\n def split_exists(split, src, tgt, lang, data_path):\n filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))\n return indexed_dataset.dataset_exists(filename, impl=dataset_impl)\n\n src_datasets = []\n tgt_datasets = []\n\n for k in itertools.count():\n split_k = split + (str(k) if k > 0 else '')\n\n # infer langcode\n if split_exists(split_k, src, tgt, src, data_path):\n prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))\n elif split_exists(split_k, tgt, src, src, data_path):\n prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))\n else:\n if k > 0:\n break\n else:\n raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))\n\n src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)\n if truncate_source:\n src_dataset = AppendTokenDataset(\n TruncateDataset(\n StripTokenDataset(src_dataset, src_dict.eos()),\n max_source_positions - 1,\n ),\n src_dict.eos(),\n )\n src_datasets.append(src_dataset)\n\n tgt_dataset = data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)\n if tgt_dataset is not None:\n tgt_datasets.append(tgt_dataset)\n\n logger.info('{} {} {}-{} {} examples'.format(\n data_path, split_k, src, tgt, len(src_datasets[-1])\n ))\n\n if not combine:\n break\n\n assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0\n\n if len(src_datasets) == 1:\n src_dataset = src_datasets[0]\n tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None\n else:\n sample_ratios = [1] * len(src_datasets)\n sample_ratios[0] = upsample_primary\n src_dataset = ConcatDataset(src_datasets, sample_ratios)\n if len(tgt_datasets) > 0:\n tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)\n else:\n tgt_dataset = None\n\n if prepend_bos:\n assert hasattr(src_dict, \"bos_index\") and hasattr(tgt_dict, \"bos_index\")\n src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())\n if tgt_dataset is not None:\n tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())\n\n eos = None\n if append_source_id:\n src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))\n if tgt_dataset is not None:\n tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))\n eos = tgt_dict.index('[{}]'.format(tgt))\n\n align_dataset = None\n if load_alignments:\n align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))\n if indexed_dataset.dataset_exists(align_path, 
impl=dataset_impl):\n align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)\n\n tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None\n \n #Add the extra inputs if exist\n if extra_input is not None:\n extra_input_dataset = data_utils.load_extra_dataset(os.path.join(extra_input, '{}.graph'.format(\"val\" if split==\"valid\" else split)))\n else:\n extra_input_dataset = None\n\n return LanguagePairDataset(\n src_dataset, src_dataset.sizes, src_dict,\n tgt_dataset, tgt_dataset_sizes, tgt_dict,\n left_pad_source=left_pad_source,\n left_pad_target=left_pad_target,\n max_source_positions=max_source_positions,\n max_target_positions=max_target_positions,\n align_dataset=align_dataset, eos=eos,\n extra_input_dataset=extra_input_dataset,\n enable_graph_encoder=enable_graph_encoder,\n graph_split_index=graph_split_index\n )\n\n\n@register_task('translation')\nclass TranslationTask(FairseqTask):\n \"\"\"\n Translate from one (source) language to another (target) language.\n\n Args:\n src_dict (~fairseq.data.Dictionary): dictionary for the source language\n tgt_dict (~fairseq.data.Dictionary): dictionary for the target language\n\n .. note::\n\n The translation task is compatible with :mod:`fairseq-train`,\n :mod:`fairseq-generate` and :mod:`fairseq-interactive`.\n\n The translation task provides the following additional command-line\n arguments:\n\n .. argparse::\n :ref: fairseq.tasks.translation_parser\n :prog:\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('data', help='colon separated path to data directories list, \\\n will be iterated upon during epochs in round-robin manner')\n parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',\n help='source language')\n parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',\n help='target language')\n parser.add_argument('--load-alignments', action='store_true',\n help='load the binarized alignments')\n parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',\n help='pad the source on the left')\n parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',\n help='pad the target on the left')\n parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the source sequence')\n parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the target sequence')\n parser.add_argument('--upsample-primary', default=1, type=int,\n help='amount to upsample primary dataset')\n parser.add_argument('--truncate-source', action='store_true', default=False,\n help='truncate source to max-source-positions')\n parser.add_argument('--extra-input', default=None, type=str, metavar='EXTRA',\n help='extra input path to the seq2seq model')\n parser.add_argument('--enable-graph-encoder', action='store_true', default=False,\n help='enable this argument for dual encoding of general text and graph text')\n parser.add_argument('--graph-split-index', type=int, default=952,\n help='split the input based on this index to get general text and graph text') #TODO: fix this\n\n\n # options for reporting BLEU during validation\n parser.add_argument('--eval-bleu', action='store_true',\n help='evaluation with BLEU scores')\n parser.add_argument('--eval-bleu-detok', type=str, default=\"space\",\n help='detokenize before computing BLEU (e.g., \"moses\"); '\n 
'required if using --eval-bleu; use \"space\" to '\n 'disable detokenization; see fairseq.data.encoders '\n 'for other options')\n parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',\n help='args for building the tokenizer, if needed')\n parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,\n help='compute tokenized BLEU instead of sacrebleu')\n parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,\n help='remove BPE before computing BLEU')\n parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',\n help='generation args for BLUE scoring, '\n 'e.g., \\'{\"beam\": 4, \"lenpen\": 0.6}\\'')\n parser.add_argument('--eval-bleu-print-samples', action='store_true',\n help='print sample generations during validation')\n # fmt: on\n\n def __init__(self, args, src_dict, tgt_dict):\n super().__init__(args)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n args.left_pad_source = options.eval_bool(args.left_pad_source)\n args.left_pad_target = options.eval_bool(args.left_pad_target)\n\n paths = utils.split_paths(args.data)\n assert len(paths) > 0\n # find language pair automatically\n if args.source_lang is None or args.target_lang is None:\n args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])\n if args.source_lang is None or args.target_lang is None:\n raise Exception('Could not infer language pair, please provide it explicitly')\n\n # load dictionaries\n src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))\n tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))\n logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))\n\n return cls(args, src_dict, tgt_dict)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n paths = utils.split_paths(self.args.data)\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n src, tgt = self.args.source_lang, self.args.target_lang\n\n self.datasets[split] = load_langpair_dataset(\n data_path, split, src, self.src_dict, tgt, self.tgt_dict,\n combine=combine, dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=self.args.max_source_positions,\n max_target_positions=self.args.max_target_positions,\n load_alignments=self.args.load_alignments,\n truncate_source=self.args.truncate_source,\n extra_input=self.args.extra_input,\n enable_graph_encoder=self.args.enable_graph_encoder,\n graph_split_index=self.args.graph_split_index\n\n )\n\n def build_dataset_for_inference(self, src_tokens, src_lengths, ids=None, extra_input=None):\n if extra_input is None:\n return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary,\n enable_graph_encoder=self.args.enable_graph_encoder,\n graph_split_index=self.args.graph_split_index)\n else:\n if not hasattr(self, 
'extra_input_dataset'):\n self.extra_input_dataset = data_utils.load_extra_dataset(os.path.join(extra_input, 'test.graph'))\n batch_extra_input_dataset = [self.extra_input_dataset[id_] for id_ in ids] # reordering the index ids with true ids\n return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary, extra_input_dataset=batch_extra_input_dataset)\n\n \n\n def build_model(self, args):\n model = super().build_model(args)\n if getattr(args, 'eval_bleu', False):\n assert getattr(args, 'eval_bleu_detok', None) is not None, (\n '--eval-bleu-detok is required if using --eval-bleu; '\n 'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '\n 'to disable detokenization, e.g., when using sentencepiece)'\n )\n detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')\n self.tokenizer = encoders.build_tokenizer(Namespace(\n tokenizer=getattr(args, 'eval_bleu_detok', None),\n **detok_args\n ))\n\n gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')\n self.sequence_generator = self.build_generator([model], Namespace(**gen_args))\n return model\n\n def valid_step(self, sample, model, criterion):\n loss, sample_size, logging_output = super().valid_step(sample, model, criterion)\n if self.args.eval_bleu:\n bleu = self._inference_with_bleu(self.sequence_generator, sample, model)\n logging_output['_bleu_sys_len'] = bleu.sys_len\n logging_output['_bleu_ref_len'] = bleu.ref_len\n # we split counts into separate entries so that they can be\n # summed efficiently across workers using fast-stat-sync\n assert len(bleu.counts) == EVAL_BLEU_ORDER\n for i in range(EVAL_BLEU_ORDER):\n logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]\n logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]\n return loss, sample_size, logging_output\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n if self.args.eval_bleu:\n\n def sum_logs(key):\n return sum(log.get(key, 0) for log in logging_outputs)\n\n counts, totals = [], []\n for i in range(EVAL_BLEU_ORDER):\n counts.append(sum_logs('_bleu_counts_' + str(i)))\n totals.append(sum_logs('_bleu_totals_' + str(i)))\n\n if max(totals) > 0:\n # log counts as numpy arrays -- log_scalar will sum them correctly\n metrics.log_scalar('_bleu_counts', np.array(counts))\n metrics.log_scalar('_bleu_totals', np.array(totals))\n metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))\n metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))\n\n def compute_bleu(meters):\n import inspect\n import sacrebleu\n fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]\n if 'smooth_method' in fn_sig:\n smooth = {'smooth_method': 'exp'}\n else:\n smooth = {'smooth': 'exp'}\n bleu = sacrebleu.compute_bleu(\n correct=meters['_bleu_counts'].sum,\n total=meters['_bleu_totals'].sum,\n sys_len=meters['_bleu_sys_len'].sum,\n ref_len=meters['_bleu_ref_len'].sum,\n **smooth\n )\n return round(bleu.score, 2)\n\n metrics.log_derived('bleu', compute_bleu)\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.args.max_source_positions, self.args.max_target_positions)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict\n\n def _inference_with_bleu(self, generator, sample, model):\n import sacrebleu\n\n def 
decode(toks, escape_unk=False):\n s = self.tgt_dict.string(\n toks.int().cpu(),\n self.args.eval_bleu_remove_bpe,\n # The default unknown string in fairseq is `<unk>`, but\n # this is tokenized by sacrebleu as `< unk >`, inflating\n # BLEU scores. Instead, we use a somewhat more verbose\n # alternative that is unlikely to appear in the real\n # reference, but doesn't get split into multiple tokens.\n unk_string=(\n \"UNKNOWNTOKENINREF\" if escape_unk else \"UNKNOWNTOKENINHYP\"\n ),\n )\n if self.tokenizer:\n s = self.tokenizer.decode(s)\n return s\n\n gen_out = self.inference_step(generator, [model], sample, None)\n hyps, refs = [], []\n for i in range(len(gen_out)):\n hyps.append(decode(gen_out[i][0]['tokens']))\n refs.append(decode(\n utils.strip_pad(sample['target'][i], self.tgt_dict.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo\n ))\n if self.args.eval_bleu_print_samples:\n logger.info('example hypothesis: ' + hyps[0])\n logger.info('example reference: ' + refs[0])\n if self.args.eval_tokenized_bleu:\n return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none')\n else:\n return sacrebleu.corpus_bleu(hyps, [refs])\n" ]
[ [ "numpy.array" ] ]
bruneli/statspy
[ "7bf07cdd25f7d5118f39fd5465853f4da9116c80" ]
[ "setup.py" ]
[ "#!/usr/bin/env python\n\nimport sys\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\n\nDISTNAME = 'statspy'\nDESCRIPTION = 'Python module for statistics built on top of NumPy/SciPy'\nLONG_DESCRIPTION = open('README.rst').read()\nURL = 'http://bruneli.github.io/statspy/'\nLICENSE = 'new BSD'\nDOWNLOAD_URL = 'http://sourceforge.net/projects/statspy/files/'\n\nbuiltins.__STATSPY_SETUP__ = True\n\nimport statspy\nVERSION = statspy.__version__\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n config.set_options(ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n config.add_subpackage('statspy')\n config.add_data_dir('statspy/tests')\n return config\n\ndef setup_package():\n metadata = dict(name=DISTNAME,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n ])\n if (len(sys.argv) >= 2\n and ('--help' in sys.argv[1:] or sys.argv[1]\n in ('--help-commands', 'egg_info', '--version', 'clean'))):\n try:\n from setuptools import setup\n except ImportError:\n from distutils.core import setup\n metadata['version'] = VERSION\n else:\n from numpy.distutils.core import setup\n metadata['configuration'] = configuration\n setup(**metadata)\n\nif __name__ == \"__main__\":\n setup_package()\n" ]
[ [ "numpy.distutils.misc_util.Configuration" ] ]
apetkau/card-live-dashboard
[ "02a2f5a840bf2c1f66362f593e8319cc35a4b45b" ]
[ "card_live_dashboard/test/unit/service/test_TaxonomicParser.py" ]
[ "from typing import List, Dict, Optional\nimport pandas as pd\nfrom ete3 import NCBITaxa\n\nfrom card_live_dashboard.service.TaxonomicParser import TaxonomicParser\n\n\n# There's likely a way to use mock objects here instead of just creating a new class with the same methods\n# as NCBITaxa, but I couldn't figure out how to do it. That is applying @unittest.mock.patch('ete3.NCBITaxa') wouldn't\n# actually mock ete3.NCBITaxa.\n# I also know there's a warning here that I'm not calling the superclass constructor, but I can't call it since it will\n# attempt to install the entire NCBI Taxonomy database, which I don't want in unit tests.\nclass NCBITaxaMock(NCBITaxa):\n LINEAGES = {\n 28901: 'Salmonella enterica',\n 543: 'Enterobacteriaceae',\n 1639: 'Listeria monocytogenes',\n }\n\n def __init__(self):\n pass\n\n def get_lineage(self, lineage: int) -> Optional[List[int]]:\n return [lineage]\n\n def get_rank(self, lineages: List[int]) -> Dict[int, str]:\n ranks = {}\n for lineage in lineages:\n ranks[lineage] = 'species'\n return ranks\n\n def get_taxid_translator(self, taxids: List[int], try_synonyms: bool = True) -> Dict[int, str]:\n names = {}\n for id in taxids:\n names[id] = self.LINEAGES[id]\n return names\n\n\ndef test_taxonomic_parser_simple():\n lmat_df = pd.DataFrame(columns=['filename', 'lmat.taxonomy_label', 'lmat.count', 'lmat.ncbi_taxon_id'],\n data=[\n ['file1', 'Salmonella enterica', '10', 28901],\n ['file1', 'Salmonella enterica', '10', 28901],\n ]).set_index('filename')\n\n rgi_kmer_df = pd.DataFrame(columns=['filename', 'rgi_kmer.CARD*kmer Prediction'],\n data=[\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Salmonella enterica (chromosome)'],\n ]).set_index('filename')\n\n tax = TaxonomicParser(ncbi_taxa=NCBITaxaMock(), df_lmat=lmat_df, df_rgi_kmer=rgi_kmer_df)\n file_matches = tax.create_file_matches()\n\n assert 1 == len(file_matches)\n assert ['Salmonella enterica'] == file_matches['lmat.taxonomy_label'].tolist()\n assert ['Salmonella enterica'] == file_matches['rgi_kmer.taxonomy_label'].tolist()\n assert [True] == file_matches['matches'].tolist()\n\n\ndef test_taxonomic_parser_no_match():\n lmat_df = pd.DataFrame(columns=['filename', 'lmat.taxonomy_label', 'lmat.count', 'lmat.ncbi_taxon_id'],\n data=[\n ['file1', 'Salmonella enterica', '10', 28901],\n ['file1', 'Enterobacteriaceae', '20', 543],\n ]).set_index('filename')\n\n rgi_kmer_df = pd.DataFrame(columns=['filename', 'rgi_kmer.CARD*kmer Prediction'],\n data=[\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Salmonella enterica (plasmid)'],\n ]).set_index('filename')\n\n tax = TaxonomicParser(ncbi_taxa=NCBITaxaMock(), df_lmat=lmat_df, df_rgi_kmer=rgi_kmer_df)\n file_matches = tax.create_file_matches()\n\n assert 1 == len(file_matches)\n assert ['Enterobacteriaceae'] == file_matches['lmat.taxonomy_label'].tolist()\n assert ['Salmonella enterica'] == file_matches['rgi_kmer.taxonomy_label'].tolist()\n assert [False] == file_matches['matches'].tolist()\n\n\ndef test_taxonomic_parser_multiple_match():\n lmat_df = pd.DataFrame(columns=['filename', 'lmat.taxonomy_label', 'lmat.count', 'lmat.ncbi_taxon_id'],\n data=[\n ['file1', 'Salmonella enterica', '20', 28901],\n ['file1', 'Enterobacteriaceae', '10', 543],\n ]).set_index('filename')\n\n rgi_kmer_df = pd.DataFrame(columns=['filename', 'rgi_kmer.CARD*kmer Prediction'],\n data=[\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Salmonella enterica (plasmid)'],\n ]).set_index('filename')\n\n tax = 
TaxonomicParser(ncbi_taxa=NCBITaxaMock(), df_lmat=lmat_df, df_rgi_kmer=rgi_kmer_df)\n file_matches = tax.create_file_matches()\n\n assert 1 == len(file_matches)\n assert ['Salmonella enterica'] == file_matches['lmat.taxonomy_label'].tolist()\n assert ['Salmonella enterica'] == file_matches['rgi_kmer.taxonomy_label'].tolist()\n assert [True] == file_matches['matches'].tolist()\n\n\ndef test_taxonomic_parser_multiple_files():\n lmat_df = pd.DataFrame(columns=['filename', 'lmat.taxonomy_label', 'lmat.count', 'lmat.ncbi_taxon_id'],\n data=[\n ['file1', 'Salmonella enterica', '20', 28901],\n ['file1', 'Enterobacteriaceae', '10', 543],\n ['file2', 'Enterobacteriaceae', '20', 543],\n ['file2', 'Salmonella enterica', '10', 28901],\n ]).set_index('filename')\n\n rgi_kmer_df = pd.DataFrame(columns=['filename', 'rgi_kmer.CARD*kmer Prediction'],\n data=[\n ['file1', 'Enterobacteriaceae (chromosome)'],\n ['file1', 'Enterobacteriaceae (plasmid)'],\n ['file2', 'Enterobacteriaceae (chromosome)'],\n ['file2', 'Enterobacteriaceae (plasmid)'],\n ]).set_index('filename')\n\n tax = TaxonomicParser(ncbi_taxa=NCBITaxaMock(), df_lmat=lmat_df, df_rgi_kmer=rgi_kmer_df)\n file_matches = tax.create_file_matches()\n\n assert 2 == len(file_matches)\n assert ['Salmonella enterica', 'Enterobacteriaceae'] == file_matches['lmat.taxonomy_label'].tolist()\n assert ['Enterobacteriaceae', 'Enterobacteriaceae'] == file_matches['rgi_kmer.taxonomy_label'].tolist()\n assert [False, True] == file_matches['matches'].tolist()\n\n\ndef test_taxonomic_parser_tie_case1():\n lmat_df = pd.DataFrame(columns=['filename', 'lmat.taxonomy_label', 'lmat.count', 'lmat.ncbi_taxon_id'],\n data=[\n ['file1', 'Salmonella enterica', '20', 28901],\n ['file1', 'Enterobacteriaceae', '20', 543],\n ['file1', 'Listeria monocytogenes', '10', 1639],\n ]).set_index('filename')\n\n rgi_kmer_df = pd.DataFrame(columns=['filename', 'rgi_kmer.CARD*kmer Prediction'],\n data=[\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Enterobacteriaceae (chromosome)'],\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Enterobacteriaceae (chromosome)'],\n ['file1', 'Listeria monocytogenes (chromosome)'],\n ]).set_index('filename')\n\n tax = TaxonomicParser(ncbi_taxa=NCBITaxaMock(), df_lmat=lmat_df, df_rgi_kmer=rgi_kmer_df)\n file_matches = tax.create_file_matches()\n\n assert 1 == len(file_matches)\n assert ['Salmonella enterica'] == file_matches['lmat.taxonomy_label'].tolist()\n assert ['Salmonella enterica'] == file_matches['rgi_kmer.taxonomy_label'].tolist()\n assert [True] == file_matches['matches'].tolist()\n\n\ndef test_taxonomic_parser_tie_case2():\n lmat_df = pd.DataFrame(columns=['filename', 'lmat.taxonomy_label', 'lmat.count', 'lmat.ncbi_taxon_id'],\n data=[\n ['file1', 'Enterobacteriaceae', '20', 543],\n ['file1', 'Listeria monocytogenes', '10', 1639],\n ['file1', 'Salmonella enterica', '20', 28901],\n ]).set_index('filename')\n\n rgi_kmer_df = pd.DataFrame(columns=['filename', 'rgi_kmer.CARD*kmer Prediction'],\n data=[\n ['file1', 'Listeria monocytogenes (chromosome)'],\n ['file1', 'Enterobacteriaceae (chromosome)'],\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Salmonella enterica (chromosome)'],\n ['file1', 'Enterobacteriaceae (chromosome)'],\n ]).set_index('filename')\n\n tax = TaxonomicParser(ncbi_taxa=NCBITaxaMock(), df_lmat=lmat_df, df_rgi_kmer=rgi_kmer_df)\n file_matches = tax.create_file_matches()\n\n assert 1 == len(file_matches)\n assert ['Salmonella enterica'] == file_matches['lmat.taxonomy_label'].tolist()\n 
assert ['Salmonella enterica'] == file_matches['rgi_kmer.taxonomy_label'].tolist()\n assert [True] == file_matches['matches'].tolist()\n" ]
[ [ "pandas.DataFrame" ] ]
Michalos88/dopamine
[ "2040da11d7de2fc304925c46d33f59af66759afa" ]
[ "dopamine/agents/implicit_quantile/implicit_quantile_agent.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Dopamine Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The implicit quantile networks (IQN) agent.\n\nThe agent follows the description given in \"Implicit Quantile Networks for\nDistributional RL\" (Dabney et. al, 2018).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n\n\nfrom dopamine.agents.rainbow import rainbow_agent\nfrom dopamine.discrete_domains import atari_lib\nimport tensorflow as tf\n\nimport gin.tf\n\nslim = tf.contrib.slim\n\n\[email protected]\nclass ImplicitQuantileAgent(rainbow_agent.RainbowAgent):\n \"\"\"An extension of Rainbow to perform implicit quantile regression.\"\"\"\n\n def __init__(self,\n sess,\n num_actions,\n network=atari_lib.implicit_quantile_network,\n kappa=1.0,\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n double_dqn=False,\n summary_writer=None,\n summary_writing_frequency=500):\n \"\"\"Initializes the agent and constructs the Graph.\n\n Most of this constructor's parameters are IQN-specific hyperparameters whose\n values are taken from Dabney et al. (2018).\n\n Args:\n sess: `tf.Session` object for running associated ops.\n num_actions: int, number of actions the agent can take at any state.\n network: function expecting three parameters:\n (num_actions, network_type, state). This function will return the\n network_type object containing the tensors output by the network.\n See dopamine.discrete_domains.atari_lib.nature_dqn_network as\n an example.\n kappa: float, Huber loss cutoff.\n num_tau_samples: int, number of online quantile samples for loss\n estimation.\n num_tau_prime_samples: int, number of target quantile samples for loss\n estimation.\n num_quantile_samples: int, number of quantile samples for computing\n Q-values.\n quantile_embedding_dim: int, embedding dimension for the quantile input.\n double_dqn: boolean, whether to perform double DQN style learning\n as described in Van Hasselt et al.: https://arxiv.org/abs/1509.06461.\n summary_writer: SummaryWriter object for outputting training statistics.\n Summary writing disabled if set to None.\n summary_writing_frequency: int, frequency with which summaries will be\n written. 
Lower values will result in slower training.\n \"\"\"\n self.kappa = kappa\n # num_tau_samples = N below equation (3) in the paper.\n self.num_tau_samples = num_tau_samples\n # num_tau_prime_samples = N' below equation (3) in the paper.\n self.num_tau_prime_samples = num_tau_prime_samples\n # num_quantile_samples = k below equation (3) in the paper.\n self.num_quantile_samples = num_quantile_samples\n # quantile_embedding_dim = n above equation (4) in the paper.\n self.quantile_embedding_dim = quantile_embedding_dim\n # option to perform double dqn.\n self.double_dqn = double_dqn\n\n super(ImplicitQuantileAgent, self).__init__(\n sess=sess,\n num_actions=num_actions,\n network=network,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency)\n\n def _get_network_type(self):\n \"\"\"Returns the type of the outputs of the implicit quantile network.\n\n Returns:\n _network_type object defining the outputs of the network.\n \"\"\"\n return collections.namedtuple(\n 'iqn_network', ['quantile_values', 'quantiles'])\n\n def _network_template(self, state, num_quantiles):\n r\"\"\"Builds an Implicit Quantile ConvNet.\n\n Takes state and quantile as inputs and outputs state-action quantile values.\n\n Args:\n state: A `tf.placeholder` for the RL state.\n num_quantiles: int, number of quantile inputs.\n\n Returns:\n _network_type object containing quantile value outputs of the network.\n \"\"\"\n return self.network(self.num_actions, self.quantile_embedding_dim,\n self._get_network_type(), state, num_quantiles)\n\n def _build_networks(self):\n \"\"\"Builds the IQN computations needed for acting and training.\n\n These are:\n self.online_convnet: For computing the current state's quantile values.\n self.target_convnet: For computing the next state's target quantile\n values.\n self._net_outputs: The actual quantile values.\n self._q_argmax: The action maximizing the current state's Q-values.\n self._replay_net_outputs: The replayed states' quantile values.\n self._replay_next_target_net_outputs: The replayed next states' target\n quantile values.\n \"\"\"\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. 
if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the next states\n # in the replay buffer. Compute the argmax over the Q-values.\n if self.double_dqn:\n outputs_action = self.online_convnet(self._replay.next_states,\n self.num_quantile_samples)\n else:\n outputs_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = outputs_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_net_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_net_target_q_values, axis=1)\n\n def _build_target_quantile_values_op(self):\n \"\"\"Build an op used as a target for return values at given quantiles.\n\n Returns:\n An op calculating the target quantile return.\n \"\"\"\n batch_size = tf.shape(self._replay.rewards)[0]\n # Shape of rewards: (num_tau_prime_samples x batch_size) x 1.\n rewards = self._replay.rewards[:, None]\n rewards = tf.tile(rewards, [self.num_tau_prime_samples, 1])\n\n is_terminal_multiplier = 1. 
- tf.to_float(self._replay.terminals)\n # Incorporate terminal state to discount factor.\n # size of gamma_with_terminal: (num_tau_prime_samples x batch_size) x 1.\n gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = tf.tile(gamma_with_terminal[:, None],\n [self.num_tau_prime_samples, 1])\n\n # Get the indices of the maximium Q-value across the action dimension.\n # Shape of replay_next_qt_argmax: (num_tau_prime_samples x batch_size) x 1.\n\n replay_next_qt_argmax = tf.tile(\n self._replay_next_qt_argmax[:, None], [self.num_tau_prime_samples, 1])\n\n # Shape of batch_indices: (num_tau_prime_samples x batch_size) x 1.\n batch_indices = tf.cast(tf.range(\n self.num_tau_prime_samples * batch_size)[:, None], tf.int64)\n\n # Shape of batch_indexed_target_values:\n # (num_tau_prime_samples x batch_size) x 2.\n batch_indexed_target_values = tf.concat(\n [batch_indices, replay_next_qt_argmax], axis=1)\n\n # Shape of next_target_values: (num_tau_prime_samples x batch_size) x 1.\n target_quantile_values = tf.gather_nd(\n self._replay_net_target_quantile_values,\n batch_indexed_target_values)[:, None]\n\n return rewards + gamma_with_terminal * target_quantile_values\n\n def _build_train_op(self):\n \"\"\"Builds a training op.\n\n Returns:\n train_op: An op performing one step of training from replay data.\n \"\"\"\n batch_size = tf.shape(self._replay.rewards)[0]\n\n target_quantile_values = tf.stop_gradient(\n self._build_target_quantile_values_op())\n # Reshape to self.num_tau_prime_samples x batch_size x 1 since this is\n # the manner in which the target_quantile_values are tiled.\n target_quantile_values = tf.reshape(target_quantile_values,\n [self.num_tau_prime_samples,\n batch_size, 1])\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_tau_prime_samples x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of target_quantile_values:\n # batch_size x num_tau_prime_samples x 1.\n target_quantile_values = tf.transpose(target_quantile_values, [1, 0, 2])\n\n # Shape of indices: (num_tau_samples x batch_size) x 1.\n # Expand dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n indices = tf.range(self.num_tau_samples * batch_size)[:, None]\n\n # Expand the dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n reshaped_actions = self._replay.actions[:, None]\n reshaped_actions = tf.tile(reshaped_actions, [self.num_tau_samples, 1])\n # Shape of reshaped_actions: (num_tau_samples x batch_size) x 2.\n reshaped_actions = tf.concat([indices, reshaped_actions], axis=1)\n\n chosen_action_quantile_values = tf.gather_nd(\n self._replay_net_quantile_values, reshaped_actions)\n # Reshape to self.num_tau_samples x batch_size x 1 since this is the manner\n # in which the quantile values are tiled.\n chosen_action_quantile_values = tf.reshape(chosen_action_quantile_values,\n [self.num_tau_samples,\n batch_size, 1])\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_tau_samples x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of chosen_action_quantile_values:\n # batch_size x num_tau_samples x 1.\n chosen_action_quantile_values = tf.transpose(\n chosen_action_quantile_values, [1, 0, 2])\n\n # Shape of bellman_erors and huber_loss:\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n bellman_errors = 
target_quantile_values[\n :, :, None, :] - chosen_action_quantile_values[:, None, :, :]\n # The huber loss (see Section 2.3 of the paper) is defined via two cases:\n # case_one: |bellman_errors| <= kappa\n # case_two: |bellman_errors| > kappa\n huber_loss_case_one = tf.to_float(\n tf.abs(bellman_errors) <= self.kappa) * 0.5 * bellman_errors ** 2\n huber_loss_case_two = tf.to_float(\n tf.abs(bellman_errors) > self.kappa) * self.kappa * (\n tf.abs(bellman_errors) - 0.5 * self.kappa)\n huber_loss = huber_loss_case_one + huber_loss_case_two\n\n # Reshape replay_quantiles to batch_size x num_tau_samples x 1\n replay_quantiles = tf.reshape(\n self._replay_net_quantiles, [self.num_tau_samples, batch_size, 1])\n replay_quantiles = tf.transpose(replay_quantiles, [1, 0, 2])\n\n # Tile by num_tau_prime_samples along a new dimension. Shape is now\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n # These quantiles will be used for computation of the quantile huber loss\n # below (see section 2.3 of the paper).\n replay_quantiles = tf.to_float(tf.tile(\n replay_quantiles[:, None, :, :], [1, self.num_tau_prime_samples, 1, 1]))\n # Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.\n quantile_huber_loss = (tf.abs(replay_quantiles - tf.stop_gradient(\n tf.to_float(bellman_errors < 0))) * huber_loss) / self.kappa\n # Sum over current quantile value (num_tau_samples) dimension,\n # average over target quantile value (num_tau_prime_samples) dimension.\n # Shape: batch_size x num_tau_prime_samples x 1.\n loss = tf.reduce_sum(quantile_huber_loss, axis=2)\n # Shape: batch_size x 1.\n loss = tf.reduce_mean(loss, axis=1)\n\n # TODO(kumasaurabh): Add prioritized replay functionality here.\n update_priorities_op = tf.no_op()\n with tf.control_dependencies([update_priorities_op]):\n if self.summary_writer is not None:\n # Runs only in the debug_mode\n with tf.variable_scope('Losses'):\n tf.summary.scalar('QuantileLoss', tf.reduce_mean(loss))\n return self.optimizer.minimize(tf.reduce_mean(loss)), tf.reduce_mean(loss)\n" ]
[ [ "tensorflow.concat", "tensorflow.transpose", "tensorflow.gather_nd", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.reduce_sum", "tensorflow.range", "tensorflow.reshape", "tensorflow.control_dependencies", "tensorflow.no_op", "tensorflow.make_template", "tensorflow.to_float", "tensorflow.variable_scope", "tensorflow.argmax", "tensorflow.tile", "tensorflow.abs" ] ]
GW-Wang-thu/ORB-based-displacement-monitoring-system
[ "6dfc12487dbc27af5eb5a0ef72aa3958891c192e", "6dfc12487dbc27af5eb5a0ef72aa3958891c192e" ]
[ "TDMS/OLD/pointdisp.py", "TDMS/Preprocess.py" ]
[ "import cv2\r\nimport numpy as np\r\nfrom Matcher import ORB_matcher\r\n\r\nclass Pointmarker:\r\n\r\n def __init__(self, refimg, defimg, cameraid=0):\r\n self.refimg = refimg\r\n self.defimg = defimg\r\n # self.camera = cv2.VideoCapture(cameraid)\r\n self.refpoints = []\r\n self.__genshowimg__()\r\n self.__getpoints__()\r\n\r\n\r\n def __genshowimg__(self):\r\n img = np.column_stack((np.column_stack((self.refimg, (np.ones((3072, 14)) * 255).astype(\"uint8\"))), self.defimg))\r\n self.showimg = cv2.resize(img, (1369, 1024), cv2.INTER_CUBIC)\r\n\r\n def __getpoints__(self):\r\n cv2.namedWindow('Select Points')\r\n cv2.setMouseCallback('Select Points', self.__selectpoint__)\r\n while(1):\r\n # self.defimg = self.camera.read()\r\n self.__refreshdisp__(show=1)\r\n cv2.imshow(\"Select Points\", self.showimg)\r\n if cv2.waitKey(20) & 0xFF == 27:\r\n break\r\n\r\n def __selectpoint__(self, event, x, y, flags, param):\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n kp1, kp2, des1, matches, boxsize = self.__findkp__((x*3, y*3))\r\n if matches is not None:\r\n id1 = matches[0].queryIdx\r\n id2 = matches[0].trainIdx\r\n distance = matches[0].distance\r\n for i in range(len(matches)):\r\n if matches[i].distance < distance:\r\n distance = matches[i].distance\r\n id1 = matches[i].queryIdx\r\n id2 = matches[i].trainIdx\r\n des = np.zeros((1, des1.shape[1]))\r\n des[0, :] = des1[id1]\r\n\r\n self.refpoints.append([(x * 3 + (kp1[id1].pt[0] - boxsize), y * 3 + (kp1[id1].pt[1] - boxsize)), des, []])\r\n cv2.circle(self.showimg, (x + int((kp1[id1].pt[0] - boxsize)/3), y + int((kp1[id1].pt[1] - boxsize)/3)), 1, (255, 255, 255), 2)\r\n cv2.circle(self.showimg, (x + int((kp2[id2].pt[0] + 2062 - boxsize - 50)/3), y + int((kp2[id2].pt[1] - boxsize - 50)/3)), 1, (255, 255, 255), 2)\r\n\r\n def __findkp__(self, givenpoint):\r\n boxsize = 15\r\n refmatcher = ORB_matcher(nfeatures=20, scaleFactor=1.1, nlevels=5) # 重要参数:推荐nfeature为tolerence的两倍\r\n defmatcher = ORB_matcher(nfeatures=20, scaleFactor=1.1, nlevels=5)\r\n while boxsize < 100:\r\n refbox = [(givenpoint[0]-boxsize, givenpoint[1]-boxsize), (givenpoint[0]+boxsize+1, givenpoint[1]+boxsize+1)]\r\n if any(refbox) < 0:\r\n break\r\n refblock = self.refimg[refbox[0][1]:refbox[1][1], refbox[0][0]:refbox[1][0]]\r\n kp1, des1 = refmatcher.detect(refblock, show=0)\r\n if des1 is not None:\r\n defbox = [(givenpoint[0]-boxsize-50, givenpoint[1]-boxsize-50), (givenpoint[0]+boxsize+51, givenpoint[1]+boxsize+51)]\r\n defblock = self.defimg[defbox[0][1]:defbox[1][1], defbox[0][0]:defbox[1][0]]\r\n if any(defbox) < 0:\r\n print(\"out of range\")\r\n break\r\n kp2, des2 = defmatcher.detect(defblock, show=0)\r\n defmatcher.match(des1, des2)\r\n if len(defmatcher.matches) > 10: # 重要参数:增大会增加匹配精度,但会减少匹配数量;根据特征点稀疏状况调整,特征点稀疏位置不方便使用太大\r\n return kp1, kp2, des1, defmatcher.matches, boxsize\r\n boxsize += 5\r\n print(\"Key Point Not Found!\")\r\n return None, None, None, None, None\r\n\r\n def __refreshdisp__(self, show):\r\n matcher = ORB_matcher(nfeatures=100, scaleFactor=1.2, nlevels=10)\r\n for i in range(len(self.refpoints)):\r\n defbox = [(int(self.refpoints[i][0][0]) - 200, int(self.refpoints[i][0][1]) - 200), (int(self.refpoints[i][0][0]) + 200, int(self.refpoints[i][0][1]) + 200)]\r\n defblock = self.defimg[defbox[0][1]:defbox[1][1], defbox[0][0]:defbox[1][0]]\r\n if any(defbox) < 0:\r\n print(\"out of range\")\r\n break\r\n kp2, des2 = matcher.detect(defblock, show=0)\r\n des1 = self.refpoints[i][1].astype(\"uint8\")\r\n matcher.match(des1, des2)\r\n if matcher.matches is not 
None:\r\n distance = matcher.matches[0].distance\r\n for j in range(len(matcher.matches)):\r\n id2 = matcher.matches[0].trainIdx\r\n if matcher.matches[j].distance < distance:\r\n distance = matcher.matches[j].distance\r\n id2 = matcher.matches[j].trainIdx\r\n x_ref = self.refpoints[i][0][0]\r\n y_ref = self.refpoints[i][0][1]\r\n x_def = kp2[id2].pt[0] - 200 + int(self.refpoints[i][0][0])\r\n y_def = kp2[id2].pt[1] - 200 + int(self.refpoints[i][0][1])\r\n u = x_def - x_ref\r\n v = y_def - y_ref\r\n self.refpoints[i][2] = [u, v]\r\n if show:\r\n position1 = (int(self.refpoints[i][0][0]/3) + 5, int(self.refpoints[i][0][1]/3) - 10)\r\n position2 = (int(self.refpoints[i][0][0]/3) + 5, int(self.refpoints[i][0][1]/3) + 10)\r\n text1 = \"U=\"+str(np.round(u, 3))\r\n text2 = \"V=\"+str(np.round(v, 3))\r\n cv2.putText(self.showimg, text1, position1,\r\n fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5, color=(255, 255, 255), thickness=1)\r\n cv2.putText(self.showimg, text2, position2,\r\n fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5, color=(255, 255, 255), thickness=1)\r\n\r\n # def show(self, savepath=None):\r\n # if savepath:\r\n # print(\"save markedimg to \"+savepath)\r\n # cv2.imwrite(savepath, savepath.markedref)\r\n # cv2.imshow(\"MARKED REF IMG\", cv2.resize(self.markedref, (720, 1080), cv2.INTER_CUBIC))\r\n # cv2.waitKey()\r\nif __name__==\"__main__\":\r\n dir = \"D:/Research/Working-On/Tower_Disp_Monitor/Data/\"\r\n outdir = \"D:/Research/Working-On/Tower_Disp_Monitor/Data/out/\"\r\n # 读取图片内容\r\n refimg = cv2.imread(dir + 'Image_20200403160132931.bmp', 0)\r\n defimg = cv2.imread(dir + 'Image_20200403160130637.bmp', 0) # 04045\r\n refimg = cv2.rotate(refimg, cv2.ROTATE_90_COUNTERCLOCKWISE)\r\n defimg = cv2.rotate(defimg, cv2.ROTATE_90_COUNTERCLOCKWISE)\r\n mymarker = Pointmarker(refimg, defimg)\r\n", "import cv2\r\nimport numpy as np\r\nimport torch\r\nfrom utils import VDSR, RealSR\r\n\r\n\r\nclass Processor:\r\n\r\n def __init__(self, imsize=(3072, 2048)):\r\n self.imsize = imsize\r\n\r\n def denoise(self, img, method=\"Gaussian\", ksize=3):\r\n if method == \"Gaussian\":\r\n out_img = cv2.GaussianBlur(img, ksize)\r\n if method == \"Median\":\r\n out_img = cv2.medianBlur(img, ksize)\r\n if method == \"Bilater\":\r\n out_img = cv2.bilateralFilter(img, ksize, 31, 31)\r\n\r\n def enhance(self, img, crop=(0, 0), methpd=\"Linear\"):\r\n out = img * img > crop[0]\r\n out = (out - 255) * (out < crop[1]) + 255\r\n\r\n if methpd == \"Linear\":\r\n scale = (np.max(out) - np.min(out))\r\n out = out * 255 / scale\r\n\r\n if methpd == \"Regular\":\r\n out = cv2.normalize(out, 255, 0, cv2.NORM_MINMAX, cv2.CV_8U)\r\n\r\n out = out.astype(\"uint8\")\r\n return out\r\n\r\n def deblure(self, img, method=\"VDSR\"):\r\n if method == \"VDSR\":\r\n model = VDSR.model()\r\n model.load_state_dict(\"./utils/vdsrparams.pth\")\r\n out = model(img)\r\n out = model.post(out)\r\n if method == \"RealSR\":\r\n model = RealSR.model()\r\n model.load_state_dict(\"./utils/realsrparams.pth\")\r\n out = model(img)\r\n out = model.post(out)\r\n return out\r\n\r\n def sharpen(self, img, ksize=3):\r\n kernel = np.array([[0, -1, 0],\r\n [-1, 5, -1],\r\n [0, -1, 0]], np.float32) # 定义一个核\r\n out = cv2.filter2D(img, -1, kernel=kernel)\r\n return out\r\n\r\n def Sequencial_process(self, img, operations):\r\n for i in range(len(operations)):\r\n out = exec(\"self.\"+operations[i][0]+\"(img, \"+operations[i][1]+\")\")\r\n return out" ]
[ [ "numpy.round", "numpy.zeros", "numpy.ones" ], [ "numpy.max", "numpy.array", "numpy.min" ] ]
PassengerAI/shared
[ "a30ffb43e957e8c62ed8e90d0902d85202528b21" ]
[ "paitypes/tests/unit/image/test_resizing.py" ]
[ "import pytest\nimport numpy as np\n\nfrom typing import List, Tuple\n\nfrom paitypes.geometry.bounding_box import BoundingBox\nfrom paitypes.geometry.Shape import Shape\n\nfrom paitypes.image import Image, BGRImage, GrayscaleImage\nfrom paitypes.image.resizing import (\n ImageResizingException,\n resize_image_to_size,\n crop_image_to_bounding_box)\n\nfrom paitypes.tests.fixtures.fixture_image import (\n random_grayscale_image,\n random_bgr_image,\n black_grayscale_image,\n gray_grayscale_image,\n white_grayscale_image,\n grayscale_images,\n black_bgr_image,\n gray_bgr_image,\n white_bgr_image,\n bgr_images,\n all_valid_images,\n empty_bgr_image,\n empty_grayscale_image,\n empty_images)\nfrom paitypes.tests.fixtures.fixture_bounding_box import (\n empty_bbox, full_bbox, partial_bbox, partial_float_bbox)\n\n\nclass TestResizeImageToSize():\n @pytest.mark.parametrize('size', [\n Shape(30, 20),\n Shape(100, 50),\n Shape(100, 100),\n Shape(2000, 5000)\n ])\n def test_resize_image_to_valid_size(self,\n all_valid_images: List[Image],\n size: Shape\n ) -> None:\n for image in all_valid_images:\n resized_image = resize_image_to_size(image, size)\n assert (resized_image.shape[:2] == (size.height, size.width))\n assert (len(resized_image.shape) == len(image.shape))\n\n @pytest.mark.parametrize('size', [\n Shape(-100, -100),\n Shape(-100, 1),\n Shape(1, -100),\n Shape(0, 0),\n Shape(1, 0),\n Shape(0, 1),\n Shape(-100, 100),\n Shape(100, -100)\n ])\n def test_resize_image_to_invalid_size_raises(\n self,\n all_valid_images: List[Image],\n size: Shape\n ) -> None:\n for image in all_valid_images:\n with pytest.raises(ImageResizingException):\n resize_image_to_size(image, size)\n\n def test_resize_empty_image_to_size_raises(self,\n empty_images: List[Image]\n ) -> None:\n for empty_image in empty_images:\n with pytest.raises(ImageResizingException):\n resize_image_to_size(empty_image, Shape(30, 30))\n\n\nclass TestResizeCropImageToBoundingBox():\n def test_crop_to_partial_bbox(self,\n all_valid_images: List[Image],\n partial_bbox: BoundingBox) -> None:\n for image in all_valid_images:\n cropped_image = crop_image_to_bounding_box(image, partial_bbox)\n assert (np.array_equal(cropped_image, image[26:70, 25:75]))\n\n def test_crop_to_full_bbox(self,\n all_valid_images: List[Image],\n full_bbox: BoundingBox) -> None:\n for image in all_valid_images:\n cropped_image = crop_image_to_bounding_box(image, full_bbox)\n assert (np.array_equal(cropped_image, image))\n\n def test_crop_to_partial_float_bbox(self,\n all_valid_images: List[Image],\n partial_float_bbox: BoundingBox\n ) -> None:\n for image in all_valid_images:\n cropped_image = crop_image_to_bounding_box(image,\n partial_float_bbox)\n assert (np.array_equal(cropped_image, image[27:74, 33:68]))\n\n def test_crop_to_empty_bbox_raises(self,\n all_valid_images: List[Image],\n empty_bbox: BoundingBox) -> None:\n for image in all_valid_images:\n with pytest.raises(ImageResizingException):\n crop_image_to_bounding_box(image, empty_bbox)\n\n def test_crop_to_empty_image_raises(self,\n empty_images: List[Image],\n full_bbox: BoundingBox) -> None:\n for empty_image in empty_images:\n with pytest.raises(ImageResizingException):\n crop_image_to_bounding_box(empty_image, full_bbox)\n\n @pytest.mark.parametrize('bbox', [\n BoundingBox(-1.0, 0.0, 0.0, 100.0),\n BoundingBox(0.0, 0.0, -1.0, 100.0),\n BoundingBox(0.0, 101.0, 0.0, 100.0),\n BoundingBox(0.0, 100.0, 0.0, 101.0)])\n def test_crop_to_out_of_bounds_bbox_raises(self,\n all_valid_images: Image,\n bbox: 
BoundingBox\n ) -> None:\n for image in all_valid_images:\n with pytest.raises(ImageResizingException):\n crop_image_to_bounding_box(image, bbox)\n" ]
[ [ "numpy.array_equal" ] ]
ElanaPearl/ml-playground
[ "be0309c7546c267bda3bf4c970d55b7ec673b199" ]
[ "optimizers.py" ]
[ "from typing import Dict, List\n\nimport torch\nfrom torch.functional import Tensor\n\n\nclass Adam:\n def __init__(\n self,\n params: List[torch.nn.Parameter],\n lr=0.001,\n beta_1=0.9,\n beta_2=0.999,\n eps=10e-8,\n ):\n self.params = params\n self.lr = lr\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.eps = eps\n self.timestep = 0\n\n self.first_moments: Dict[torch.nn.Parameter, Tensor] = {}\n self.second_moments: Dict[torch.nn.Parameter, Tensor] = {}\n for param in params:\n self.first_moments[param] = torch.zeros_like(param)\n self.second_moments[param] = torch.zeros_like(param)\n\n def step(self):\n self.timestep += 1\n beta_1 = self.beta_1\n beta_2 = self.beta_2\n\n for p in self.params:\n first_moment = add_to_moving_average(\n old=self.first_moments[p], new=p.grad, beta=beta_1\n )\n second_moment = add_to_moving_average(\n old=self.second_moments[p], new=p.grad ** 2, beta=beta_2\n )\n bias_corrected_first_moment = first_moment * (1 - beta_1 ** self.timestep)\n bias_corrected_second_moment = second_moment * (1 - beta_2 ** self.timestep)\n p.data -= (\n self.lr\n * bias_corrected_first_moment\n / (torch.sqrt(bias_corrected_second_moment) + self.eps)\n )\n self.first_moments[p] = first_moment\n self.second_moments[p] = second_moment\n\n\ndef add_to_moving_average(old, new, beta):\n return old * beta + new * (1 - beta)\n" ]
[ [ "torch.sqrt", "torch.zeros_like" ] ]
wendazhou/jax
[ "d7894198a1ad0e54de42450c27ad5e715cb59aa1" ]
[ "tests/scipy_stats_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport itertools\nimport unittest\n\nfrom absl.testing import absltest, parameterized\n\nimport numpy as onp\nimport scipy.stats as osp_stats\nfrom scipy.stats import random_correlation\n\nfrom jax import test_util as jtu\nfrom jax.scipy import stats as lsp_stats\nfrom jax.scipy.special import expit\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\nall_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4)]\n\nfloat_dtypes = [onp.float32, onp.float64]\n\nCombosWithReplacement = itertools.combinations_with_replacement\n\ndef genNamedParametersNArgs(n, rng):\n return parameterized.named_parameters(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\"\", shapes, dtypes),\n \"rng\": rng, \"shapes\": shapes, \"dtypes\": dtypes}\n for shapes in CombosWithReplacement(all_shapes, n)\n for dtypes in CombosWithReplacement(float_dtypes, n)))\n\n\nclass LaxBackedScipyStatsTests(jtu.JaxTestCase):\n \"\"\"Tests for LAX-backed scipy.stats implementations\"\"\"\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testBernoulliLogPmf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.bernoulli.logpmf\n lax_fun = lsp_stats.bernoulli.logpmf\n\n def args_maker():\n x, logit, loc = map(rng, shapes, dtypes)\n x = onp.floor(x)\n p = expit(logit)\n loc = onp.floor(loc)\n return [x, p, loc]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True,\n tol=1e-4)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(5, jtu.rand_positive())\n def testBetaLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.beta.logpdf\n lax_fun = lsp_stats.beta.logpdf\n\n def args_maker():\n x, a, b, loc, scale = map(rng, shapes, dtypes)\n return [x, a, b, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True,\n tol=1e-4)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testCauchyLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.cauchy.logpdf\n lax_fun = lsp_stats.cauchy.logpdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n # clipping to ensure that scale is not too low\n scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(2, jtu.rand_positive())\n def testDirichletLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.cauchy.logpdf\n lax_fun = lsp_stats.cauchy.logpdf\n dim = 4\n shapes = (shapes[0] + (dim,), shapes[1] + (dim,))\n\n def args_maker():\n x, alpha = map(rng, shapes, dtypes)\n x = x / onp.sum(x, axis=-1, keepdims=True)\n return [x, alpha]\n\n 
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(3, jtu.rand_positive())\n def testExponLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.expon.logpdf\n lax_fun = lsp_stats.expon.logpdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(4, jtu.rand_positive())\n def testGammaLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.gamma.logpdf\n lax_fun = lsp_stats.gamma.logpdf\n\n def args_maker():\n x, a, loc, scale = map(rng, shapes, dtypes)\n return [x, a, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True,\n tol=5e-4)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(3, jtu.rand_positive())\n def testLaplaceLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.laplace.logpdf\n lax_fun = lsp_stats.laplace.logpdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n # clipping to ensure that scale is not too low\n scale = onp.clip(scale, a_min=0.1, a_max=None)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testLaplaceCdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.laplace.cdf\n lax_fun = lsp_stats.laplace.cdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n # ensure that scale is not too low\n scale = onp.clip(scale, a_min=0.1, a_max=None)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n # TODO: currently it ignores the argument \"shapes\" and only tests dim=4\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testMultivariateNormalLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.multivariate_normal.logpdf\n lax_fun = lsp_stats.multivariate_normal.logpdf\n dim = 4\n shapex = (dim,)\n\n def args_maker():\n x, mean, cov = map(rng, (shapex, shapex, (dim, dim)), dtypes)\n cov = random_correlation.rvs(onp.arange(1, 1+dim) * 2 / (dim + 1))\n return [x, mean, cov]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True,\n tol=1e-4)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testNormLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.norm.logpdf\n lax_fun = lsp_stats.norm.logpdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n # clipping to ensure that scale is not too low\n scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testNormLogCdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.norm.logcdf\n lax_fun = lsp_stats.norm.logcdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n # clipping to ensure that scale is not too low\n scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, 
lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testNormCdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.norm.cdf\n lax_fun = lsp_stats.norm.cdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n # clipping to ensure that scale is not too low\n scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)\n return [x, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testNormPpf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.norm.ppf\n lax_fun = lsp_stats.norm.ppf\n\n def args_maker():\n q, loc, scale = map(rng, shapes, dtypes)\n # ensure probability is between 0 and 1:\n q = onp.clip(onp.abs(q / 3), a_min=None, a_max=1)\n # clipping to ensure that scale is not too low\n scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)\n return [q, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n\n @genNamedParametersNArgs(4, jtu.rand_positive())\n def testParetoLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.pareto.logpdf\n lax_fun = lsp_stats.pareto.logpdf\n\n def args_maker():\n x, b, loc, scale = map(rng, shapes, dtypes)\n return [x, b, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n\n @genNamedParametersNArgs(4, jtu.rand_default())\n def testTLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.t.logpdf\n lax_fun = lsp_stats.t.logpdf\n\n def args_maker():\n x, df, loc, scale = map(rng, shapes, dtypes)\n # clipping to ensure that scale is not too low\n scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)\n return [x, df, loc, scale]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n\n @genNamedParametersNArgs(3, jtu.rand_default())\n def testUniformLogPdf(self, rng, shapes, dtypes):\n scipy_fun = osp_stats.uniform.logpdf\n lax_fun = lsp_stats.uniform.logpdf\n\n def args_maker():\n x, loc, scale = map(rng, shapes, dtypes)\n return [x, loc, onp.abs(scale)]\n\n self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)\n self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n\n def testIssue972(self):\n self.assertAllClose(\n onp.ones((4,), onp.float32),\n lsp_stats.norm.cdf(onp.full((4,), onp.inf, onp.float32)),\n check_dtypes=False)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.abs", "numpy.clip", "numpy.arange", "numpy.ones", "numpy.full", "numpy.floor", "numpy.sum" ] ]
Gabo-Tor/Abadejo-Chess
[ "2880eca36ee5a5f5bc7b917a378e32d246ae3f50" ]
[ "create_database.py" ]
[ "from typing import Type\r\nimport chess\r\nimport chess.pgn\r\nimport numpy as np\r\n\r\n# we usea a PGN database from: https://database.lichess.org/\r\nDATABASE = r\"lichess_db_standard_rated_2015-05.pgn\"\r\n\r\ndef read():\r\n # Reads a lichess PGN dump and saves all anotated positions to a database file \r\n positions, games, exponent = 0, 0, 1\r\n dbX = np.array(parse_board(chess.Board())) # Data\r\n dbY = np.array([520]) # Target\r\n pgn = open(DATABASE)\r\n\r\n for games in range(2137556): # Don't really now how to read the length of the DATABASE\r\n game = chess.pgn.read_game(pgn)\r\n\r\n for move in game.mainline():\r\n povScore = move.eval()\r\n if povScore == None: # We only care about anotated games\r\n continue\r\n board = move.board()\r\n # doing it like this is super inefficient on large databases, the O(n^2) dont use np.append because it has to look all the list before append\r\n if board.turn == chess.WHITE: # We always look the board from the perspective of the player about to move\r\n dbX = np.append(dbX, parse_board(board), axis=0)\r\n dbY = np.append(dbY, [povScore.pov(chess.WHITE).wdl(model=\"lichess\").wins], axis=0)\r\n else:\r\n dbX = np.append(dbX, parse_board(board.mirror()), axis=0)\r\n dbY = np.append(dbY, [1000-povScore.pov(chess.WHITE).wdl(model=\"lichess\").wins], axis=0)\r\n\r\n positions += 1\r\n games += 1\r\n\r\n if not games % 100:\r\n print(f\"positions evaluated: {positions} in {games} games\")\r\n\r\n if not games % (10**exponent):\r\n np.save(f\"{positions}_positions_data\",dbX)\r\n np.save(f\"{positions}_positions_targets\",dbY)\r\n dbX = np.array(parse_board(chess.Board())) # Reset data array\r\n dbY = np.array([520]) # Reset target array\r\n exponent += 1\r\n pgn.close()\r\n\r\n\r\ndef parse_board(board):\r\n # Returns a 7x8x8 one hot encoded piece location tensor, with 1 and -1 \r\n # corresponing to black and white and an extra map set to 1\r\n pieceTensor = {\r\n chess.PAWN: np.zeros((1,1,8,8), dtype= 'int8'),\r\n chess.BISHOP: np.zeros((1,1,8,8), dtype= 'int8'),\r\n chess.KNIGHT: np.zeros((1,1,8,8), dtype= 'int8'),\r\n chess.ROOK: np.zeros((1,1,8,8), dtype= 'int8'),\r\n chess.QUEEN: np.zeros((1,1,8,8), dtype= 'int8'),\r\n chess.KING: np.zeros((1,1,8,8), dtype= 'int8')}\r\n\r\n for square in range(64):\r\n piece = board.piece_map().get(square)\r\n if not piece == None:\r\n # Our data is really sparse, so we encode both colors in the same map\r\n if piece.color == chess.WHITE:\r\n pieceTensor.get(piece.piece_type)[0, 0, square//8, square%8] = 1\r\n else:\r\n pieceTensor.get(piece.piece_type)[0, 0, square//8, square%8] = -1\r\n # We add a board full of ones so we don't loose track of the borders of the board when using padding\r\n outTensor = np.ones((1,1,8,8), dtype= 'int8') \r\n for piece in pieceTensor.keys():\r\n outTensor = np.append(outTensor, pieceTensor.get(piece),axis = 1)\r\n return outTensor\r\n\r\n\r\nif __name__ == \"__main__\":\r\n read()\r\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.save", "numpy.ones" ] ]
quilan78/MSC_project
[ "18c0298bca013a62c09752446f7c391a29f38618" ]
[ "Tensorflow_network/model.py" ]
[ "import tensorflow as tf\r\nimport numpy as np\r\nimport time\r\nimport sys\r\n#sys.path.append('/home/dwt17/MSc_project/neural_sum_1/code/Proxy_network/Commons/')\r\nsys.path.append('../Commons/')\r\nfrom read_data import *\r\nfrom vocab import *\r\nfrom treatedData import *\t\r\nfrom batch import *\r\nfrom bahdanauCoverageAttention import *\r\nfrom attentionPointerWrapper import *\r\nimport os\r\n\r\n\r\nclass Model:\r\n\r\n\tdef __init__(self, cellSize = 128, batch_size = 15, max_encoding_length = 200, max_decoding_length = 50, vocab_size = 2000, embedding_size = 64, learning_rate = 0.0001, learning_decay = 0.8, minimum_rate = 0.000001, nbre_epoch = 50, display_batch_freq = 2, gradient_clip = 5, beam_width = 10, save_frequency = 1, coverage=False, pointer=False) :\r\n\t\tself.cellSize = cellSize # 256\r\n\t\tself.batch_size = batch_size\r\n\t\tself.max_encoding_length = max_encoding_length # 400\r\n\t\tself.max_decoding_length = max_decoding_length # 200\r\n\t\tself.vocab_size = vocab_size\r\n\t\tself.embedding_size = embedding_size # 128\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.learning_decay = learning_decay\r\n\t\tself.minimum_rate = minimum_rate\r\n\t\tself.nbre_epoch = nbre_epoch\r\n\t\tself.display_batch_freq = display_batch_freq\r\n\t\tself.gradient_clip = gradient_clip\r\n\t\tself.beam_width = beam_width\r\n\t\tself.save_frequency = save_frequency\r\n\t\tself.coverage = coverage\r\n\t\tself.pointer = pointer\r\n\r\n\tdef init_graph_and_data(self, task=\"train\", nb_data=100, data_path=\"../Data/finished_files/\", writting_path_batches=\"../Data/Batches\",create_batches=True):\r\n\t\tlearning_rate = self.learning_rate\r\n\t\tlearning_decay = self.learning_decay\r\n\t\tminimum_rate = self.minimum_rate\r\n\t\tnbre_epoch = self.nbre_epoch\r\n\t\tbatch_size = self.batch_size\r\n\t\tbeam_width = self.beam_width\r\n\r\n\t\tcellSize = self.cellSize\r\n\t\tbatch_size = self.batch_size\r\n\t\tmax_encoding_length= self.max_encoding_length\r\n\t\tmax_decoding_length = self.max_decoding_length\r\n\t\tvocab_size = self.vocab_size\r\n\t\tembedding_size = self.embedding_size\r\n\t\tgradient_clip = self.gradient_clip\r\n\r\n\r\n\r\n\t\tprint(\"Loading vocabulary\")\r\n\t\tvocab = Vocab(path=data_path)\r\n\t\tvocab_size = vocab.LoadVocab(max_size=vocab_size)\r\n\t\tself.vocab = vocab\r\n\t\tself.vocab_size = vocab_size # Making sure the vocabulary sizes matches\r\n\t\t# Tokens for the \r\n\t\tself.start_token = vocab.start_decode_id\r\n\t\tself.stop_token = vocab.stop_decode_id\r\n\t\tprint(\"vocabulary loaded, size : {}\".format(vocab_size))\r\n\r\n\t\tprint(\"Loading Data\")\r\n\t\tdata = Data(path=data_path)\r\n\r\n\t\tif task == \"train\":\r\n\t\t\tif create_batches == True:\r\n\t\t\t\tnb_batches = data.GenerateBatchesOnDisk(batch_size, vocab, max_text_length=max_encoding_length, max_abstract_length=max_decoding_length, max_data =nb_data, reading_file=\"train\", writting_path=writting_path_batches, pointer=self.pointer)\r\n\t\t\t\tmax_summary_length = max_decoding_length\r\n\t\t\telse:\r\n\t\t\t\tmax_summary_length = max_decoding_length\r\n\t\t\t\tnb_batches = nb_data // batch_size#\r\n\t\t\tself.max_summary_length=max_summary_length\r\n\t\t\tself.nb_batches = nb_batches\r\n\t\telif task ==\"test\":\r\n\t\t\tfilename = task\r\n\t\t\tinput_enc_batches, input_dec_batches, target_dec_batches, input_enc_seq_lengths, input_dec_seq_lengths, nb_batches = data.data_pipeline(batch_size,nb_example=nb_data,max_text_length = max_encoding_length, max_abstract_length = max_decoding_length, 
filename = filename)\r\n\t\t\tself.max_summary_length = data.max_abstract_length\r\n\t\t\tprint(\"Transforming words into id\")\r\n\t\t\tself.save_enc_input = input_enc_batches\r\n\t\t\tself.save_dec_output = target_dec_batches\r\n\t\t\tif self.pointer:\r\n\t\t\t\ttranslated_batches, oov_words, max_oovs = vocab.TranslateTextBatchesWithOOV(input_enc_batches)\r\n\t\t\t\tself.input_enc_oov = translated_batches\r\n\t\t\t\tself.max_oovs = max_oovs\r\n\t\t\t\tself.oov_words = oov_words\r\n\t\t\t\tself.input_dec_batches = vocab.TranslateBatches(input_dec_batches)\r\n\t\t\t\tself.input_enc_batches = vocab.TranslateBatches(input_enc_batches)\r\n\t\t\t\tself.target_dec_batches = vocab.TranslateSummaryBatchesWithOOV(target_dec_batches, oov_words)\r\n\t\t\telse:\r\n\t\t\t\tself.input_enc_batches = vocab.TranslateBatches(input_enc_batches)\r\n\t\t\t\tself.input_dec_batches = vocab.TranslateBatches(input_dec_batches)\r\n\t\t\t\tself.target_dec_batches = vocab.TranslateBatches(target_dec_batches)\r\n\t\t\t\tself.max_oovs = [0 for i in self.input_enc_batches]\r\n\t\t\tself.nb_batches = nb_batches\r\n\t\t\tself.input_enc_seq_lengths = input_enc_seq_lengths\r\n\t\t\tself.input_dec_seq_lengths = input_dec_seq_lengths\r\n\t\t\tprint(\"Inputs of rnn prepared\")\r\n\t\tprint(\"Data loaded\")\r\n\t\t#print(input_dec_seq_lengths)\r\n\r\n\r\n\t\tprint(\"Creating Graph\")\r\n\t\tself.create_Graph(task)\r\n\t\tprint(\"Graph created\")\r\n\t\t#For tensorboard\r\n\t\t#tf.summary.scalar('Loss', tf_loss)\r\n\t\tself.merged = tf.summary.merge_all()\r\n\r\n\t\t#Save the trained model\r\n\t\tself.saver = tf.train.Saver()\r\n\t\r\n\tdef create_Graph(self, task=\"train\"):\r\n\r\n\t\tself._create_placeholders()\r\n\r\n\t\tself._generate_Embeddings()\r\n\t\tself._generate_Encoder()\r\n\t\t\r\n\t\tself.reduce_transfered_states()\r\n\r\n\t\tself._generate_Decoder(task=task)\r\n\t\tif task == \"train\":\r\n\t\t\tself._generate_Optimisation()\r\n\r\n\tdef _create_placeholders(self):\r\n\r\n\r\n\t\tbatch_size = self.batch_size\r\n\t\tmax_encoding_length = self.max_encoding_length\r\n\t\tmax_decoding_length = self.max_decoding_length\r\n\r\n\r\n\t\t#Batch Major\r\n\t\tinput_enc_batch = tf.placeholder(tf.int32, [batch_size, max_encoding_length], name=\"input_enc_batch\")\r\n\t\tinput_dec_batch = tf.placeholder(tf.int32, [batch_size, max_decoding_length], name=\"input_dec_batch\")\r\n\t\ttarget_dec_batch = tf.placeholder(tf.int32, [batch_size, max_decoding_length], name=\"target_dec_batch\")\r\n\r\n\t\t#Length of text/summary\r\n\t\tinput_enc_seq_lengths = tf.placeholder(tf.int32, [batch_size], name=\"input_enc_seq_lengths\")\r\n\t\tinput_dec_seq_lengths = tf.placeholder(tf.int32, [batch_size], name=\"input_dec_seq_lengths\")\r\n\t\tmax_summary_length = tf.constant(max_decoding_length, dtype=tf.int32, name=\"max_summary_length\")\r\n\t\tmax_text_length = tf.constant(max_encoding_length, dtype=tf.int32, name=\"text_length\")\r\n\t\tfake_summary_length = tf.constant(max_decoding_length, dtype=tf.int32, shape=[batch_size])\r\n\t\t#Hyperparameters\r\n\t\tlearning_rate = tf.placeholder(tf.float32, name=\"learning_rate\")\r\n\t\tcoverage_multiplier = tf.constant(1, tf.float32, name=\"coverage_multiplier\")\r\n\r\n\t\t#For pointer network\r\n\t\tbatch_max_oov = tf.placeholder(tf.int32, shape=(), name=\"batch_max_oov\")\r\n\r\n\t\tself.tf_input_enc_batch = input_enc_batch\r\n\t\tself.tf_input_dec_batch = input_dec_batch\r\n\t\tself.tf_target_dec_batch = target_dec_batch\r\n\t\tself.tf_input_enc_seq_lengths = 
input_enc_seq_lengths\r\n\t\tself.tf_input_dec_seq_lengths = input_dec_seq_lengths\r\n\t\tself.tf_max_summary_length = max_summary_length\r\n\t\tself.tf_max_text_length = max_text_length\r\n\t\tself.tf_fake_summary_length = fake_summary_length\r\n\t\tself.tf_learning_rate = learning_rate\r\n\t\tself.coverage_multiplier = coverage_multiplier\r\n\t\tself.tf_batch_max_oov = batch_max_oov\r\n\r\n\r\n\tdef _generate_Embeddings(self):\r\n\r\n\t\tvocab_size = self.vocab_size\r\n\t\tembedding_size = self.embedding_size\r\n\t\tinput_enc_batch = self.tf_input_enc_batch\r\n\t\tinput_dec_batch = self.tf_input_dec_batch\r\n\r\n\r\n\t\twith tf.variable_scope(\"embedding\"):\r\n\t\t\tembedding = tf.get_variable(\"embedding_encoder\", [vocab_size, embedding_size], dtype=tf.float32, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\r\n\t\t\ttf.summary.histogram('embedding', embedding)\r\n\t\t\temb_enc_batch = tf.nn.embedding_lookup(embedding, input_enc_batch)\r\n\t\t\temb_dec_batch = tf.nn.embedding_lookup(embedding, input_dec_batch)\r\n\r\n\t\tself.emb_enc_batch = emb_enc_batch\r\n\t\tself.emb_dec_batch = emb_dec_batch\r\n\t\tself.embedding_matrix = embedding\r\n\r\n\tdef _generate_Encoder(self):\r\n\t\tcellSize = self.cellSize\r\n\t\tinput_batch = self.emb_enc_batch\r\n\t\tseq_length_batch = self.tf_input_enc_seq_lengths\r\n\r\n\t\twith tf.variable_scope(\"encoder\"):\r\n\t\t\tforward_LSTM = tf.contrib.rnn.LSTMCell(cellSize,\r\n\t\t\t\t\t\t\t\t\t\t\t initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2), forget_bias=1.0, name=\"forward_cell\")\r\n\t\t\tbackward_LSTM = tf.contrib.rnn.LSTMCell(cellSize,\r\n\t\t\t\t\t\t\t\t\t\t\t initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2), forget_bias=1.0, name=\"backward_cell\")\r\n\t\t\tenc_outputs, enc_states = tf.nn.bidirectional_dynamic_rnn(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tforward_LSTM,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbackward_LSTM,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tinput_batch,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsequence_length= seq_length_batch,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdtype=tf.float32)\r\n\t\t\tenc_outputs = tf.concat(enc_outputs, 2)\r\n\t\t\tforward_state, backward_state = enc_states\r\n\t\tself.enc_outputs = enc_outputs\r\n\t\tself.forward_state = forward_state\r\n\t\tself.backward_state = backward_state\r\n\r\n\tdef reduce_transfered_states(self):\r\n\t\tcellSize = self.cellSize\r\n\t\tforward_state = self.forward_state\r\n\t\tbackward_state = self.backward_state\r\n\r\n\t\t#In the article, he build a 1 layer neural net\r\n\t\twith tf.variable_scope('reduce_transfered_states'):\r\n\r\n\r\n\t\t\tW_c = tf.get_variable('W_c', [2*cellSize, cellSize], dtype=tf.float32, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\r\n\t\t\tW_h = tf.get_variable('W_h', [2*cellSize, cellSize], dtype=tf.float32, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\r\n\t\t\tb_c = tf.get_variable('b_c', [cellSize], dtype=tf.float32, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\r\n\t\t\tb_h = tf.get_variable('b_h', [cellSize], dtype=tf.float32, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\r\n\r\n\r\n\t\t\tconcat_c = tf.concat(axis=1, values=[forward_state.c, backward_state.c])\r\n\t\t\tconcat_h = tf.concat(axis=1, values=[forward_state.h, backward_state.h])\r\n\r\n\t\t\tnew_c = tf.matmul(concat_c, W_c) + b_c\r\n\t\t\tnew_h = tf.matmul(concat_h, W_h) + b_h\r\n\r\n\t\tself.encoder_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h) # Return new cell and state\r\n\r\n\r\n\tdef 
_generate_Decoder(self, task=\"train\"):\r\n\t\t\r\n\t\tcellSize = self.cellSize\r\n\t\tenc_input_length = self.tf_input_enc_seq_lengths\r\n\t\tenc_outputs = self.enc_outputs\r\n\t\tenc_state = self.encoder_state\r\n\t\tvocab_size = self.vocab_size\r\n\t\tbatch_size = self.batch_size\r\n\t\tbeam_width = self.beam_width\r\n\r\n\t\tif self.coverage:\r\n\t\t\tAttention_mech_chosen = BahdanauCoverageAttention\r\n\t\telse:\r\n\t\t\tAttention_mech_chosen = tf.contrib.seq2seq.BahdanauAttention\r\n\r\n\t\tprint(Attention_mech_chosen)\r\n\t\t\r\n\t\tif task==\"train\":\r\n\t\t\twith tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE):\r\n\t\t\t\tenc_outputs = tf.contrib.seq2seq.tile_batch(enc_outputs, multiplier=1)\r\n\t\t\t\tenc_state = tf.contrib.seq2seq.tile_batch(enc_state, multiplier=1)\r\n\t\t\t\tenc_input_length = tf.contrib.seq2seq.tile_batch(enc_input_length, multiplier=1)\r\n\t\t\t\tself.decoder_cell = tf.contrib.rnn.LSTMCell(cellSize, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2), forget_bias=1.0, name=\"cell\")\r\n\r\n\t\t\t\tself.attention_mechanism = Attention_mech_chosen(cellSize,\r\n\t\t enc_outputs,\r\n\t\t enc_input_length,\r\n\t\t normalize=False,\r\n\t\t name=\"attention_model\")\r\n\r\n\t\t\t\tself.decoder_cell = AttentionPointerWrapper(self.decoder_cell,\r\n\t\t self.attention_mechanism,\r\n\t\t cellSize,\r\n\t\t alignment_history=True,\r\n\t\t initial_cell_state=enc_state,\r\n\t\t output_attention=False,\r\n\t\t pointer=self.pointer,\r\n\t\t name=\"attention_wrapper\")\r\n\r\n\t\t\t\tself.initial_state_normal = self.decoder_cell.zero_state(batch_size, dtype=tf.float32).clone(cell_state=enc_state)\r\n\r\n\t\t\t\tself.projection_layer = tf.layers.Dense(vocab_size)\r\n\t\t\t\t#kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1),activation=tf.nn.softmax\r\n\r\n\t\t\t\tself._generate_Decoder_training()\r\n\r\n\t\telif task==\"test\":\r\n\t\t\twith tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE):\r\n\t\t\t\ttiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(enc_outputs, multiplier=1)\r\n\t\t\t\ttiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(enc_state, multiplier=1)\r\n\t\t\t\ttiled_sequence_length = tf.contrib.seq2seq.tile_batch(enc_input_length, multiplier=1)\r\n\t\t\t\tself.decoder_cell_original = tf.contrib.rnn.LSTMCell(cellSize, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2), name=\"cell\")\r\n\r\n\t\t\t\tself.attention_mechanism = Attention_mech_chosen(cellSize,\r\n\t\t enc_outputs,\r\n\t\t enc_input_length,\r\n\t\t normalize=False,\r\n\t\t name=\"attention_model\")\r\n\r\n\t\t\t\tself.decoder_cell = AttentionPointerWrapper(self.decoder_cell_original,\r\n\t\t self.attention_mechanism,\r\n\t\t cellSize,\r\n\t\t alignment_history=True,\r\n\t\t initial_cell_state=enc_state,\r\n\t\t output_attention=False,\r\n\t\t pointer=self.pointer,\r\n\t\t name=\"attention_wrapper\")\r\n\r\n\t\t\t\tself.initial_state_normal = self.decoder_cell.zero_state(batch_size, dtype=tf.float32).clone(cell_state=enc_state)\r\n\r\n\t\t\t\tself.projection_layer = tf.layers.Dense(vocab_size)\r\n\t\t\t\tself._generate_Decoder_prediction()\r\n\t\t\t\t#kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1),activation=tf.nn.softmax\r\n\t\t\twith tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE):\r\n\t\t\t\ttiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(enc_outputs, multiplier=beam_width)\r\n\t\t\t\ttiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(enc_state, 
multiplier=beam_width)\r\n\t\t\t\ttiled_sequence_length = tf.contrib.seq2seq.tile_batch(enc_input_length, multiplier=beam_width)\r\n\r\n\t\t\t\tself.attention_mechanism_beam = Attention_mech_chosen(cellSize,\r\n\t tiled_encoder_outputs,\r\n\t tiled_sequence_length,\r\n\t normalize=False,\r\n\t name=\"attention_model\")\r\n\r\n\t\t\t\tself.decoder_cell = AttentionPointerWrapper(self.decoder_cell_original,\r\n\t\t self.attention_mechanism_beam,\r\n\t\t cellSize,\r\n\t\t initial_cell_state=tiled_encoder_final_state,\r\n\t\t alignment_history=True,\r\n\t\t output_attention=False,\r\n\t\t pointer=self.pointer,\r\n\t\t name=\"attention_wrapper\")\r\n\r\n\t\t\t\t# Replicate encoder infos beam_width times\r\n\t\t\t\tdecoder_initial_state = self.decoder_cell.zero_state(dtype=tf.float32, batch_size=batch_size * beam_width)\r\n\t\t\t\tself.decoder_initial_state_beam = decoder_initial_state.clone(cell_state=tiled_encoder_final_state)\r\n\r\n\t\t\t\t#self._generate_Decoder_prediction_beam()\r\n\r\n\r\n\tdef _generate_Decoder_prediction(self):\r\n\t\tdecoder_cell = self.decoder_cell\r\n\t\tprojection_layer = self.projection_layer\r\n\t\tmax_summary_length = self.tf_max_summary_length\r\n\t\tenc_state = self.initial_state_normal\r\n\t\tembedding = self.embedding_matrix\r\n\t\tstart_token = self.start_token\r\n\t\tstop_token = self.stop_token\r\n\t\tbatch_size = self.batch_size\r\n\r\n\t\twith tf.variable_scope(\"decoder_training\", reuse=tf.AUTO_REUSE):\r\n\t\t\tinference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding,\r\n\t tf.fill([batch_size], start_token),\r\n\t stop_token)\r\n\r\n\t\t\tdecoder_greedy = tf.contrib.seq2seq.BasicDecoder(decoder_cell, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t inference_helper,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t enc_state,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t output_layer=projection_layer)\r\n\r\n\t\t\toutputs_greedy, state_greedy, _= tf.contrib.seq2seq.dynamic_decode(decoder_greedy,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\timpute_finished=True,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmaximum_iterations=max_summary_length)\r\n\t\t\r\n\t\tself.output_prediction_greedy = outputs_greedy.sample_id\r\n\t\tself.logits_prediction_greedy = outputs_greedy.rnn_output\r\n\t\tself.dec_states_greedy = state_greedy\r\n\r\n\tdef _generate_Decoder_training(self):\r\n\r\n\t\tdecoder_cell = self.decoder_cell\r\n\t\tprojection_layer = self.projection_layer\r\n\t\tdec_input = self.emb_dec_batch\r\n\t\tdec_input_length = self.tf_fake_summary_length\r\n\t\tmax_summary_length = self.tf_max_summary_length\r\n\t\tenc_state = self.initial_state_normal\r\n\t\tbatch_size = self.batch_size\r\n\r\n\t\twith tf.variable_scope(\"decoder_training\"):\r\n\r\n\t\t\thelper= tf.contrib.seq2seq.TrainingHelper(dec_input, dec_input_length)\r\n\r\n\t\t\tdecoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t helper,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t enc_state,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t output_layer=projection_layer)\r\n\t\t\toutputs, final_state, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(decoder,impute_finished=True,maximum_iterations=max_summary_length)\r\n\r\n\t\t\tif self.pointer:\r\n\t\t\t\tfinal_dist = self.compute_pointer_distribution(outputs.rnn_output, final_state.alignment_history.stack(), final_state.pgen.stack())\r\n\t\t\telse:\r\n\t\t\t\tfinal_dist = outputs.rnn_output\r\n\t\tself.outputs_training = final_dist\r\n\t\tself.final_state_training = final_state\r\n\r\n\tdef _generate_Decoder_prediction_beam(self):\r\n\t\tcellSize = self.cellSize\r\n\t\tdecoder_cell 
= self.decoder_cell\r\n\t\tprojection_layer = self.projection_layer\r\n\t\tmax_summary_length = self.tf_max_summary_length\r\n\t\tenc_outputs = self.enc_outputs\r\n\t\tenc_state = self.encoder_state\r\n\t\tenc_input_length = self.tf_input_enc_seq_lengths\r\n\t\tembedding = self.embedding_matrix\r\n\t\tstart_token = self.start_token\r\n\t\tstop_token = self.stop_token\r\n\t\tbatch_size = self.batch_size\r\n\t\tbeam_width = self.beam_width\r\n\r\n\t\t#Attention mechanism for beam search\r\n\t\twith tf.variable_scope(\"decoder_training\", reuse=tf.AUTO_REUSE):\r\n\t\t\t#Generating specific sizes for all tensors for attention\r\n\r\n\r\n\t\t\t#print(decoder_initial_state_beam.c.get_shape())\r\n\t\t\t# Define a beam-search decoder\r\n\t\t\tdecoder_beam = tf.contrib.seq2seq.BeamSearchDecoder(\r\n\t\t\t\t cell=self.decoder_cell,\r\n\t\t\t\t embedding=embedding,\r\n\t\t\t\t start_tokens=tf.fill([batch_size], start_token),\r\n\t\t\t\t end_token=stop_token,\r\n\t\t\t\t initial_state=self.decoder_initial_state_beam,\r\n\t\t\t\t beam_width=beam_width,\r\n\t\t\t\t output_layer=projection_layer,\r\n\t\t\t\t length_penalty_weight=0.0)\r\n\t\t\t#Beams are ordered from best to worse\r\n\t\t\toutput_beam, state_beam, _ = tf.contrib.seq2seq.dynamic_decode(decoder_beam,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\timpute_finished=False,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmaximum_iterations=max_summary_length)\r\n\r\n\r\n\t\t\tself.output_prediction_beam = output_beam.predicted_ids\r\n\t\t\tself.dec_states_beam = state_beam\r\n\r\n\r\n\tdef compute_pointer_distribution(self, logits, attention, pgen):\r\n\t\tprint(self.tf_input_enc_batch.get_shape())\r\n\r\n\t\tpgen_extended = tf.expand_dims(pgen, [2])\r\n\r\n\t\t# Is the attention * (1-pgen) for each TT\r\n\t\tattention_dist = tf.transpose(attention * (1-pgen_extended), perm=[1,0,2])\r\n\t\t#Is logits * pgen for each TT\r\n\t\tvocab_dist = logits * tf.transpose(pgen_extended, perm=[1,0,2])\r\n\r\n\t\t#Adding the extra words to the vocabulary\r\n\t\tnew_zeros = tf.zeros((self.batch_size,tf.shape(vocab_dist)[1], self.tf_batch_max_oov))\r\n\t\tvocab_dist = tf.concat([vocab_dist, new_zeros],2)\r\n\r\n\t\t#Adding the attention distributions\r\n\t\tshape = tf.shape(vocab_dist)\r\n\t\textented_enc_batch = tf.tile(tf.expand_dims(self.tf_input_enc_batch, [1]), [1, tf.shape(vocab_dist)[1], 1])\r\n\t\ti1, i2 = tf.meshgrid(tf.range(self.batch_size),\r\n tf.range(tf.shape(vocab_dist)[1]), indexing=\"ij\")\r\n\t\ti1 = tf.tile(i1[:, :, tf.newaxis], [1, 1, tf.shape(extented_enc_batch)[2]])\r\n\t\ti2 = tf.tile(i2[:, :, tf.newaxis], [1, 1, tf.shape(extented_enc_batch)[2]])\r\n\t\t# Create final indices\r\n\t\tidx = tf.stack([i1, i2, extented_enc_batch], axis=-1)\r\n\r\n\r\n\t\tattention_vocab = tf.scatter_nd(idx, attention_dist, shape)\r\n\r\n\t\tfinal_dist = attention_vocab + vocab_dist\r\n\t\treturn final_dist\r\n\r\n\r\n\tdef _generate_Optimisation(self):\r\n\t\tbatch_size = self.batch_size\r\n\t\toutput_batch = self.outputs_training\r\n\t\ttarget_batch = self.tf_target_dec_batch\r\n\t\tsummaries_length = self.tf_input_dec_seq_lengths\r\n\t\tmax_summary_length = self.tf_max_summary_length\r\n\t\tmax_encoding_length = self.tf_max_text_length\r\n\t\tlearning_rate = self.learning_rate\r\n\t\tgradient_clip = self.gradient_clip\r\n\t\tcoverage_multiplier = self.coverage_multiplier\r\n\r\n\t\twith tf.variable_scope(\"optimisation\"):\r\n\t\t\t#mask is of shape [batch_size, max_decoding_length]\r\n\t\t\tmask = tf.sequence_mask(summaries_length, maxlen = max_summary_length, 
dtype=tf.float32)\r\n\t\t\tlogit = tf.identity(output_batch)\r\n\t\t\t#Loss of the sequence\r\n\t\t\tcost = tf.contrib.seq2seq.sequence_loss(logit, target_batch, mask)\r\n\r\n\r\n\t\t\t#Loss of the coverage part\r\n\t\t\tif self.coverage:\r\n\t\t\t\t#alignment history of size [Dec_length, batch_size, encoder_length]\r\n\t\t\t\talignment_history = self.final_state_training.alignment_history.stack()\r\n\t\t\t\tcoverage = tf.cumsum(alignment_history, axis=0, exclusive=True)\r\n\t\t\t\t#print(coverage.get_shape())\r\n\t\t\t\t#cov loss of shape [dec_length, batch_size]\r\n\t\t\t\tcoverage_loss = tf.reduce_sum(tf.minimum(alignment_history, coverage), [2])\r\n\t\t\t\tmasked_coverage_loss = tf.reduce_sum(tf.transpose(coverage_loss) * mask) / tf.to_float(max_encoding_length, name='ToFloat')\r\n\t\t\t\t#print(cost.get_shape())\r\n\t\t\t\tloss = (cost + coverage_multiplier * masked_coverage_loss)/ batch_size\r\n\t\t\telse:\r\n\t\t\t\tloss = (cost)/ batch_size\r\n\r\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate)\r\n\r\n\r\n\r\n\t\t\tparams = tf.trainable_variables()\r\n\t\t\tgradients = tf.gradients(loss, params)\r\n\t\t\tclipped_gradients, _ = tf.clip_by_global_norm(gradients, gradient_clip)\r\n\r\n\r\n\t\t\tupdate_step = optimizer.apply_gradients(zip(clipped_gradients, params))\r\n\t\tself.tf_loss = loss\r\n\t\tself.tf_update_step = update_step\r\n\r\nif __name__ == \"__main__\":\r\n\tnet = Seq2SeqSummarisation()\r\n\tnet.train(nb_data = 20000, create_batches=False, load_from_checkpoint=True)\r\n\t#net.infer()" ]
[ [ "tensorflow.concat", "tensorflow.stack", "tensorflow.minimum", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.cumsum", "tensorflow.random_uniform_initializer", "tensorflow.gradients", "tensorflow.to_float", "tensorflow.train.Saver", "tensorflow.trainable_variables", "tensorflow.contrib.seq2seq.BasicDecoder", "tensorflow.matmul", "tensorflow.fill", "tensorflow.shape", "tensorflow.identity", "tensorflow.placeholder", "tensorflow.scatter_nd", "tensorflow.layers.Dense", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.summary.merge_all", "tensorflow.sequence_mask", "tensorflow.nn.embedding_lookup", "tensorflow.summary.histogram", "tensorflow.contrib.seq2seq.TrainingHelper", "tensorflow.contrib.seq2seq.dynamic_decode", "tensorflow.constant", "tensorflow.transpose", "tensorflow.range", "tensorflow.contrib.seq2seq.sequence_loss", "tensorflow.expand_dims", "tensorflow.contrib.seq2seq.tile_batch", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope" ] ]
skeleton973213/class_schedule
[ "0a9354f4f5e2a2c292b7c0449569c37a63074600" ]
[ "class_info.py" ]
[ "import pandas as pd\nimport openpyxl\n\nraw_data = pd.read_excel(io=\"RAI_schedule.xlsx\", sheet_name=\"ROBOTICS & AI 12021\")" ]
[ [ "pandas.read_excel" ] ]
vidakDK/colour
[ "dd27c140ef9782e9718e0fa6e36800a958ab1c00", "dd27c140ef9782e9718e0fa6e36800a958ab1c00" ]
[ "utilities/generate_plots.py", "colour/models/tests/test_ipt.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGenerate Plots\n==============\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nimport colour\nfrom colour.plotting import *\nfrom colour.plotting.diagrams import (\n plot_spectral_locus, plot_chromaticity_diagram_colours,\n plot_chromaticity_diagram, plot_sds_in_chromaticity_diagram)\nfrom colour.plotting.models import (\n plot_RGB_colourspaces_in_chromaticity_diagram,\n plot_RGB_chromaticities_in_chromaticity_diagram,\n plot_ellipses_MacAdam1942_in_chromaticity_diagram)\nfrom colour.plotting.quality import plot_colour_quality_bars\nfrom colour.plotting.temperature import (\n plot_planckian_locus, plot_planckian_locus_in_chromaticity_diagram)\nfrom colour.utilities import domain_range_scale\n\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['generate_documentation_plots']\n\n\ndef generate_documentation_plots(output_directory):\n \"\"\"\n Generates documentation plots.\n\n Parameters\n ----------\n output_directory : unicode\n Output directory.\n \"\"\"\n\n colour.utilities.filter_warnings()\n\n colour_style()\n\n np.random.seed(0)\n\n # *************************************************************************\n # \"README.rst\"\n # *************************************************************************\n arguments = {\n 'tight_layout':\n True,\n 'transparent_background':\n True,\n 'filename':\n os.path.join(output_directory,\n 'Examples_Plotting_Visible_Spectrum.png')\n }\n plot_visible_spectrum('CIE 1931 2 Degree Standard Observer', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Examples_Plotting_Illuminant_F1_SD.png')\n plot_single_illuminant_sd('FL1', **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Examples_Plotting_Blackbodies.png')\n blackbody_sds = [\n colour.sd_blackbody(i, colour.SpectralShape(0, 10000, 10))\n for i in range(1000, 15000, 1000)\n ]\n plot_multi_sds(\n blackbody_sds,\n y_label='W / (sr m$^2$) / m',\n use_sds_colours=True,\n normalise_sds_colours=True,\n legend_location='upper right',\n bounding_box=(0, 1250, 0, 2.5e15),\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Examples_Plotting_Cone_Fundamentals.png')\n plot_single_cmfs(\n 'Stockman & Sharpe 2 Degree Cone Fundamentals',\n y_label='Sensitivity',\n bounding_box=(390, 870, 0, 1.1),\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Examples_Plotting_Luminous_Efficiency.png')\n sd_mesopic_luminous_efficiency_function = (\n colour.sd_mesopic_luminous_efficiency_function(0.2))\n plot_multi_sds(\n (sd_mesopic_luminous_efficiency_function,\n colour.PHOTOPIC_LEFS['CIE 1924 Photopic Standard Observer'],\n colour.SCOTOPIC_LEFS['CIE 1951 Scotopic Standard Observer']),\n y_label='Luminous Efficiency',\n legend_location='upper right',\n y_tighten=True,\n margins=(0, 0, 0, .1),\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Examples_Plotting_BabelColor_Average.png')\n plot_multi_sds(\n colour.COLOURCHECKERS_SDS['BabelColor Average'].values(),\n use_sds_colours=True,\n title=('BabelColor Average - '\n 'Spectral Distributions'),\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 
'Examples_Plotting_ColorChecker_2005.png')\n plot_single_colour_checker(\n 'ColorChecker 2005', text_parameters={'visible': False}, **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Examples_Plotting_Chromaticities_Prediction.png')\n plot_corresponding_chromaticities_prediction(2, 'Von Kries', 'Bianco',\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Examples_Plotting_CCT_CIE_1960_UCS_Chromaticity_Diagram.png')\n plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(['A', 'B', 'C'],\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Examples_Plotting_Chromaticities_CIE_1931_Chromaticity_Diagram.png')\n RGB = np.random.random((32, 32, 3))\n plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(\n RGB,\n 'ITU-R BT.709',\n colourspaces=['ACEScg', 'S-Gamut'],\n show_pointer_gamut=True,\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Examples_Plotting_CRI.png')\n plot_single_sd_colour_rendering_index_bars(colour.ILLUMINANTS_SDS['FL2'],\n **arguments)\n\n # *************************************************************************\n # Documentation\n # *************************************************************************\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_CVD_Simulation_Machado2009.png')\n plot_cvd_simulation_Machado2009(RGB, **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_Colour_Checker.png')\n plot_single_colour_checker('ColorChecker 2005', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Multi_Colour_Checkers.png')\n plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'],\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Single_SD.png')\n data = {\n 500: 0.0651,\n 520: 0.0705,\n 540: 0.0772,\n 560: 0.0870,\n 580: 0.1128,\n 600: 0.1360\n }\n sd = colour.SpectralDistribution(data, name='Custom')\n plot_single_sd(sd, **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Multi_SDs.png')\n data_1 = {\n 500: 0.004900,\n 510: 0.009300,\n 520: 0.063270,\n 530: 0.165500,\n 540: 0.290400,\n 550: 0.433450,\n 560: 0.594500\n }\n data_2 = {\n 500: 0.323000,\n 510: 0.503000,\n 520: 0.710000,\n 530: 0.862000,\n 540: 0.954000,\n 550: 0.994950,\n 560: 0.995000\n }\n spd1 = colour.SpectralDistribution(data_1, name='Custom 1')\n spd2 = colour.SpectralDistribution(data_2, name='Custom 2')\n plot_multi_sds([spd1, spd2], **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Single_CMFS.png')\n plot_single_cmfs('CIE 1931 2 Degree Standard Observer', **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Multi_CMFS.png')\n cmfs = ('CIE 1931 2 Degree Standard Observer',\n 'CIE 1964 10 Degree Standard Observer')\n plot_multi_cmfs(cmfs, **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_Illuminant_SD.png')\n plot_single_illuminant_sd('A', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Multi_Illuminant_SDs.png')\n plot_multi_illuminant_sds(['A', 'B', 'C'], **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Visible_Spectrum.png')\n plot_visible_spectrum(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_Lightness_Function.png')\n 
plot_single_lightness_function('CIE 1976', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Multi_Lightness_Functions.png')\n plot_multi_lightness_functions(['CIE 1976', 'Wyszecki 1963'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_Luminance_Function.png')\n plot_single_luminance_function('CIE 1976', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Multi_Luminance_Functions.png')\n plot_multi_luminance_functions(['CIE 1976', 'Newhall 1943'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Blackbody_Spectral_Radiance.png')\n plot_blackbody_spectral_radiance(\n 3500, blackbody='VY Canis Major', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Blackbody_Colours.png')\n plot_blackbody_colours(colour.SpectralShape(150, 12500, 50), **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_Colour_Swatch.png')\n RGB = ColourSwatch(RGB=(0.32315746, 0.32983556, 0.33640183))\n plot_single_colour_swatch(RGB, **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Multi_Colour_Swatches.png')\n RGB_1 = ColourSwatch(RGB=(0.45293517, 0.31732158, 0.26414773))\n RGB_2 = ColourSwatch(RGB=(0.77875824, 0.57726450, 0.50453169))\n plot_multi_colour_swatches([RGB_1, RGB_2], **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Single_Function.png')\n plot_single_function(lambda x: x ** (1 / 2.2), **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Multi_Functions.png')\n functions = {\n 'Gamma 2.2': lambda x: x ** (1 / 2.2),\n 'Gamma 2.4': lambda x: x ** (1 / 2.4),\n 'Gamma 2.6': lambda x: x ** (1 / 2.6),\n }\n plot_multi_functions(functions, **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Image.png')\n path = os.path.join(colour.__path__[0], '..', 'docs', '_static',\n 'Logo_Medium_001.png')\n plot_image(colour.read_image(str(path)), **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Corresponding_Chromaticities_Prediction.png')\n plot_corresponding_chromaticities_prediction(1, 'Von Kries', 'CAT02',\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Spectral_Locus.png')\n plot_spectral_locus(spectral_locus_colours='RGB', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Chromaticity_Diagram_Colours.png')\n plot_chromaticity_diagram_colours(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Chromaticity_Diagram.png')\n plot_chromaticity_diagram(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1931.png')\n plot_chromaticity_diagram_CIE1931(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1960UCS.png')\n plot_chromaticity_diagram_CIE1960UCS(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png')\n plot_chromaticity_diagram_CIE1976UCS(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_SDs_In_Chromaticity_Diagram.png')\n A = colour.ILLUMINANTS_SDS['A']\n D65 = colour.ILLUMINANTS_SDS['D65']\n plot_sds_in_chromaticity_diagram([A, 
D65], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_SDs_In_Chromaticity_Diagram_CIE1931.png')\n plot_sds_in_chromaticity_diagram_CIE1931([A, D65], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_SDs_In_Chromaticity_Diagram_CIE1960UCS.png')\n plot_sds_in_chromaticity_diagram_CIE1960UCS([A, D65], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_SDs_In_Chromaticity_Diagram_CIE1976UCS.png')\n plot_sds_in_chromaticity_diagram_CIE1976UCS([A, D65], **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Pointer_Gamut.png')\n plot_pointer_gamut(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_RGB_Colourspaces_In_Chromaticity_Diagram.png')\n plot_RGB_colourspaces_in_chromaticity_diagram(\n ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_RGB_Colourspaces_In_Chromaticity_Diagram_CIE1931.png')\n plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931(\n ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Colourspaces_In_'\n 'Chromaticity_Diagram_CIE1960UCS.png')\n plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS(\n ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Colourspaces_In_'\n 'Chromaticity_Diagram_CIE1976UCS.png')\n plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS(\n ['ITU-R BT.709', 'ACEScg', 'S-Gamut'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'\n 'Chromaticity_Diagram_Plot.png')\n RGB = np.random.random((128, 128, 3))\n plot_RGB_chromaticities_in_chromaticity_diagram(RGB, 'ITU-R BT.709',\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'\n 'Chromaticity_Diagram_CIE1931.png')\n plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(\n RGB, 'ITU-R BT.709', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'\n 'Chromaticity_Diagram_CIE1960UCS.png')\n plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS(\n RGB, 'ITU-R BT.709', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Chromaticities_In_'\n 'Chromaticity_Diagram_CIE1976UCS.png')\n plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS(\n RGB, 'ITU-R BT.709', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram.png')\n plot_ellipses_MacAdam1942_in_chromaticity_diagram(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'\n 'Chromaticity_Diagram_CIE1931.png')\n plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'\n 'Chromaticity_Diagram_CIE1960UCS.png')\n plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Ellipses_MacAdam1942_In_'\n 'Chromaticity_Diagram_CIE1976UCS.png')\n plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS(**arguments)\n\n arguments['filename'] 
= os.path.join(output_directory,\n 'Plotting_Plot_Single_CCTF.png')\n plot_single_cctf('ITU-R BT.709', **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Multi_CCTFs.png')\n plot_multi_cctfs(['ITU-R BT.709', 'sRGB'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_Munsell_Value_Function.png')\n plot_single_munsell_value_function('ASTM D1535-08', **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Multi_Munsell_Value_Functions.png')\n plot_multi_munsell_value_functions(['ASTM D1535-08', 'McCamy 1987'],\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Single_SD_Rayleigh_Scattering.png')\n plot_single_sd_rayleigh_scattering(**arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_The_Blue_Sky.png')\n plot_the_blue_sky(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_Colour_Quality_Bars.png')\n illuminant = colour.ILLUMINANTS_SDS['FL2']\n light_source = colour.LIGHT_SOURCES_SDS['Kinoton 75P']\n light_source = light_source.copy().align(colour.SpectralShape(360, 830, 1))\n cqs_i = colour.colour_quality_scale(illuminant, additional_data=True)\n cqs_l = colour.colour_quality_scale(light_source, additional_data=True)\n plot_colour_quality_bars([cqs_i, cqs_l], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Single_SD_Colour_Rendering_Index_Bars.png')\n illuminant = colour.ILLUMINANTS_SDS['FL2']\n plot_single_sd_colour_rendering_index_bars(illuminant, **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Multi_SDs_Colour_Rendering_Indexes_Bars.png')\n light_source = colour.LIGHT_SOURCES_SDS['Kinoton 75P']\n plot_multi_sds_colour_rendering_indexes_bars([illuminant, light_source],\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Single_SD_Colour_Quality_Scale_Bars.png')\n illuminant = colour.ILLUMINANTS_SDS['FL2']\n plot_single_sd_colour_quality_scale_bars(illuminant, **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Multi_SDs_Colour_Quality_Scales_Bars.png')\n light_source = colour.LIGHT_SOURCES_SDS['Kinoton 75P']\n plot_multi_sds_colour_quality_scales_bars([illuminant, light_source],\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_Planckian_Locus.png')\n plot_planckian_locus(**arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram.png')\n plot_planckian_locus_in_chromaticity_diagram(['A', 'B', 'C'], **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1931.png')\n plot_planckian_locus_in_chromaticity_diagram_CIE1931(['A', 'B', 'C'],\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory,\n 'Plotting_Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1960UCS.png')\n plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(['A', 'B', 'C'],\n **arguments)\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Colourspaces_Gamuts.png')\n plot_RGB_colourspaces_gamuts(['ITU-R BT.709', 'ACEScg', 'S-Gamut'],\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Plotting_Plot_RGB_Colourspaces_Gamuts.png')\n plot_RGB_colourspaces_gamuts(['ITU-R 
BT.709', 'ACEScg', 'S-Gamut'],\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Plotting_Plot_RGB_Scatter.png')\n plot_RGB_scatter(RGB, 'ITU-R BT.709', **arguments)\n\n # *************************************************************************\n # \"tutorial.rst\"\n # *************************************************************************\n arguments['filename'] = os.path.join(output_directory,\n 'Tutorial_Visible_Spectrum.png')\n plot_visible_spectrum(**arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Tutorial_Sample_SD.png')\n sample_sd_data = {\n 380: 0.048,\n 385: 0.051,\n 390: 0.055,\n 395: 0.060,\n 400: 0.065,\n 405: 0.068,\n 410: 0.068,\n 415: 0.067,\n 420: 0.064,\n 425: 0.062,\n 430: 0.059,\n 435: 0.057,\n 440: 0.055,\n 445: 0.054,\n 450: 0.053,\n 455: 0.053,\n 460: 0.052,\n 465: 0.052,\n 470: 0.052,\n 475: 0.053,\n 480: 0.054,\n 485: 0.055,\n 490: 0.057,\n 495: 0.059,\n 500: 0.061,\n 505: 0.062,\n 510: 0.065,\n 515: 0.067,\n 520: 0.070,\n 525: 0.072,\n 530: 0.074,\n 535: 0.075,\n 540: 0.076,\n 545: 0.078,\n 550: 0.079,\n 555: 0.082,\n 560: 0.087,\n 565: 0.092,\n 570: 0.100,\n 575: 0.107,\n 580: 0.115,\n 585: 0.122,\n 590: 0.129,\n 595: 0.134,\n 600: 0.138,\n 605: 0.142,\n 610: 0.146,\n 615: 0.150,\n 620: 0.154,\n 625: 0.158,\n 630: 0.163,\n 635: 0.167,\n 640: 0.173,\n 645: 0.180,\n 650: 0.188,\n 655: 0.196,\n 660: 0.204,\n 665: 0.213,\n 670: 0.222,\n 675: 0.231,\n 680: 0.242,\n 685: 0.251,\n 690: 0.261,\n 695: 0.271,\n 700: 0.282,\n 705: 0.294,\n 710: 0.305,\n 715: 0.318,\n 720: 0.334,\n 725: 0.354,\n 730: 0.372,\n 735: 0.392,\n 740: 0.409,\n 745: 0.420,\n 750: 0.436,\n 755: 0.450,\n 760: 0.462,\n 765: 0.465,\n 770: 0.448,\n 775: 0.432,\n 780: 0.421\n }\n\n sd = colour.SpectralDistribution(sample_sd_data, name='Sample')\n plot_single_sd(sd, **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Tutorial_SD_Interpolation.png')\n sd_copy = sd.copy()\n sd_copy.interpolate(colour.SpectralShape(400, 770, 1))\n plot_multi_sds(\n [sd, sd_copy], bounding_box=[730, 780, 0.25, 0.5], **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Tutorial_Sample_Swatch.png')\n sd = colour.SpectralDistribution(sample_sd_data)\n cmfs = colour.STANDARD_OBSERVERS_CMFS[\n 'CIE 1931 2 Degree Standard Observer']\n illuminant = colour.ILLUMINANTS_SDS['D65']\n with domain_range_scale('1'):\n XYZ = colour.sd_to_XYZ(sd, cmfs, illuminant)\n RGB = colour.XYZ_to_sRGB(XYZ)\n plot_single_colour_swatch(\n ColourSwatch('Sample', RGB),\n text_parameters={'size': 'x-large'},\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Tutorial_Neutral5.png')\n patch_name = 'neutral 5 (.70 D)'\n patch_sd = colour.COLOURCHECKERS_SDS['ColorChecker N Ohta'][patch_name]\n with domain_range_scale('1'):\n XYZ = colour.sd_to_XYZ(patch_sd, cmfs, illuminant)\n RGB = colour.XYZ_to_sRGB(XYZ)\n plot_single_colour_swatch(\n ColourSwatch(patch_name.title(), RGB),\n text_parameters={'size': 'x-large'},\n **arguments)\n\n arguments['filename'] = os.path.join(output_directory,\n 'Tutorial_Colour_Checker.png')\n plot_single_colour_checker(\n colour_checker='ColorChecker 2005',\n text_parameters={'visible': False},\n **arguments)\n\n arguments['filename'] = os.path.join(\n output_directory, 'Tutorial_CIE_1931_Chromaticity_Diagram.png')\n xy = colour.XYZ_to_xy(XYZ)\n plot_chromaticity_diagram_CIE1931(standalone=False)\n x, y = xy\n plt.plot(x, y, 'o-', color='white')\n # Annotating the plot.\n plt.annotate(\n 
patch_sd.name.title(),\n xy=xy,\n xytext=(-50, 30),\n textcoords='offset points',\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=-0.2'))\n render(\n standalone=True,\n limits=(-0.1, 0.9, -0.1, 0.9),\n x_tighten=True,\n y_tighten=True,\n **arguments)\n\n # *************************************************************************\n # \"basics.rst\"\n # *************************************************************************\n arguments['filename'] = os.path.join(output_directory,\n 'Basics_Logo_Small_001_CIE_XYZ.png')\n RGB = colour.read_image(\n os.path.join(output_directory, 'Logo_Small_001.png'))[..., 0:3]\n XYZ = colour.sRGB_to_XYZ(RGB)\n colour.plotting.plot_image(\n XYZ, text_parameters={'text': 'sRGB to XYZ'}, **arguments)\n\n\nif __name__ == '__main__':\n generate_documentation_plots(os.path.join('..', 'docs', '_static'))\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.ipt` module.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.models import XYZ_to_IPT, IPT_to_XYZ, IPT_hue_angle\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestXYZ_to_IPT', 'TestIPT_to_XYZ', 'TestIPTHueAngle']\n\n\nclass TestXYZ_to_IPT(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.ipt.TestXYZ_to_IPT` definition unit tests\n methods.\n \"\"\"\n\n def test_XYZ_to_IPT(self):\n \"\"\"\n Tests :func:`colour.models.ipt.XYZ_to_IPT` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n XYZ_to_IPT(np.array([0.20654008, 0.12197225, 0.05136952])),\n np.array([0.38426191, 0.38487306, 0.18886838]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n XYZ_to_IPT(np.array([0.14222010, 0.23042768, 0.10495772])),\n np.array([0.49437481, -0.19251742, 0.18080304]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n XYZ_to_IPT(np.array([0.07818780, 0.06157201, 0.28099326])),\n np.array([0.35167774, -0.07525627, -0.30921279]),\n decimal=7)\n\n def test_n_dimensional_XYZ_to_IPT(self):\n \"\"\"\n Tests :func:`colour.models.ipt.XYZ_to_IPT` definition n-dimensional\n support.\n \"\"\"\n\n XYZ = np.array([0.20654008, 0.12197225, 0.05136952])\n IPT = XYZ_to_IPT(XYZ)\n\n XYZ = np.tile(XYZ, (6, 1))\n IPT = np.tile(IPT, (6, 1))\n np.testing.assert_almost_equal(XYZ_to_IPT(XYZ), IPT, decimal=7)\n\n XYZ = np.reshape(XYZ, (2, 3, 3))\n IPT = np.reshape(IPT, (2, 3, 3))\n np.testing.assert_almost_equal(XYZ_to_IPT(XYZ), IPT, decimal=7)\n\n def test_domain_range_scale_XYZ_to_IPT(self):\n \"\"\"\n Tests :func:`colour.models.ipt.XYZ_to_IPT` definition domain and\n range scale support.\n \"\"\"\n\n XYZ = np.array([0.20654008, 0.12197225, 0.05136952])\n IPT = XYZ_to_IPT(XYZ)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n XYZ_to_IPT(XYZ * factor), IPT * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_XYZ_to_IPT(self):\n \"\"\"\n Tests :func:`colour.models.ipt.XYZ_to_IPT` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n XYZ = np.array(case)\n XYZ_to_IPT(XYZ)\n\n\nclass 
TestIPT_to_XYZ(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.ipt.IPT_to_XYZ` definition unit tests\n methods.\n \"\"\"\n\n def test_IPT_to_XYZ(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_to_XYZ` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n IPT_to_XYZ(np.array([0.38426191, 0.38487306, 0.18886838])),\n np.array([0.20654008, 0.12197225, 0.05136952]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n IPT_to_XYZ(np.array([0.49437481, -0.19251742, 0.18080304])),\n np.array([0.14222010, 0.23042768, 0.10495772]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n IPT_to_XYZ(np.array([0.35167774, -0.07525627, -0.30921279])),\n np.array([0.07818780, 0.06157201, 0.28099326]),\n decimal=7)\n\n def test_n_dimensional_IPT_to_XYZ(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_to_XYZ` definition n-dimensional\n support.\n \"\"\"\n\n IPT = np.array([0.38426191, 0.38487306, 0.18886838])\n XYZ = IPT_to_XYZ(IPT)\n\n IPT = np.tile(IPT, (6, 1))\n XYZ = np.tile(XYZ, (6, 1))\n np.testing.assert_almost_equal(IPT_to_XYZ(IPT), XYZ, decimal=7)\n\n IPT = np.reshape(IPT, (2, 3, 3))\n XYZ = np.reshape(XYZ, (2, 3, 3))\n np.testing.assert_almost_equal(IPT_to_XYZ(IPT), XYZ, decimal=7)\n\n def test_domain_range_scale_IPT_to_XYZ(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_to_XYZ` definition domain and\n range scale support.\n \"\"\"\n\n IPT = np.array([0.38426191, 0.38487306, 0.18886838])\n XYZ = IPT_to_XYZ(IPT)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n IPT_to_XYZ(IPT * factor), XYZ * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_IPT_to_XYZ(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_to_XYZ` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n IPT = np.array(case)\n IPT_to_XYZ(IPT)\n\n\nclass TestIPTHueAngle(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.ipt.IPT_hue_angle` definition unit tests\n methods.\n \"\"\"\n\n def test_IPT_hue_angle(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_hue_angle` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n IPT_hue_angle(np.array([0.20654008, 0.12197225, 0.05136952])),\n 22.838754548625527,\n decimal=7)\n\n np.testing.assert_almost_equal(\n IPT_hue_angle(np.array([0.14222010, 0.23042768, 0.10495772])),\n 24.488834912466245,\n decimal=7)\n\n np.testing.assert_almost_equal(\n IPT_hue_angle(np.array([0.07818780, 0.06157201, 0.28099326])),\n 77.640533743711813,\n decimal=7)\n\n def test_n_dimensional_IPT_hue_angle(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_hue_angle` definition n-dimensional\n support.\n \"\"\"\n\n IPT = np.array([0.20654008, 0.12197225, 0.05136952])\n hue = IPT_hue_angle(IPT)\n\n IPT = np.tile(IPT, (6, 1))\n hue = np.tile(hue, 6)\n np.testing.assert_almost_equal(IPT_hue_angle(IPT), hue, decimal=7)\n\n IPT = np.reshape(IPT, (2, 3, 3))\n hue = np.reshape(hue, (2, 3))\n np.testing.assert_almost_equal(IPT_hue_angle(IPT), hue, decimal=7)\n\n def test_domain_range_scale_IPT_hue_angle(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_hue_angle` definition domain and\n range scale support.\n \"\"\"\n\n IPT = np.array([0.20654008, 0.12197225, 0.05136952])\n hue = IPT_hue_angle(IPT)\n\n d_r = (('reference', 1, 1), (1, 1, 1 / 360), (100, 100, 1 / 3.6))\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n 
IPT_hue_angle(IPT * factor_a), hue * factor_b, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_IPT_hue_angle(self):\n \"\"\"\n Tests :func:`colour.models.ipt.IPT_hue_angle` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n IPT = np.array(case)\n IPT_hue_angle(IPT)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.random.random", "numpy.random.seed" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ] ]
ChiangYintso/ai
[ "44e6fac5f6854383a2314f10b0afc1b35952e852" ]
[ "jyzml/utils/metrics.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom .decorator import len_equal\n\n\n@len_equal\ndef accuracy_score(y_actual: np.ndarray, y_predict: np.ndarray) -> float:\n \"\"\"\n 计算准确率\n :param y_actual: 真实值\n :param y_predict: 预测值\n :return: 准确率\n \"\"\"\n if y_actual.shape != y_predict.shape:\n raise ValueError('the shape of y_actual must be equal to y_predict')\n\n return sum(y_actual == y_predict) / len(y_actual)\n\n\n@len_equal\ndef mean_squared_error(y_predict: np.ndarray, y_true: np.ndarray):\n \"\"\"\n 计算y_true和y_predict之间的MSE\n :param y_predict: 预测结果集\n :param y_true: 真值\n :return: MSE\n \"\"\"\n return np.mean((y_predict - y_true) ** 2)\n\n\n@len_equal\ndef root_mean_squared_error(y_predict: np.ndarray, y_true: np.ndarray):\n \"\"\"\n 计算y_true和y_predict之间的RMSE\n :param y_predict: 预测结果集\n :param y_true: 真值\n :return: RMSE\n \"\"\"\n return np.sqrt(mean_squared_error(y_predict, y_true))\n\n\ndef mean_absolute_error(y_predict: np.ndarray, y_true: np.ndarray):\n \"\"\"\n 计算y_true和y_predict之间的MAE\n :param y_predict: 预测结果集\n :param y_true: 真值\n :return:\n \"\"\"\n return np.mean(np.absolute(y_predict - y_true))\n\n\n@len_equal\ndef r2_score(y_predict: np.ndarray, y_true: np.ndarray):\n \"\"\"\n 计算y_true和y_predict之间的R Square\n :param y_predict: 预测结果集\n :param y_true: 真值\n :return:\n \"\"\"\n return 1 - mean_squared_error(y_true, y_predict)/np.var(y_true)\n" ]
[ [ "numpy.var", "numpy.absolute", "numpy.mean" ] ]
stjordanis/tfx
[ "4749388de03230361f2b7b733a657b3bc18b4152", "4749388de03230361f2b7b733a657b3bc18b4152" ]
[ "tfx/scripts/run_executor_test.py", "tfx/examples/bert/utils/bert_models.py" ]
[ "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.scripts.run_executor.\"\"\"\n\nimport json\nfrom typing import Any, Dict, List\n\nimport tensorflow as tf\n\nfrom tfx import types\nfrom tfx.dsl.components.base import base_executor\nfrom tfx.scripts import run_executor\nfrom tfx.types import artifact_utils\nfrom tfx.types import standard_artifacts\n\n\nclass ArgsCapture:\n instance = None\n\n def __enter__(self):\n ArgsCapture.instance = self\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n ArgsCapture.instance = None\n\n\nclass FakeExecutor(base_executor.BaseExecutor):\n\n def Do(self, input_dict: Dict[str, List[types.Artifact]],\n output_dict: Dict[str, List[types.Artifact]],\n exec_properties: Dict[str, Any]) -> None:\n \"\"\"Overrides BaseExecutor.Do().\"\"\"\n args_capture = ArgsCapture.instance\n args_capture.input_dict = input_dict\n args_capture.output_dict = output_dict\n args_capture.exec_properties = exec_properties\n\n\nclass RunExecutorTest(tf.test.TestCase):\n\n def testMainEmptyInputs(self):\n \"\"\"Test executor class import under empty inputs/outputs.\"\"\"\n inputs = {\n 'x': [\n standard_artifacts.ExternalArtifact(),\n standard_artifacts.ExternalArtifact()\n ]\n }\n outputs = {'y': [standard_artifacts.Examples()]}\n exec_properties = {'a': 'b'}\n args = [\n '--executor_class_path=%s.%s' %\n (FakeExecutor.__module__, FakeExecutor.__name__),\n '--inputs=%s' % artifact_utils.jsonify_artifact_dict(inputs),\n '--outputs=%s' % artifact_utils.jsonify_artifact_dict(outputs),\n '--exec-properties=%s' % json.dumps(exec_properties),\n ]\n with ArgsCapture() as args_capture:\n run_executor.main(run_executor._parse_flags(args))\n # TODO(b/131417512): Add equal comparison to types.Artifact class so we\n # can use asserters.\n self.assertSetEqual(\n set(args_capture.input_dict.keys()), set(inputs.keys()))\n self.assertSetEqual(\n set(args_capture.output_dict.keys()), set(outputs.keys()))\n self.assertDictEqual(args_capture.exec_properties, exec_properties)\n\n\n# TODO(zhitaoli): Add tests for:\n# - base64 decoding of flags;\n# - write output.\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Configurable fine-tuning BERT models for various tasks.\"\"\"\n\nfrom typing import Optional, List, Union\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\n\ndef build_bert_classifier(bert_layer: tf.keras.layers.Layer,\n max_len: int,\n num_classes: int,\n dropout: float = 0.1,\n activation: Optional[str] = None):\n \"\"\"BERT Keras model for classification.\n\n Connect configurable fully connected layers on top of the BERT\n pooled_output.\n\n Args:\n bert_layer: A tensorflow_hub.KerasLayer intence of BERT layer.\n max_len: The maximum length of preprocessed tokens.\n num_classes: Number of unique classes in the labels. Determines the output\n shape of the classification layer.\n dropout: Dropout rate to be used for the classification layer.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied (ie. \"linear\" activation: a(x) = x).\n\n Returns:\n A Keras model.\n \"\"\"\n input_layer_names = [\"input_word_ids\", \"input_mask\", \"segment_ids\"]\n\n input_layers = [\n keras.layers.Input(shape=(max_len,), dtype=tf.int64, name=name)\n for name in input_layer_names\n ]\n\n converted_layers = [tf.cast(k, tf.int32) for k in input_layers]\n\n pooled_output, _ = bert_layer(converted_layers)\n output = keras.layers.Dropout(dropout)(pooled_output)\n output = keras.layers.Dense(num_classes, activation=activation)(output)\n model = keras.Model(input_layers, output)\n return model\n\n\ndef compile_bert_classifier(\n model: tf.keras.Model,\n loss: tf.keras.losses = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True),\n learning_rate: float = 2e-5,\n metrics: Optional[List[Union[str, tf.keras.metrics.Metric]]] = None):\n \"\"\"Compile the BERT classifier using suggested parameters.\n\n Args:\n model: A keras model. Most likely the output of build_bert_classifier.\n loss: tf.keras.losses. The suggested loss function expects integer labels\n (e.g. 0, 1, 2). If the labels are one-hot encoded, consider using\n tf.keras.lossesCategoricalCrossEntropy with from_logits set to true.\n learning_rate: Suggested learning rate to be used in\n tf.keras.optimizer.Adam. The three suggested learning_rates for\n fine-tuning are [2e-5, 3e-5, 5e-5].\n metrics: Default None will use ['sparse_categorical_accuracy']. 
An array of\n strings or tf.keras.metrics.\n\n Returns:\n None.\n \"\"\"\n if metrics is None:\n metrics = [\"sparse_categorical_accuracy\"]\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate),\n loss=loss,\n metrics=metrics)\n\n\ndef build_and_compile_bert_classifier(\n bert_layer: tf.keras.layers.Layer,\n max_len: int,\n num_classes: int,\n learning_rate: float = 5e-5,\n metrics: Optional[List[Union[str, tf.keras.metrics.Metric]]] = None):\n \"\"\"Build and compile keras BERT classification model.\n\n Apart from the necessary inputs, use default/suggested parameters in build\n and compile BERT classifier functions.\n\n Args:\n bert_layer: A tensorflow_hub.KerasLayer intence of BERT layer.\n max_len: The maximum length of preprocessed tokens.\n num_classes: Number of unique classes in the labels. Determines the output\n shape of the classification layer.\n learning_rate: Suggested learning rate to be used in\n tf.keras.optimizer.Adam. The three suggested learning_rates for\n fine-tuning are [2e-5, 3e-5,5e-5]\n metrics: Default None will use ['sparse_categorical_accuracy']. An array of\n strings or tf.keras.metrics.\n\n Returns:\n A compiled keras BERT Classification model.\n \"\"\"\n if metrics is None:\n metrics = [\"sparse_categorical_accuracy\"]\n\n model = build_bert_classifier(bert_layer, max_len, num_classes)\n\n compile_bert_classifier(model, learning_rate=learning_rate, metrics=metrics)\n return model\n" ]
[ [ "tensorflow.test.main" ], [ "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.cast", "tensorflow.keras.Model", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Input" ] ]
Sudhakaran7/car_gesture
[ "19eb2ba7ab5caeee9589cf639cddb74a9ece97d4" ]
[ "gesturecontrol.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 21 20:42:01 2020\n@author: Gnana Murthiy\n@description: Game controlling with Fists in Navy blue gloves using openCV. Left Fist- Break Righ Fist- acceleration\nThis code is inspired by a project, by Patel Digant: https://github.com/pateldigant/gesture-gaming-python \nCustom Logic was written to handle both the keys simultaneously for gaming requirements.\n\"\"\"\n\nfrom imutils.video import VideoStream\nimport numpy as np\nimport cv2\nimport imutils\nimport time\nfrom directkeys import right_pressed,left_pressed\nfrom directkeys import PressKey, ReleaseKey \n\n\nbreak_key_pressed=left_pressed\naccelerato_key_pressed=right_pressed\n\n# define the lower and upper boundaries of the \"navy blue\" object in the HSV color space\n#https://stackoverflow.com/questions/36817133/identifying-the-range-of-a-color-in-hsv-using-opencv\nblueLower = np.array([110, 40, 40])\nblueUpper = np.array([130,255,255])\n\nvs = VideoStream(src=0).start()\n \n# allow the camera or video file to warm up\ntime.sleep(2.0)\ninitial = True\nflag = False\ncurrent_key_pressed = set()\ncircle_radius = 30\nwindowSize = 200\nlr_counter = 0\n\n# keep looping\nbreak_pressed=False\naccelerator_pressed=False\nwhile True:\n keyPressed = False\n break_pressed=False\n accelerator_pressed=False\n # grab the current frame\n frame = vs.read()\n height,width = frame.shape[:2]\n \n #Flipped the frame so that left hand appears on the left side and right hand appears on the right side\n frame = cv2.flip(frame,1);\n \n # resize the frame, blur it, and convert it to the HSV color space\n frame = imutils.resize(frame, height=300)\n frame = imutils.resize(frame, width=600)\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n \n # crteate a mask for the orange color and perform dilation and erosion to remove any small\n # blobs left in the mask\n mask = cv2.inRange(hsv, blueLower, blueUpper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n \n # find contours in the mask and initialize the current\n # (x, y) center of the orange object\n\n # divide the frame into two halves so that we can have one half control the acceleration/brake \n # and other half control the left/right steering.\n left_mask = mask[:,0:width//2,]\n right_mask = mask[:,width//2:,]\n\n #find the contours in the left and right frame to find the center of the object\n cnts_left = cv2.findContours(left_mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts_left = imutils.grab_contours(cnts_left)\n center_left = None\n\n cnts_right = cv2.findContours(right_mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts_right = imutils.grab_contours(cnts_right)\n center_right = None\n # only proceed if at least one contour was found\n key_count=0\n key_pressed=0\n if len(cnts_left) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and centroid\n c = max(cnts_left, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n # find the center from the moments 0.000001 is added to the denominator so that divide by \n # zero exception doesn't occur\n center_left = (int(M[\"m10\"] / (M[\"m00\"]+0.000001)), int(M[\"m01\"] / (M[\"m00\"]+0.000001)))\n #print(\"center_left\",center_left)\n # only proceed if the radius meets a minimum size\n if radius > circle_radius:\n # draw the circle and centroid on the frame,\n 
cv2.circle(frame, (int(x), int(y)), int(radius),\n (0, 0, 255), 2)\n cv2.circle(frame, center_left, 5, (0, 0, 255), -1)\n #Bottom Left region\n if center_left[1] > 250:\n cv2.putText(frame,'Break Applied',(10,30),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),3)\n PressKey(break_key_pressed)\n break_pressed=True\n current_key_pressed.add(break_key_pressed)\n #Break key- 75 #Acc key-77\n key_pressed=break_key_pressed\n keyPressed = True\n key_count=key_count+1\n # only proceed if at least one contour was found\n if len(cnts_right) > 0:\n c2 = max(cnts_right, key=cv2.contourArea)\n ((x2, y2), radius2) = cv2.minEnclosingCircle(c2)\n M2 = cv2.moments(c2)\n center_right = (int(M2[\"m10\"] / (M2[\"m00\"]+0.000001)), int(M2[\"m01\"] / (M2[\"m00\"]+0.000001)))\n center_right = (center_right[0]+width//2,center_right[1])\n # only proceed if the radius meets a minimum size\n if radius2 > circle_radius:\n cv2.circle(frame, (int(x2)+width//2, int(y2)), int(radius2),\n (0, 255, 0), 2)\n cv2.circle(frame, center_right, 5, (0, 255, 0), -1)\n #Bottom Right region\n if center_right[1] >250 :\n cv2.putText(frame,'Acc. Applied',(350,30),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),3)\n PressKey(accelerato_key_pressed)\n key_pressed=accelerato_key_pressed\n accelerator_pressed=True\n keyPressed = True\n current_key_pressed.add(accelerato_key_pressed)\n key_count=key_count+1\n \n frame_copy=frame.copy()\n #Bottom left region rectangle\n frame_copy = cv2.rectangle(frame_copy,(0,height//2 ),(width//2,width),(255,255,255),1)\n cv2.putText(frame_copy,'Break',(10,280),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),3)\n #Bottom right region rectangle\n frame_copy = cv2.rectangle(frame_copy,(width//2,height//2),(width,height),(255,255,255),1)\n cv2.putText(frame_copy,'Acceleration',(330,280),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),3)\n\n # show the frame to our screen\n cv2.imshow(\"Frame\", frame_copy)\n\n #If part: We need to release the pressed key if none of the key is pressed else the program will keep on sending\n #Else part:If different keys(Only one key in each frame) are pressed in previous and current frames, then we must\n #release previous frame key, Also release the key in current frame key for smoother control\n if not keyPressed and len(current_key_pressed) != 0:\n for key in current_key_pressed:\n ReleaseKey(key)\n current_key_pressed = set()\n elif key_count==1 and len(current_key_pressed)==2: \n for key in current_key_pressed: \n if key_pressed!=key:\n ReleaseKey(key)\n current_key_pressed = set()\n for key in current_key_pressed:\n ReleaseKey(key)\n current_key_pressed = set()\n \n key = cv2.waitKey(1) & 0xFF\n # if the 'q' key is pressed, stop the loop\n if key == ord(\"q\"):\n break\n \n\nvs.stop() \n# close all windows\ncv2.destroyAllWindows()" ]
[ [ "numpy.array" ] ]
JulioZanotto/CarND_behavioral_cloning_P3
[ "86fb6a4381029bd018234082298dd2a5446fe1bc" ]
[ "model_generator.py" ]
[ "# All Libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport csv\nfrom tqdm import tqdm\n\n# Setup Keras\nimport keras\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\nfrom keras.layers.convolutional import Conv2D, Cropping2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers import GaussianNoise\nfrom keras.optimizers import Adam\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\n# Here this configuration was needed to use the GPU, probably because o CUDA and CUDnn\n# otherwise it wouldnt run\nconfig = tf.compat.v1.ConfigProto(\n device_count = {'GPU': 0}\n )\n\nsess = tf.Session(config=config) \nkeras.backend.set_session(sess)\n\n# Read the driving.csv with all the collected data\ndrive_csv = pd.read_csv('driving_log.csv', header=None)\n\n# Dropping and working with the dataframe for a better and easier future generator\ndrive_csv.drop(columns=[4,5,6], inplace=True)\n\n# Dealing with the names of the files\ndrive_csv['center'] = drive_csv[0].apply(lambda x: x.split('/')[-1])\ndrive_csv['left'] = drive_csv[1].apply(lambda x: x.split('/')[-1])\ndrive_csv['right'] = drive_csv[2].apply(lambda x: x.split('/')[-1])\n\n# Generating the dataframe for the generator\ndrive_dict = pd.DataFrame()\n\nfor i in tqdm(range(len(drive_csv))):\n\n # Storing the data\n images = []\n measurements = []\n \n # Get the center measurement for angle correction for the right and left image\n measurement_center = float(drive_csv.iloc[i, 3])\n \n # create adjusted steering measurements for the side camera images\n correction = 0.2 # this is a parameter to tune\n steering_left = measurement_center + correction\n steering_right = measurement_center - correction\n\n # Appending all data\n measurements.append(measurement_center)\n measurements.append(steering_left)\n measurements.append(steering_right)\n images.append(drive_csv.iloc[i, 4])\n images.append(drive_csv.iloc[i, 5])\n images.append(drive_csv.iloc[i, 6])\n \n # Storing in a dataframe for a cleaner generator (batches)\n for j in range(3):\n drive_dict = drive_dict.append({'images': images[j], 'angle': measurements[j]}, \n ignore_index=True)\n\n# Example code from Udacity to get the samples for the generator\nsamples = []\nfor line in drive_dict.values:\n samples.append(line)\n\n# Using sklearn to split the data in train and validation, chose a split of 25% for Validation\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.25)\n\n# Creating the generator\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n measurements = []\n\n for batch_sample in batch_samples:\n measurement_center = float(batch_sample[1])\n \n # Get the image and convert to RGB\n image_center = cv2.imread('./IMG/' + batch_sample[0])\n image_center = cv2.cvtColor(image_center, cv2.COLOR_BGR2RGB)\n images.append(image_center)\n measurements.append(measurement_center)\n\n\n # Transform into array\n X_train = np.array(images)\n y_train = np.array(measurements) \n \n yield shuffle(X_train, y_train)\n\n# Model Architecture\n# Inspired on the NVIDIA model, modified the fully connected layer\nmodel = Sequential()\n\n# Lambda layer 
for normalization, GaussianNoise for better generalization and\n# the Cropping for the better ROI\nmodel.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)))\nmodel.add(GaussianNoise(0.1))\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\n\n#Layers just like NVIDIA model\nmodel.add(Conv2D(24, (5,5), activation='relu'))\n\n# Added a MaxPooling on these next layer for a smaller model\n# The performance was better with same Mean Squared Error\nmodel.add(Conv2D(36, (5,5), activation='relu'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Conv2D(48, (5,5), activation='relu'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D())\n\n# Fully Connected, made it a little smaller from NVIDIA\nmodel.add(Flatten())\n\n# Added DropOut on the fully connected layer for better regularization\nmodel.add(Dense(200))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(100))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n\n# Output of the model, single neuron for angle prediction\nmodel.add(Dense(1))\n\n# Set our batch size\nbatch_size=32\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\n# I chose a lower lr for the Adam, instead of the 1e-3, made a better convergence\noptim = Adam(lr=0.0001)\n\n# Model compiled with the MSE error for the regression task\nmodel.compile(loss='mse', optimizer=optim, metrics=['mse'])\n\n# Model training\nmodel.fit_generator(train_generator,\n steps_per_epoch=np.ceil(len(train_samples)/batch_size),\n validation_data=validation_generator,\n validation_steps=np.ceil(len(validation_samples)/batch_size),\n epochs=7, verbose=1)\n\n# After the training save the model\nmodel.save('model_trained.h5')" ]
[ [ "tensorflow.compat.v1.ConfigProto", "pandas.read_csv", "sklearn.utils.shuffle", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "tensorflow.Session", "numpy.array" ] ]
mindw96/Fast-SRGAN
[ "d8b779a09df81394d44483e0d2664f8744a4c7b5" ]
[ "dataloader.py" ]
[ "import tensorflow as tf\nimport os\n\nfrom tensorflow.python.ops import array_ops, math_ops\n\n\nclass DataLoader(object):\n \"\"\"Data Loader for the SR GAN, that prepares a tf data object for training.\"\"\"\n\n def __init__(self, image_dir, hr_image_size):\n \"\"\"\n Initializes the dataloader.\n Args:\n image_dir: The path to the directory containing high resolution images.\n hr_image_size: Integer, the crop size of the images to train on (High\n resolution images will be cropped to this width and height).\n Returns:\n The dataloader object.\n \"\"\"\n self.image_paths = [os.path.join(image_dir, x) for x in os.listdir(image_dir)]\n self.image_size = hr_image_size\n\n def _parse_image(self, image_path):\n \"\"\"\n Function that loads the images given the path.\n Args:\n image_path: Path to an image file.\n Returns:\n image: A tf tensor of the loaded image.\n \"\"\"\n\n image = tf.io.read_file(image_path)\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n\n # Check if image is large enough\n if tf.keras.backend.image_data_format() == 'channels_last':\n shape = array_ops.shape(image)[:2]\n else:\n shape = array_ops.shape(image)[1:]\n cond = math_ops.reduce_all(shape >= tf.constant(self.image_size))\n\n image = tf.cond(cond, lambda: tf.identity(image),\n lambda: tf.image.resize(image, [self.image_size, self.image_size]))\n\n return image\n\n def _random_crop(self, image):\n \"\"\"\n Function that crops the image according a defined width\n and height.\n Args:\n image: A tf tensor of an image.\n Returns:\n image: A tf tensor of containing the cropped image.\n \"\"\"\n\n image = tf.image.random_crop(image, [self.image_size, self.image_size, 3])\n\n return image\n\n def _high_low_res_pairs(self, high_res):\n \"\"\"\n Function that generates a low resolution image given the\n high resolution image. 
The downsampling factor is 4x.\n Args:\n high_res: A tf tensor of the high res image.\n Returns:\n low_res: A tf tensor of the low res image.\n high_res: A tf tensor of the high res image.\n \"\"\"\n\n low_res = tf.image.resize(high_res,\n [self.image_size // 4, self.image_size // 4],\n method='bicubic')\n\n return low_res, high_res\n\n def _rescale(self, low_res, high_res):\n \"\"\"\n Function that rescales the pixel values to the -1 to 1 range.\n For use with the generator output tanh function.\n Args:\n low_res: The tf tensor of the low res image.\n high_res: The tf tensor of the high res image.\n Returns:\n low_res: The tf tensor of the low res image, rescaled.\n high_res: the tf tensor of the high res image, rescaled.\n \"\"\"\n high_res = high_res * 2.0 - 1.0\n\n return low_res, high_res\n\n def dataset(self, batch_size, threads=4):\n \"\"\"\n Returns a tf dataset object with specified mappings.\n Args:\n batch_size: Int, The number of elements in a batch returned by the dataset.\n threads: Int, CPU threads to use for multi-threaded operation.\n Returns:\n dataset: A tf dataset object.\n \"\"\"\n\n # Generate tf dataset from high res image paths.\n dataset = tf.data.Dataset.from_tensor_slices(self.image_paths)\n\n # Read the images\n dataset = dataset.map(self._parse_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # Crop out a piece for training\n dataset = dataset.map(self._random_crop, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # Generate low resolution by downsampling crop.\n dataset = dataset.map(self._high_low_res_pairs, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # Rescale the values in the input\n dataset = dataset.map(self._rescale, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # Batch the input, drop remainder to get a defined batch size.\n # Prefetch the data for optimal GPU utilization.\n dataset = dataset.shuffle(30).batch(batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset\n" ]
[ [ "tensorflow.constant", "tensorflow.python.ops.array_ops.shape", "tensorflow.keras.backend.image_data_format", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.identity", "tensorflow.image.random_crop", "tensorflow.image.resize", "tensorflow.image.convert_image_dtype", "tensorflow.io.read_file", "tensorflow.image.decode_jpeg" ] ]
nspope/tskit
[ "d421b4396a46472f74d8667a37413c188d1bddaf" ]
[ "python/tests/test_wright_fisher.py" ]
[ "# MIT License\n#\n# Copyright (c) 2018-2019 Tskit Developers\n# Copyright (C) 2017 University of Oxford\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nTest various functions using messy tables output by a forwards-time simulator.\n\"\"\"\nimport itertools\nimport random\n\nimport msprime\nimport numpy as np\nimport numpy.testing as nt\nimport pytest\n\nimport tests as tests\nimport tests.tsutil as tsutil\nimport tskit\n\n\nclass WrightFisherSimulator:\n \"\"\"\n SIMPLE simulation of `num_pops` bisexual, haploid Wright-Fisher populations\n of size `N` for `ngens` generations, in which each individual survives with\n probability `survival` and only those who die are replaced. If `num_pops` is\n greater than 1, the individual to be replaced has a chance `mig_rate` of\n being the offspring of nodes from a different and randomly chosen\n population. If `num_loci` is None, the chromosome is 1.0 Morgans long. If\n `num_loci` not None, a discrete recombination model is used where\n breakpoints are chosen uniformly from 1 to `num_loci` - 1. 
If\n `deep_history` is True, a history to coalescence of just one population of\n `self.N` samples is added at the beginning.\n \"\"\"\n\n def __init__(\n self,\n N,\n survival=0.0,\n seed=None,\n deep_history=True,\n debug=False,\n initial_generation_samples=False,\n num_loci=None,\n num_pops=1,\n mig_rate=0.0,\n record_migrations=False,\n record_individuals=True,\n ):\n self.N = N\n self.num_pops = num_pops\n self.num_loci = num_loci\n self.survival = survival\n self.mig_rate = mig_rate\n self.record_migrations = record_migrations\n self.record_individuals = record_individuals\n self.deep_history = deep_history\n self.debug = debug\n self.initial_generation_samples = initial_generation_samples\n self.seed = seed\n self.rng = random.Random(seed)\n\n def random_breakpoint(self):\n if self.num_loci is None:\n return min(1.0, max(0.0, 2 * self.rng.random() - 0.5))\n else:\n return self.rng.randint(1, self.num_loci - 1)\n\n def run(self, ngens):\n L = 1\n if self.num_loci is not None:\n L = self.num_loci\n tables = tskit.TableCollection(sequence_length=L)\n for _ in range(self.num_pops):\n tables.populations.add_row()\n if self.deep_history:\n # initial population\n population_configurations = [\n msprime.PopulationConfiguration(sample_size=self.N)\n ]\n init_ts = msprime.simulate(\n population_configurations=population_configurations,\n recombination_rate=1.0,\n length=L,\n random_seed=self.seed,\n )\n init_tables = init_ts.dump_tables()\n flags = init_tables.nodes.flags\n if not self.initial_generation_samples:\n flags = np.zeros_like(init_tables.nodes.flags)\n tables.nodes.set_columns(time=init_tables.nodes.time + ngens, flags=flags)\n tables.edges.set_columns(\n left=init_tables.edges.left,\n right=init_tables.edges.right,\n parent=init_tables.edges.parent,\n child=init_tables.edges.child,\n )\n else:\n flags = 0\n if self.initial_generation_samples:\n flags = tskit.NODE_IS_SAMPLE\n for p in range(self.num_pops):\n for _ in range(self.N):\n individual = -1\n if self.record_individuals:\n individual = tables.individuals.add_row(parents=[-1, -1])\n tables.nodes.add_row(\n flags=flags, time=ngens, population=p, individual=individual\n )\n\n pops = [\n list(range(p * self.N, (p * self.N) + self.N)) for p in range(self.num_pops)\n ]\n pop_ids = list(range(self.num_pops))\n for t in range(ngens - 1, -1, -1):\n if self.debug:\n print(\"t:\", t)\n print(\"pops:\", pops)\n dead = [[self.rng.random() > self.survival for _ in pop] for pop in pops]\n # sample these first so that all parents are from the previous gen\n parent_pop = []\n new_parents = []\n for p in pop_ids:\n w = [\n 1 - self.mig_rate if i == p else self.mig_rate / (self.num_pops - 1)\n for i in pop_ids\n ]\n parent_pop.append(self.rng.choices(pop_ids, w, k=sum(dead[p])))\n new_parents.append(\n [\n self.rng.choices(pops[parent_pop[p][k]], k=2)\n for k in range(sum(dead[p]))\n ]\n )\n\n if self.debug:\n for p in pop_ids:\n print(\"Replacing\", sum(dead[p]), \"individuals from pop\", p)\n for p in pop_ids:\n k = 0\n for j in range(self.N):\n if dead[p][j]:\n lparent, rparent = new_parents[p][k]\n individual = -1\n if self.record_individuals:\n individual = tables.individuals.add_row(\n parents=[\n tables.nodes[lparent].individual,\n tables.nodes[rparent].individual,\n ]\n )\n offspring = tables.nodes.add_row(\n time=t, population=p, individual=individual\n )\n if parent_pop[p][k] != p and self.record_migrations:\n tables.migrations.add_row(\n left=0.0,\n right=L,\n node=offspring,\n source=parent_pop[p][k],\n dest=p,\n time=t,\n )\n k 
+= 1\n bp = self.random_breakpoint()\n if self.debug:\n print(\"--->\", offspring, lparent, rparent, bp)\n pops[p][j] = offspring\n if bp > 0.0:\n tables.edges.add_row(\n left=0.0, right=bp, parent=lparent, child=offspring\n )\n if bp < L:\n tables.edges.add_row(\n left=bp, right=L, parent=rparent, child=offspring\n )\n\n if self.debug:\n print(\"Done! Final pop:\")\n print(pops)\n flags = tables.nodes.flags\n flattened = [n for pop in pops for n in pop]\n flags[flattened] = tskit.NODE_IS_SAMPLE\n tables.nodes.flags = flags\n tables.time_units = \"generations\"\n return tables\n\n\ndef wf_sim(\n N,\n ngens,\n survival=0.0,\n deep_history=True,\n debug=False,\n seed=None,\n initial_generation_samples=False,\n num_loci=None,\n num_pops=1,\n mig_rate=0.0,\n record_migrations=False,\n record_individuals=True,\n):\n sim = WrightFisherSimulator(\n N,\n survival=survival,\n deep_history=deep_history,\n debug=debug,\n seed=seed,\n initial_generation_samples=initial_generation_samples,\n num_loci=num_loci,\n num_pops=num_pops,\n mig_rate=mig_rate,\n record_migrations=record_migrations,\n record_individuals=record_individuals,\n )\n return sim.run(ngens)\n\n\nclass TestSimulation:\n \"\"\"\n Tests that the simulations produce the output we expect.\n \"\"\"\n\n random_seed = 5678\n\n def test_one_gen_multipop_mig_no_deep(self):\n tables = wf_sim(\n N=5,\n ngens=1,\n num_pops=4,\n mig_rate=1.0,\n deep_history=False,\n seed=self.random_seed,\n record_migrations=True,\n )\n assert tables.time_units == \"generations\"\n assert tables.nodes.num_rows == 5 * 4 * (1 + 1)\n assert tables.edges.num_rows > 0\n assert tables.migrations.num_rows == 5 * 4\n assert tables.individuals.num_rows == tables.nodes.num_rows\n\n def test_multipop_mig_deep(self):\n N = 10\n ngens = 20\n num_pops = 3\n tables = wf_sim(\n N=N,\n ngens=ngens,\n num_pops=num_pops,\n mig_rate=1.0,\n seed=self.random_seed,\n record_migrations=True,\n )\n assert tables.nodes.num_rows > (num_pops * N * ngens) + N\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 0\n assert tables.mutations.num_rows == 0\n assert tables.migrations.num_rows >= N * num_pops * ngens\n assert tables.populations.num_rows == num_pops\n assert tables.individuals.num_rows >= num_pops * N * ngens\n\n # sort does not support mig\n tables.migrations.clear()\n # making sure trees are valid\n tables.sort()\n tables.simplify()\n ts = tables.tree_sequence()\n sample_pops = tables.nodes.population[ts.samples()]\n assert np.unique(sample_pops).size == num_pops\n\n def test_multipop_mig_no_deep(self):\n N = 5\n ngens = 5\n num_pops = 2\n tables = wf_sim(\n N=N,\n ngens=ngens,\n num_pops=num_pops,\n mig_rate=1.0,\n deep_history=False,\n seed=self.random_seed,\n record_migrations=True,\n )\n assert tables.nodes.num_rows == num_pops * N * (ngens + 1)\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 0\n assert tables.mutations.num_rows == 0\n assert tables.migrations.num_rows == N * num_pops * ngens\n assert tables.populations.num_rows == num_pops\n assert tables.individuals.num_rows == tables.nodes.num_rows\n # FIXME this is no longer needed.\n # sort does not support mig\n tables.migrations.clear()\n # making sure trees are valid\n tables.sort()\n tables.simplify()\n ts = tables.tree_sequence()\n sample_pops = tables.nodes.population[ts.samples()]\n assert np.unique(sample_pops).size == num_pops\n\n def test_non_overlapping_generations(self):\n tables = wf_sim(N=10, ngens=10, survival=0.0, seed=self.random_seed)\n assert tables.nodes.num_rows > 
0\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 0\n assert tables.mutations.num_rows == 0\n assert tables.migrations.num_rows == 0\n assert tables.individuals.num_rows > 0\n tables.sort()\n tables.simplify()\n ts = tables.tree_sequence()\n # All trees should have exactly one root and all internal nodes should\n # have arity > 1\n for tree in ts.trees():\n assert tree.num_roots == 1\n leaves = set(tree.leaves(tree.root))\n assert leaves == set(ts.samples())\n for u in tree.nodes():\n if tree.is_internal(u):\n assert len(tree.children(u)) > 1\n\n def test_overlapping_generations(self):\n tables = wf_sim(N=30, ngens=10, survival=0.85, seed=self.random_seed)\n assert tables.nodes.num_rows > 0\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 0\n assert tables.mutations.num_rows == 0\n assert tables.migrations.num_rows == 0\n assert tables.individuals.num_rows > 0\n tables.sort()\n tables.simplify()\n ts = tables.tree_sequence()\n for tree in ts.trees():\n assert tree.num_roots == 1\n\n def test_one_generation_no_deep_history(self):\n N = 20\n tables = wf_sim(N=N, ngens=1, deep_history=False, seed=self.random_seed)\n assert tables.nodes.num_rows == 2 * N\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 0\n assert tables.mutations.num_rows == 0\n assert tables.migrations.num_rows == 0\n assert tables.individuals.num_rows > 0\n tables.sort()\n tables.simplify()\n ts = tables.tree_sequence()\n assert tables.nodes.num_rows > 0\n assert tables.edges.num_rows > 0\n ts = tables.tree_sequence()\n for tree in ts.trees():\n all_samples = set()\n for root in tree.roots:\n root_samples = set(tree.samples(root))\n assert len(root_samples & all_samples) == 0\n all_samples |= root_samples\n assert all_samples == set(ts.samples())\n\n def test_many_generations_no_deep_history(self):\n N = 10\n ngens = 100\n tables = wf_sim(N=N, ngens=ngens, deep_history=False, seed=self.random_seed)\n assert tables.nodes.num_rows == N * (ngens + 1)\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 0\n assert tables.mutations.num_rows == 0\n assert tables.migrations.num_rows == 0\n assert tables.individuals.num_rows > 0\n tables.sort()\n tables.simplify()\n ts = tables.tree_sequence()\n assert tables.nodes.num_rows > 0\n assert tables.edges.num_rows > 0\n ts = tables.tree_sequence()\n # We are assuming that everything has coalesced and we have single-root trees\n for tree in ts.trees():\n assert tree.num_roots == 1\n\n def test_with_mutations(self):\n N = 10\n ngens = 100\n tables = wf_sim(N=N, ngens=ngens, deep_history=False, seed=self.random_seed)\n tables.sort()\n ts = tables.tree_sequence()\n ts = tsutil.jukes_cantor(ts, 10, 0.1, seed=self.random_seed)\n tables = ts.tables\n assert tables.sites.num_rows > 0\n assert tables.mutations.num_rows > 0\n samples = np.where(tables.nodes.flags == tskit.NODE_IS_SAMPLE)[0].astype(\n np.int32\n )\n tables.sort()\n tables.simplify(samples)\n assert tables.nodes.num_rows > 0\n assert tables.edges.num_rows > 0\n assert tables.nodes.num_rows > 0\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows > 0\n assert tables.mutations.num_rows > 0\n ts = tables.tree_sequence()\n assert ts.sample_size == N\n for hap in ts.haplotypes():\n assert len(hap) == ts.num_sites\n\n def test_with_recurrent_mutations(self):\n # actually with only ONE site, at 0.0\n N = 10\n ngens = 100\n tables = wf_sim(N=N, ngens=ngens, deep_history=False, seed=self.random_seed)\n tables.sort()\n ts = tables.tree_sequence()\n ts = 
tsutil.jukes_cantor(ts, 1, 10, seed=self.random_seed)\n tables = ts.tables\n assert tables.sites.num_rows == 1\n assert tables.mutations.num_rows > 0\n # before simplify\n for h in ts.haplotypes():\n assert len(h) == 1\n # after simplify\n tables.sort()\n tables.simplify()\n assert tables.nodes.num_rows > 0\n assert tables.edges.num_rows > 0\n assert tables.sites.num_rows == 1\n assert tables.mutations.num_rows > 0\n ts = tables.tree_sequence()\n assert ts.sample_size == N\n for hap in ts.haplotypes():\n assert len(hap) == ts.num_sites\n\n def test_record_individuals_initial_state(self):\n N = 10\n tables = wf_sim(N=N, ngens=0, seed=12345, deep_history=False)\n tables.sort()\n assert len(tables.individuals) == N\n assert len(tables.nodes) == N\n for individual in list(tables.individuals)[:N]:\n assert list(individual.parents) == [-1, -1]\n for j, node in enumerate(tables.nodes):\n assert node.individual == j\n\n def test_record_individuals(self):\n N = 10\n tables = wf_sim(N=N, ngens=10, seed=12345, deep_history=False)\n assert len(tables.individuals) == len(tables.nodes)\n for node_id, individual in enumerate(tables.nodes.individual):\n assert node_id == individual\n tables.sort()\n ts = tables.tree_sequence()\n for tree in ts.trees():\n for u in tree.nodes():\n individual = ts.individual(ts.node(u).individual)\n parent_node = tree.parent(u)\n if parent_node != tskit.NULL:\n parent_individual = ts.individual(ts.node(parent_node).individual)\n assert parent_individual.id in individual.parents\n\n\ndef get_wf_sims(seed):\n wf_sims = []\n for N in [5, 10, 20]:\n for surv in [0.0, 0.5, 0.9]:\n for mut in [0.01, 1.0]:\n for nloci in [1, 2, 3]:\n tables = wf_sim(N=N, ngens=N, survival=surv, seed=seed)\n tables.sort()\n ts = tables.tree_sequence()\n ts = tsutil.jukes_cantor(ts, num_sites=nloci, mu=mut, seed=seed)\n wf_sims.append(ts)\n return wf_sims\n\n\n# List of simulations used to parametrize tests.\nwf_sims = get_wf_sims(1234)\n\n\nclass TestSimplify:\n \"\"\"\n Tests for simplify on cases generated by the Wright-Fisher simulator.\n \"\"\"\n\n def verify_simplify(self, ts, new_ts, samples, node_map):\n \"\"\"\n Check that trees in `ts` match `new_ts` using the specified node_map.\n Modified from `verify_simplify_topology`. 
Also check that the `parent`\n and `time` column in the MutationTable is correct.\n \"\"\"\n # check trees agree at these points\n locs = [random.random() for _ in range(20)]\n locs += random.sample(list(ts.breakpoints())[:-1], min(20, ts.num_trees))\n locs.sort()\n old_trees = ts.trees()\n new_trees = new_ts.trees()\n old_right = -1\n new_right = -1\n for loc in locs:\n while old_right <= loc:\n old_tree = next(old_trees)\n old_left, old_right = old_tree.get_interval()\n assert old_left <= loc < old_right\n while new_right <= loc:\n new_tree = next(new_trees)\n new_left, new_right = new_tree.get_interval()\n assert new_left <= loc < new_right\n # print(\"comparing trees\")\n # print(\"interval:\", old_tree.interval)\n # print(old_tree.draw(format=\"unicode\"))\n # print(\"interval:\", new_tree.interval)\n # print(new_tree.draw(format=\"unicode\"))\n pairs = itertools.islice(itertools.combinations(samples, 2), 500)\n for pair in pairs:\n mapped_pair = [node_map[u] for u in pair]\n mrca1 = old_tree.get_mrca(*pair)\n assert mrca1 != tskit.NULL\n mrca2 = new_tree.get_mrca(*mapped_pair)\n assert mrca2 != tskit.NULL\n assert node_map[mrca1] == mrca2\n mut_parent = tsutil.compute_mutation_parent(ts=ts)\n nt.assert_equal(mut_parent, ts.tables.mutations.parent)\n\n def verify_haplotypes(self, ts, samples):\n \"\"\"\n Check that haplotypes are unchanged by simplify.\n \"\"\"\n sub_ts, node_map = ts.simplify(samples, map_nodes=True, filter_sites=False)\n # Sites tables should be equal\n assert ts.tables.sites == sub_ts.tables.sites\n sub_haplotypes = dict(zip(sub_ts.samples(), sub_ts.haplotypes()))\n all_haplotypes = dict(zip(ts.samples(), ts.haplotypes()))\n mapped_ids = []\n for node_id, h in all_haplotypes.items():\n mapped_node_id = node_map[node_id]\n if mapped_node_id in sub_haplotypes:\n assert h == sub_haplotypes[mapped_node_id]\n mapped_ids.append(mapped_node_id)\n assert sorted(mapped_ids) == sorted(sub_ts.samples())\n\n @pytest.mark.parametrize(\"ts\", wf_sims)\n def test_python_simplify_all_samples(self, ts):\n s = tests.Simplifier(ts, ts.samples())\n py_full_ts, py_full_map = s.simplify()\n full_ts, full_map = ts.simplify(ts.samples(), map_nodes=True)\n assert all(py_full_map == full_map)\n full_ts.tables.assert_equals(py_full_ts.tables, ignore_provenance=True)\n\n @pytest.mark.parametrize(\"ts\", wf_sims)\n @pytest.mark.parametrize(\"nsamples\", [2, 5, 10])\n def test_python_simplify_sample_subset(self, ts, nsamples):\n sub_samples = random.sample(list(ts.samples()), min(nsamples, ts.sample_size))\n s = tests.Simplifier(ts, sub_samples)\n py_small_ts, py_small_map = s.simplify()\n small_ts, small_map = ts.simplify(samples=sub_samples, map_nodes=True)\n small_ts.tables.assert_equals(py_small_ts.tables, ignore_provenance=True)\n self.verify_simplify(ts, small_ts, sub_samples, small_map)\n self.verify_haplotypes(ts, samples=sub_samples)\n\n @pytest.mark.parametrize(\"ts\", wf_sims)\n @pytest.mark.parametrize(\"nsamples\", [2, 5, 10])\n def test_simplify_tables(self, ts, nsamples):\n tables = ts.dump_tables()\n sub_samples = random.sample(list(ts.samples()), min(nsamples, ts.num_samples))\n node_map = tables.simplify(samples=sub_samples)\n small_ts = tables.tree_sequence()\n other_tables = small_ts.dump_tables()\n tables.assert_equals(other_tables, ignore_provenance=True)\n self.verify_simplify(ts, small_ts, sub_samples, node_map)\n\n @pytest.mark.parametrize(\"ts\", wf_sims)\n @pytest.mark.parametrize(\"nsamples\", [2, 5])\n def test_simplify_keep_unary(self, ts, nsamples):\n 
np.random.seed(123)\n ts = tsutil.mark_metadata(ts, \"nodes\")\n sub_samples = random.sample(list(ts.samples()), min(nsamples, ts.num_samples))\n random_nodes = np.random.choice(ts.num_nodes, ts.num_nodes // 2)\n ts = tsutil.insert_individuals(ts, random_nodes)\n ts = tsutil.mark_metadata(ts, \"individuals\")\n\n for params in [{}, {\"keep_unary\": True}, {\"keep_unary_in_individuals\": True}]:\n sts = ts.simplify(sub_samples, **params)\n # check samples match\n assert sts.num_samples == len(sub_samples)\n for n, sn in zip(sub_samples, sts.samples()):\n assert ts.node(n).metadata == sts.node(sn).metadata\n\n # check that nodes are correctly retained: only nodes ancestral to\n # retained samples, and: by default, only coalescent events; if\n # keep_unary_in_individuals then also nodes in individuals; if\n # keep_unary then all such nodes.\n for t in ts.trees(tracked_samples=sub_samples):\n st = sts.at(t.interval.left)\n visited = [False for _ in sts.nodes()]\n for n, sn in zip(sub_samples, sts.samples()):\n last_n = t.num_tracked_samples(n)\n while n != tskit.NULL:\n ind = ts.node(n).individual\n keep = False\n if t.num_tracked_samples(n) > last_n:\n # a coalescent node\n keep = True\n if \"keep_unary_in_individuals\" in params and ind != tskit.NULL:\n keep = True\n if \"keep_unary\" in params:\n keep = True\n if (n in sub_samples) or keep:\n visited[sn] = True\n assert sn != tskit.NULL\n assert ts.node(n).metadata == sts.node(sn).metadata\n assert t.num_tracked_samples(n) == st.num_samples(sn)\n if ind != tskit.NULL:\n sind = sts.node(sn).individual\n assert sind != tskit.NULL\n assert (\n ts.individual(ind).metadata\n == sts.individual(sind).metadata\n )\n sn = st.parent(sn)\n last_n = t.num_tracked_samples(n)\n n = t.parent(n)\n st_nodes = list(st.nodes())\n for k, v in enumerate(visited):\n assert v == (k in st_nodes)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.random.seed", "numpy.unique", "numpy.random.choice", "numpy.zeros_like", "numpy.where" ] ]
blevine37/HierQuant
[ "8b1959ff7b15a691c3cf322ddf109aae003d8725" ]
[ "cmatrix.py" ]
[ "\"\"\"Compressed matrix.\n\nHierarchical matrix implementation.\n\nCode for ab initio quantum chemistry based on a compressed representation of key data structures. Developed by researchers at the Institute for Advanced Copmutational Science (IACS) at Stony Brook University (SBU). Currently a work in progress.\n\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse.linalg as lg\nfrom random import randrange\n\nimport string\nimport random\n\nclass CMatrix():\n \"\"\"CMAtrix class that represents a matrix or a block\"\"\"\n\n def __init__(self, mat):\n \"\"\"\n CMAtrix constructor.\n\n Args:\n mat (ndarray) : Matrix to compess.\n \"\"\"\n\n # Store the number of rows and columns\n self.nr = mat.shape[0]\n self.nc = mat.shape[1]\n\n # Randomly choose matrix (or block) type\n # 0 - dense\n # 1 - decomposed\n # 2 - hierarchical\n self.type = randrange(3)\n\n # Ranks\n k_svd = 6\n k_dense = 2 * k_svd\n\n # Make small blocks dense\n if min(self.nr, self.nc) <= k_dense:\n self.type = 0\n\n # Store the block as is if it is a dense type block\n if self.type == 0:\n self.mat = mat\n\n # Or decompose it using any technique\n # Used SVD, because it was the easiest to implement\n elif self.type == 1:\n u, s, vt = lg.svds(mat, k = k_svd) # SVD rank is 6 by default\n self.u = u\n self.s = s\n self.vt = vt\n\n # Or break it into sub-blocks\n else:\n i = self.nr // 2\n j = self.nc // 2\n self.b11 = CMatrix(mat[:i,:j])\n self.b12 = CMatrix(mat[:i,j:])\n self.b21 = CMatrix(mat[i:,:j])\n self.b22 = CMatrix(mat[i:,j:])\n\n\n def dot(self,x):\n \"\"\"\n Matrix-vector multiplication.\n\n Args:\n x (ndarray) : Initial vector.\n\n Returns:\n y (ndarray) : Resulting vector.\n \"\"\"\n\n # Check if matrix and vector sizes mismatch\n if self.nc != len(x):\n print('Matrix-vector size mismatch')\n sys.exit(1)\n\n # Dense multiplication\n if self.type == 0:\n y = self.mat.dot(x)\n\n # Or multiplication using SVD decomposition\n elif self.type == 1:\n sigma = np.diagflat(self.s) # Form a diagonal matrix from vector S\n y = self.u.dot(sigma.dot(self.vt.dot(x)))\n\n # Or delegate to sub-blocks and combine pieces\n else:\n j = self.nc // 2\n y1 = self.b11.dot(x[:j]) + self.b12.dot(x[j:])\n y2 = self.b21.dot(x[:j]) + self.b22.dot(x[j:])\n y = np.concatenate([y1,y2])\n\n return y\n\n\n def bstr(self):\n \"\"\"\n Computes a character representation of the matrix.\n - Dense block is shown with a letter\n - Decomposed block is shown with a digit\n\n Different sub-blocks use different letters and digits\n so that the overall structure can be easily seen.\n\n Returns:\n s (ndarray) : 2D array of characters, where one character is one element.\n \"\"\"\n\n # Return a block filled with the same random letter if dense\n if self.type == 0:\n char = random.choice(string.ascii_letters)\n s = np.full((self.nr, self.nc), char)\n\n # Or return a block filled with the same random digit if decomposed\n elif self.type == 1:\n digit = random.choice(string.digits)\n s = np.full((self.nr, self.nc), digit)\n\n # Or delegate to sub-blocks and combine pieces\n else:\n s1 = np.concatenate((self.b11.bstr(), self.b12.bstr()), axis=1)\n s2 = np.concatenate((self.b21.bstr(), self.b22.bstr()), axis=1)\n s = np.concatenate((s1, s2), axis=0)\n\n return s\n\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the matrix.\n\n Useful for debugging.\n \"\"\"\n\n s = ' - Dense block is shown with a letter\\n'\n s += ' - Decomposed block is shown with a digit\\n\\n'\n for i in self.bstr():\n s += ''.join(j for j in i)\n s += '\\n'\n\n return 
s\n\n def memory(self):\n \"\"\"\n Computes the size of memory used.\n\n Returns:\n k (int) : The number of doubles stored.\n \"\"\"\n\n # Return the number of elements if dense\n if self.type == 0:\n k = self.nr * self.nc\n\n # Or the number of doubles in SVD decomposition\n elif self.type == 1:\n k = self.u.shape[0] * self.u.shape[1]\n k += self.s.shape[0]\n k += self.vt.shape[0] * self.vt.shape[1]\n\n # Or sum over sub-blocks\n else:\n k1 = self.b11.memory() + self.b12.memory()\n k2 = self.b21.memory() + self.b22.memory()\n k = k1 + k2\n\n return k\n\n\n def error(self,mat):\n \"\"\"\n Computes matrix error.\n\n Generates a number of random vectors, multiplies by the matrix\n and computes the residual norms. The error is relative and is defined as\n a ratio of the residual norm and the norm of the exact solution. \n The final error is averaged over all random vectors.\n\n Args:\n mat (ndarray) : The initial full matrix that is approximated.\n\n Returns:\n e (double): Error.\n \"\"\"\n \n count = 100\n e = 0\n\n for i in range(count):\n x = np.random.rand(n)\n yd = mat.dot(x)\n yc = self.dot(x)\n dt = yd - yc\n e += np.linalg.norm(dt) / np.linalg.norm(yd)\n \n e /= count\n \n return e\n\n\nif __name__ == \"__main__\":\n\n # Matrix size\n n = 40\n\n # Generate a random symmetric matrix\n mat = np.random.rand(n,n)\n mat = (mat + mat.T) / 2\n print('Given matrix:')\n print(mat)\n print()\n\n # Generate a random vector\n print('Given vector:')\n x = np.random.rand(n)\n print(x)\n print()\n\n # Dense matrix vector multiplication\n print('Matrix vector product [Dense]:')\n y = mat.dot(x)\n print(y)\n print()\n\n # Generate a compressed matrix that approximates the given matrix\n print('CMatrix:')\n cmat = CMatrix(mat)\n print(cmat)\n print()\n\n # Compressed matrix vector multiplication\n print('Matrix vector product [CMatrix]:')\n y = cmat.dot(x)\n print(y)\n print()\n\n # Format strings for printing\n fs = '{0:10s} {1:10d} {2:10.5f}'\n fd = '{0:10d} {1:10d} {2:10.5f}'\n\n # Print the information about both matrices\n print(' Name Memory RelError')\n kd = n * n\n kc = cmat.memory()\n ed = 0\n ec = cmat.error(mat)\n print(fs.format('Dense', kd, ed))\n print(fs.format('CMatrix', kc, ec))\n print()\n print()\n\n # Another example\n # Generate many compressed representations of the same matrix\n # Pick the one with twice smaller memory and the smallest error\n print('==================================')\n print('===== Searching for the best =====')\n print('======= Twice smaller size =======')\n print('==================================')\n print('')\n\n # Print header for the table\n print(' Name Memory RelError')\n\n # Generate and find the best\n best_ec = float(\"inf\")\n for i in range(100):\n\n cmat = CMatrix(mat)\n kc = cmat.memory()\n ec = cmat.error(mat)\n print(fd.format(i, kc, ec))\n if kc < kd / 2 and ec < best_ec:\n best_cmat = cmat\n best_kc = kc\n best_ec = ec\n best_i = i\n\n # Print the best matrix\n print('Best:')\n print(fd.format(best_i, best_kc, best_ec))\n print()\n print('Best CMatrix:')\n print(best_cmat)\n\n" ]
[ [ "numpy.diagflat", "numpy.linalg.norm", "scipy.sparse.linalg.svds", "numpy.concatenate", "numpy.full", "numpy.random.rand" ] ]
pozzo-group-robots/OT2Protocols2
[ "2386dae2b7f18a8a42fb8b4a0b8d2c6b2f3ff440" ]
[ "code-collab/hdf5tools/zetasizer_hdf5.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport os\nimport glob\nimport h5py\nimport csv\nfrom fuzzywuzzy import fuzz \nfrom fuzzywuzzy import process\nfrom datetime import datetime\n\n# Make it so samples are saved as own hdf5 files instead. But the nwould groups names just have a dataset within it. \n# Start writing metadata function to grab (if available) hardcoded metadata headers and corresponding keys. \n\n\n# In[2]:\n\n\ndef find_file_path(dir_path):\n \"\"\"\n Finds the file path of files with the appropiate .txt extension\n \n Args: \n dir_path: raw string\n Absolute path of directory where files will be searched for.\n\n Returns:\n working_file_path: list\n A list of absolute paths of txt files found in the provided directory.\n \"\"\"\n\n if os.path.exists(dir_path) is False: #checks and balances\n raise ValueError('Provided path does not exist')\n if os.path.isdir(dir_path) is False:\n raise ValueError('Provided path is not a directory, potentially a file path')\n\n os.chdir(dir_path)\n\n r_file_paths = glob.glob('./*.txt') # list of relative paths of .txt files\n \n working_file_path = [os.path.abspath(r_path) for r_path in r_file_paths]\n \n return working_file_path \n\n\n# In[3]:\n\n\ndef key_value_data_pairs(data_file_path): \n \"\"\"\n Given an txt file of alternating rows of key and values, groups keys list and values list in a list. \n \n Args: \n data_file_path: raw string\n Absolute path of txt file of interest. \n \n Returns:\n kv_pairs: list\n List containing sublist which contain keys and values, [[k1,v1],[k2,v2]...].\n \n \"\"\"\n with open(data_file_path, mode='r') as file: \n reader = csv.reader(file, delimiter=',')\n data = [row for row in reader] \n \n kv_pairs = [data[i:i+2] for i in range(0,len(data),2)]\n return kv_pairs\n\n\n# In[4]:\n\n\ndef create_file(file_path, default_name = True, date_ref = True): \n \"\"\"\n Creates root hdf5 file given an absolute file path. \n Note hdf5 will remain open when returned, to close file.close().\n Useful when wanting to keep experiments under the same Hdf5 file.\n \n Args: \n file_path: raw string\n Absolute path of file.\n \n deafult_name: bool, optional\n Optional string that will be used as created hdf5 file name. If default_name = None\n then file will have the same name as the provided file from file_path. \n \n date_ref: bool, optional\n if False, will block metadata addition of Year-Month-Day-Hour-Minute. Default is True. \n \n Returns: \n hdf5_file: File Object-Like\n A file object like hdf5 root file. \n \n \"\"\"\n if default_name is True:\n hdf5_file_name = os.path.splitext(file_path)[0] + str('.hdf5') #.splitext makes a tuple = (path w/out ext, .ext)\n elif type(default_name) == str:\n hdf5_file_name = default_name + str('.hdf5')\n else:\n raise ValueError('Data type of provide hdf5 file name must be str.') \n\n hdf5_file = h5py.File(name=hdf5_file_name, mode = 'w-')\n \n if date_ref is True:\n date_info = datetime.now().strftime('%Y-%m-%d-%H-%M')\n hdf5_file.attrs['Creation Timestamp'] = date_info\n \n return hdf5_file\n\n# In[4.1]:\n\ndef create_file_sample(name, date_ref = True): \n \"\"\"\n Creates root hdf5 file given an absolute file path. \n Note hdf5 will remain open when returned, to close file.close().\n \n Args: \n file_path: raw string\n Absolute path of file.\n \n deafult_name: bool, optional\n Optional string that will be used as created hdf5 file name. If default_name = None\n then file will have the same name as the provided file from file_path. 
\n \n date_ref: bool, optional\n if False, will block metadata addition of Year-Month-Day-Hour-Minute. Default is True. \n \n Returns: \n hdf5_file: File Object-Like\n A file object like hdf5 root file. \n \n \"\"\"\n date_info = datetime.now().strftime('%Y-%m-%d-%H-%M')\n random_number = str(np.random.randint(1000))\n hdf5_file_name = name + random_number + str('.hdf5') # initially used date, but if two samples have same name and created within ms then error\n hdf5_file = h5py.File(name=hdf5_file_name, mode = 'w-')\n\n if date_ref is True:\n hdf5_file.attrs['Creation Timestamp'] = date_info\n\n return hdf5_file\n\n# In[5]:\n\n\ndef fuzzy_key_pairing(str_obj_list, sesitivity_cutoff = 80): # no need for values \n \"\"\"\n Indentifies index ranges of highly-similar str objectsg. It utilizes the string matching fuzzywuzzy package to compare str objects \n by looking at the following str object AND previous str object in provided list and assiging forward and backwards similarity scores. \n When appropiate the function uses both similarity scores to deteremine whether adjacent elements are similar enough to be \n grouped/included in the same index ranges returned. \n \n Understand this is case sensitive, please VERIFY results. \n \n Args: \n str_obj_list: list\n List of str objects.\n \n sensitivity_cutoff: float or int\n The criteria used for determine whether similarity scores is high enough to group two or more str object. \n The higher the cutoff the more sensitive the grouping will be. \n \n Returns: \n start_stop_idxs: nested list\n List containing lists of length 2, with the entries corresponding to indexes begenning and end of string matching\n in provided key list.\n \n Example:\n str_obj_list_1 = ['Size[1]','Size[2]','Size[3]','Size[4]','Intensity[1]','Intensity[2]','Intensity[3]']\n start_stop_idxs = [[0:3],[4:6]]\n \n \n \"\"\"\n looking_forward_list = []\n looking_backward_list = []\n\n for i in range(len(str_obj_list)):\n \n if i == 0: # At beginning of list the only option is to compare forward\n correct_f = fuzz.ratio(str_obj_list[i],str_obj_list[i+1])\n correct_b = correct_f\n \n elif i == len(str_obj_list)-1:# At end of list the only option is to compare backwards\n correct_b = fuzz.ratio(str_obj_list[i],str_obj_list[i-1])\n correct_f = correct_b # as no way to go back\n \n else: # In all other cases it is possible to compare and assign forward and backward similarity scores\n correct_f = fuzz.ratio(str_obj_list[i],str_obj_list[i+1])\n correct_b = fuzz.ratio(str_obj_list[i],str_obj_list[i-1])\n \n looking_forward_list.append(correct_f)\n looking_backward_list.append(correct_b)\n\n start_stop_idxs = []\n\n for i,(forward_score,backward_score) in enumerate(zip(looking_forward_list,looking_backward_list)):\n \n if backward_score < sesitivity_cutoff and forward_score>sesitivity_cutoff: #start\n start_stop_idxs.append(i)\n \n elif forward_score < sesitivity_cutoff and backward_score>sesitivity_cutoff: #stop\n start_stop_idxs.append(i+1)\n \n else:\n pass\n\n start_stop_idxs = [start_stop_idxs[i:i+2] for i in range(0,len(start_stop_idxs),2)]\n start_stop_idxs[-1].append(len(str_obj_list)) # Accounts for matching ending where similarity scores are satis. until str_obj_list \n # Potential Issue: specific cases where scores fail at beginning of list providing a one element range. In general one element ranges are problmatic. 
\n \n \n return start_stop_idxs\n\n\n# In[6]:\n\n\ndef subgroup(listy, ranges): \n \"\"\"\n Given list and index ranges will group elements into a list based on ranges. Grouped elements are not \"left behind\", \n the grouping leads to deletion of any other instance of the element. \n \n Ex: [1,2,3,4,5,6] w/ ranges [[3,5]] => [1,2,3,[4,5,6]]\n \n Args:\n listy: list\n List that will be split by corresponding ranges.\n \n ranges: list\n List containing list of len two with ranges. Sublist is to have the starting index as the first entry and the \n ending index as the second entry.\n \n Returns:\n listy: list\n Updated list with grouped and replaced elements in accordance with provided ranges. \n \n \"\"\"\n \n for i,r in enumerate(reversed(ranges)): # Going backwards prevents issue of keeping track of locations once deletion occurs.\n \n r_min = r[0]\n r_max = r[1]\n \n replacement = listy[r_min:r_max]\n del listy[r_min:r_max]\n \n listy.insert(r_min,replacement)\n\n return listy\n \n # Idea: Make all elements within Listy list instead of having a combination, should remove one more step of logic? Well, gies both ways\n\n\n# In[7]:\n\n\ndef zetasizer_csv_to_hdf5(data_file_path): # How to pass arguments that are intended for functions used in this functions. \n \n \"\"\"\n Takes key and value pairs and using fuzzy logic matches and groups \"similar\" keys and values then writes \n into groups and datasets. The function is a wrapper of all other functions in this module and is able \n to create and store information from a csv/text file to a hdf5 file given only the absolute file path. \n\n At the moment the requirements to utilize this function correctly are: \n - csv/txt file must be arranged in alternation order of row of headers and row of keys. \n - Add any other encounted limitations.\n \n Args: \n data_file_path: raw string\n Absolute path of txt file of interest. \n \n Returns: \n hdf5_file: File Object-Like\n A file object like hdf5 root file. Note: Ensure to close this file once no longer in use.\n \"\"\"\n \n #hdf5_root_file = create_file(data_file_path)\n \n paired_kv_data = key_value_data_pairs(data_file_path)\n\n \n for i,data_pair in enumerate(paired_kv_data): # Make more general and tease out issue with encoding/decoding.\n \n k_orig = data_pair[0]\n v_orig = data_pair[1]\n\n indexer = k_orig\n ranges = fuzzy_key_pairing(k_orig) # Pull out?\n\n k_u = np.asarray(subgroup(k_orig,ranges))\n v_u = np.asarray(subgroup(v_orig,ranges))\n\n v_enc = []\n \n for iter in v_u:\n \n if type(iter) == str:\n v_enc.append(iter.encode(\"ascii\", \"ignore\"))\n \n elif type(iter) == list: \n asciiList = [n.encode(\"ascii\", \"ignore\") for n in iter]\n v_enc.append(asciiList)\n \n else:\n pass\n\n k_u2 = []\n \n for iter in k_u:\n \n if type(iter) == str:\n k_u2.append(iter)\n \n elif type(iter) == list:\n k_u2.append(iter[0])\n \n else:\n pass\n \n hdf5_file = create_file_sample(name = v_u[0]) # hard coded for sample name, make it a searchable feature\n print('Creating Root File '+v_u[0])\n \n for k,v in zip(k_u2, v_enc):\n group = hdf5_file.create_group(name = k)\n dataset = group.create_dataset(name=k,data=v)\n print('saving dataset'+k)\n\n\n\n" ]
[ [ "numpy.random.randint" ] ]
dougalsutherland/compare_gan
[ "615bdc6fc54e5c074adeee543b779dd504dc7e9f" ]
[ "compare_gan/src/fid_score.py" ]
[ "# coding=utf-8\n# Copyright 2018 Google LLC & Hwalsuk Lee.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Library for evaluating GAN models using Frechet Inception distance.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport functools\nimport numpy as np\nimport tensorflow as tf\n\nlogging = tf.logging\ntfgan_eval = tf.contrib.gan.eval\n\n\n\ndef get_fid_function(eval_image_tensor, gen_image_tensor, num_gen_images,\n num_eval_images, image_range, inception_graph):\n \"\"\"Get a fn returning the FID between distributions defined by two tensors.\n\n Wraps session.run calls to generate num_eval_images images from both\n gen_image_tensor and eval_image_tensor (as num_eval_images is often much\n larger than the training batch size). Then finds the FID between these two\n groups of images.\n\n Args:\n eval_image_tensor: Tensor of shape [batch_size, dim, dim, 3] which evaluates\n to a batch of real eval images. Should be in range [0..255].\n gen_image_tensor: Tensor of shape [batch_size, dim, dim, 3] which evaluates\n to a batch of gen images. Should be in range [0..255].\n num_gen_images: Number of generated images to evaluate FID between\n num_eval_images: Number of real images to evaluate FID between\n image_range: Range of values in the images. Accepted values: \"0_255\".\n inception_graph: GraphDef with frozen inception model.\n\n Returns:\n eval_fn: a function which takes a session as an argument and returns the\n FID between num_eval_images images generated from the distributions\n defined by gen_image_tensor and eval_image_tensor\n \"\"\"\n\n assert image_range == \"0_255\"\n # Set up graph for generating features to pass to FID eval.\n batch_size_gen = gen_image_tensor.get_shape().as_list()[0]\n batch_size_real = eval_image_tensor.get_shape().as_list()[0]\n\n # We want to cover only the case that the real data is bigger than\n # generated (50k vs 10k for CIFAR to be comparable with SN GAN)\n assert batch_size_real >= batch_size_gen\n assert batch_size_real % batch_size_gen == 0\n\n # We preprocess images and extract inception features as soon as they're\n # generated. 
This is to maintain memory efficiency if the images are large.\n # For example, for ImageNet, the inception features are much smaller than\n # the images.\n eval_features_tensor = get_inception_features(eval_image_tensor,\n inception_graph)\n gen_features_tensor = get_inception_features(gen_image_tensor,\n inception_graph)\n\n num_gen_images -= num_gen_images % batch_size_gen\n num_eval_images -= num_eval_images % batch_size_real\n logging.info(\"Evaluating %d real images to match batch size %d\",\n num_eval_images, batch_size_real)\n logging.info(\"Evaluating %d generated images to match batch size %d\",\n num_gen_images, batch_size_gen)\n # Make sure we run the same number of batches, as this is what TFGAN code\n # assumes.\n assert num_eval_images // batch_size_real == num_gen_images // batch_size_gen\n\n # Set up another subgraph for calculating FID from fed images.\n with tf.device(\"/cpu:0\"):\n feed_gen_features = tf.placeholder(\n dtype=tf.float32, shape=[num_gen_images] +\n gen_features_tensor.get_shape().as_list()[1:])\n feed_eval_features = tf.placeholder(\n dtype=tf.float32, shape=[num_eval_images] +\n eval_features_tensor.get_shape().as_list()[1:])\n\n # Set up a variable to hold the last FID value.\n fid_variable = tf.Variable(0.0, name=\"last_computed_FID\", trainable=False)\n\n # Summarize the last computed FID.\n tf.summary.scalar(\"last_computed_FID\", fid_variable)\n\n # Create the tensor which stores the computed FID. We have extracted the\n # features at the point of image generation so classifier_fn=tf.identity.\n fid_tensor = tfgan_eval.frechet_classifier_distance(\n classifier_fn=tf.identity,\n real_images=feed_eval_features,\n generated_images=feed_gen_features,\n num_batches=num_eval_images // batch_size_real)\n\n # Create an op to update the FID variable.\n update_fid_op = fid_variable.assign(fid_tensor)\n\n # Ensure that the variable is updated every time the FID is computed.\n with tf.control_dependencies([update_fid_op]):\n fid_tensor = tf.identity(fid_tensor)\n\n # Define a function which wraps some session.run calls to generate a large\n # number of images and compute FID on them.\n def eval_fn(session):\n \"\"\"Function which wraps session.run calls to evaluate FID.\"\"\"\n logging.info(\"Evaluating.....\")\n logging.info(\"Generating images to feed\")\n num_batches = num_eval_images // batch_size_real\n eval_features_np = []\n gen_features_np = []\n for _ in range(num_batches):\n e, g = session.run([eval_features_tensor, gen_features_tensor])\n eval_features_np.append(e)\n gen_features_np.append(g)\n\n logging.info(\"Generated images successfully.\")\n eval_features_np = np.concatenate(eval_features_np)\n gen_features_np = np.concatenate(gen_features_np)\n\n logging.info(\"Computing FID with generated images...\")\n fid_result = session.run(fid_tensor, feed_dict={\n feed_eval_features: eval_features_np,\n feed_gen_features: gen_features_np})\n\n logging.info(\"Computed FID: %f\", fid_result)\n return fid_result\n\n return eval_fn\n\n\ndef preprocess_for_inception(images):\n \"\"\"Preprocess images for inception.\n\n Args:\n images: images minibatch. Shape [batch size, width, height,\n channels]. 
Values are in [0..255].\n\n Returns:\n preprocessed_images\n \"\"\"\n\n # Images should have 3 channels.\n assert images.shape[3].value == 3\n\n # tfgan_eval.preprocess_image function takes values in [0, 255]\n with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),\n tf.assert_less_equal(images, 255.0)]):\n images = tf.identity(images)\n\n preprocessed_images = tf.map_fn(\n fn=tfgan_eval.preprocess_image,\n elems=images,\n back_prop=False\n )\n\n return preprocessed_images\n\n\ndef get_inception_features(inputs, inception_graph, layer_name=\"pool_3:0\"):\n \"\"\"Compose the preprocess_for_inception function with TFGAN run_inception.\"\"\"\n\n preprocessed = preprocess_for_inception(inputs)\n return tfgan_eval.run_inception(\n preprocessed,\n graph_def=inception_graph,\n output_tensor=layer_name)\n\n\ndef run_inception(images, inception_graph):\n preprocessed = tfgan_eval.preprocess_image(images)\n logits = tfgan_eval.run_inception(preprocessed, graph_def=inception_graph)\n return logits\n\n\ndef inception_score_fn(images, num_batches, inception_graph):\n return tfgan_eval.classifier_score(\n images, num_batches=num_batches,\n classifier_fn=functools.partial(run_inception,\n inception_graph=inception_graph))\n" ]
[ [ "tensorflow.device", "tensorflow.Variable", "tensorflow.control_dependencies", "tensorflow.identity", "numpy.concatenate", "tensorflow.assert_less_equal", "tensorflow.map_fn", "tensorflow.assert_greater_equal", "tensorflow.summary.scalar" ] ]
rafa-cxg/tensorflow-tensorrt-yolo-interface
[ "843beef5c70b5404b59df3bde40977b089f985b0" ]
[ "utils/utils.py" ]
[ "import cv2\nimport time\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom .bbox import BoundBox, bbox_iou\nfrom scipy.special import expit\ndef gpu_nms(boxes, scores, num_classes, max_boxes=20, score_thresh=0.4, iou_thresh=0.5):\n \"\"\"\n /*----------------------------------- NMS on gpu ---------------------------------------*/\n\n Arguments:\n boxes -- tensor of shape [1, 10647, 4] # 10647 boxes\n scores -- tensor of shape [1, 10647, num_classes], scores of boxes\n classes -- the return value of function `read_coco_names`\n Note:Applies Non-max suppression (NMS) to set of boxes. Prunes away boxes that have high\n intersection-over-union (IOU) overlap with previously selected boxes.\n\n max_boxes -- integer, maximum number of predicted boxes you'd like, default is 20\n score_thresh -- real value, if [ highest class probability score < score_threshold]\n then get rid of the corresponding box\n iou_thresh -- real value, \"intersection over union\" threshold used for NMS filtering\n \"\"\"\n\n boxes_list, label_list, score_list = [], [], []\n max_boxes = tf.constant(max_boxes, dtype='int32')\n\n # since we do nms for single image, then reshape it\n boxes = tf.reshape(boxes, [-1,4]) # '-1' means we don't konw the exact number of boxes\n # confs = tf.reshape(confs, [-1,1])\n score = tf.reshape(scores, [-1,num_classes])\n\n # Step 1: Create a filtering mask based on \"box_class_scores\" by using \"threshold\".\n mask = tf.greater_equal(score, tf.constant(score_thresh))\n # Step 2: Do non_max_suppression for each class\n for i in range(num_classes):\n # Step 3: Apply the mask to scores, boxes and pick them out\n filter_boxes = tf.boolean_mask(boxes, mask[:,i])\n filter_score = tf.boolean_mask(score[:,i], mask[:,i])\n nms_indices = tf.image.non_max_suppression(boxes=filter_boxes,\n scores=filter_score,\n max_output_size=max_boxes,\n iou_threshold=iou_thresh, name='nms_indices')\n label_list.append(tf.ones_like(tf.gather(filter_score, nms_indices), 'int32')*i)\n boxes_list.append(tf.gather(filter_boxes, nms_indices))\n score_list.append(tf.gather(filter_score, nms_indices))\n\n boxes = tf.concat(boxes_list, axis=0)\n score = tf.concat(score_list, axis=0)\n label = tf.concat(label_list, axis=0)\n\n return boxes, score, label\n\ndef _sigmoid(x):\n return expit(x)\n\ndef makedirs(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\ndef evaluate(model, \n generator, \n iou_threshold=0.5,\n obj_thresh=0.5,\n nms_thresh=0.45,\n net_h=416,\n net_w=416,\n save_path=None):\n \"\"\" Evaluate a given dataset using a given model.\n code originally from https://github.com/fizyr/keras-retinanet\n\n # Arguments\n model : The model to evaluate.\n generator : The generator that represents the dataset to evaluate.\n iou_threshold : The threshold used to consider when a detection is positive or negative.\n obj_thresh : The threshold used to distinguish between object and non-object\n nms_thresh : The threshold used to determine whether two detections are duplicates\n net_h : The height of the input image to the model, higher value results in better accuracy\n net_w : The width of the input image to the model\n save_path : The path to save images with visualized detections to.\n # Returns\n A dict mapping class names to mAP scores.\n \"\"\" \n # gather all detections and annotations\n all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]\n all_annotations = [[None for i in range(generator.num_classes())] for j in 
range(generator.size())]\n\n for i in range(generator.size()):\n raw_image = [generator.load_image(i)]\n\n # make the boxes and the labels\n pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]\n\n score = np.array([box.get_score() for box in pred_boxes])\n pred_labels = np.array([box.label for box in pred_boxes]) \n \n if len(pred_boxes) > 0:\n pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes]) \n else:\n pred_boxes = np.array([[]]) \n \n # sort the boxes and the labels according to scores\n score_sort = np.argsort(-score)\n pred_labels = pred_labels[score_sort]\n pred_boxes = pred_boxes[score_sort]\n \n # copy detections to all_detections\n for label in range(generator.num_classes()):\n all_detections[i][label] = pred_boxes[pred_labels == label, :]\n\n annotations = generator.load_annotation(i)\n \n # copy detections to all_annotations\n for label in range(generator.num_classes()):\n all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()\n\n # compute mAP by comparing all detections and all annotations\n average_precisions = {}\n \n for label in range(generator.num_classes()):\n false_positives = np.zeros((0,))\n true_positives = np.zeros((0,))\n scores = np.zeros((0,))\n num_annotations = 0.0\n\n for i in range(generator.size()):\n detections = all_detections[i][label]\n annotations = all_annotations[i][label]\n num_annotations += annotations.shape[0]\n detected_annotations = []\n\n for d in detections:\n scores = np.append(scores, d[4])\n\n if annotations.shape[0] == 0:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n continue\n\n overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)\n assigned_annotation = np.argmax(overlaps, axis=1)\n max_overlap = overlaps[0, assigned_annotation]\n\n if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:\n false_positives = np.append(false_positives, 0)\n true_positives = np.append(true_positives, 1)\n detected_annotations.append(assigned_annotation)\n else:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n\n # no annotations -> AP for this class is 0 (is this correct?)\n if num_annotations == 0:\n average_precisions[label] = 0\n continue\n\n # sort by score\n indices = np.argsort(-scores)\n false_positives = false_positives[indices]\n true_positives = true_positives[indices]\n\n # compute false positives and true positives\n false_positives = np.cumsum(false_positives)\n true_positives = np.cumsum(true_positives)\n\n # compute recall and precision\n recall = true_positives / num_annotations\n precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)\n\n # compute average precision\n average_precision = compute_ap(recall, precision) \n average_precisions[label] = average_precision\n\n return average_precisions \n\ndef correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):\n if (float(net_w)/image_w) < (float(net_h)/image_h):\n new_w = net_w\n new_h = (image_h*net_w)/image_w\n else:\n new_h = net_w\n new_w = (image_w*net_h)/image_h\n \n for i in range(len(boxes)):\n x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w\n y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h\n \n boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)\n boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * 
image_w)\n boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)\n boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)\n \ndef do_nms(boxes, nms_thresh):\n if len(boxes) > 0:\n nb_class = len(boxes[0].classes)\n else:\n return\n \n for c in range(nb_class):\n sorted_indices = np.argsort([-box.classes[c] for box in boxes])\n\n for i in range(len(sorted_indices)):\n index_i = sorted_indices[i]\n\n if boxes[index_i].classes[c] == 0: continue\n\n for j in range(i+1, len(sorted_indices)):\n index_j = sorted_indices[j]\n\n if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:\n boxes[index_j].classes[c] = 0\n\ndef decode_netout(netout, anchors, obj_thresh, net_h, net_w):\n grid_h, grid_w = netout.shape[:2]\n nb_box = 3\n netout = netout.reshape((grid_h, grid_w, nb_box, -1))\n nb_class = netout.shape[-1] - 5\n\n boxes = []\n\n netout[..., :2] = _sigmoid(netout[..., :2])\n netout[..., 4] = _sigmoid(netout[..., 4])\n netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])\n netout[..., 5:] *= netout[..., 5:] > obj_thresh\n\n for i in range(grid_h*grid_w):\n row = i // grid_w\n col = i % grid_w\n \n for b in range(nb_box):\n # 4th element is objectness score\n objectness = netout[row, col, b, 4]\n \n if(objectness <= obj_thresh): continue\n \n # first 4 elements are x, y, w, and h\n x, y, w, h = netout[row,col,b,:4]\n\n x = (col + x) / grid_w # center position, unit: image width\n y = (row + y) / grid_h # center position, unit: image height\n w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width\n h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height \n \n # last elements are class probabilities\n classes = netout[row,col,b,5:]\n \n box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)\n\n boxes.append(box)\n\n return boxes\n\ndef preprocess_input(image, net_h, net_w):\n new_h, new_w, _ = image.shape\n\n # determine the new size of the image\n if (float(net_w)/new_w) < (float(net_h)/new_h):\n new_h = (new_h * net_w)//new_w\n new_w = net_w\n else:\n new_w = (new_w * net_h)//new_h\n new_h = net_h\n\n # resize the image to the new size\n resized = cv2.resize(image[:,:,::-1]/255., (new_w, new_h))\n\n # embed the image into the standard letter box\n new_image = np.ones((net_h, net_w, 3)) * 0.5\n new_image[(net_h-new_h)//2:(net_h+new_h)//2, (net_w-new_w)//2:(net_w+new_w)//2, :] = resized\n new_image = np.expand_dims(new_image, 0)\n\n return new_image\n\ndef normalize(image):\n return image/255.\n \ndef get_yolo_boxes(sess, input_tensor, output_tensors, images, net_h, net_w, anchors, obj_thresh, nms_thresh):\n image_h, image_w, _ = images[0].shape\n nb_images = len(images)\n batch_input = np.zeros((nb_images, net_h, net_w, 3))\n\n # preprocess the input\n for i in range(nb_images):\n batch_input[i] = preprocess_input(images[i], net_h, net_w) \n\n # run the prediction\n # batch_output = model.predict_on_batch(batch_input)\n\n batch_output = sess.run(output_tensors, feed_dict={input_tensor:\n np.expand_dims(\n batch_input[i], axis=0)})\n batch_boxes = [None]*nb_images\n\n for i in range(nb_images):\n yolos = [batch_output[0][i], batch_output[1][i], batch_output[2][i]]\n boxes = []\n\n # decode the output of the network\n for j in range(len(yolos)):\n yolo_anchors = anchors[(2-j)*6:(3-j)*6] # config['model']['anchors']\n boxes += decode_netout(yolos[j], yolo_anchors, obj_thresh, net_h, net_w)\n\n # correct the sizes of the bounding boxes\n correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)\n\n # suppress non-maximal 
boxes\n do_nms(boxes, nms_thresh)\n # gpu_nms(boxes)\n batch_boxes[i] = boxes\n\n return batch_boxes\n\n\ndef get_yolo_boxes_ori(model, images, net_h, net_w, anchors, obj_thresh, nms_thresh):\n image_h, image_w, _ = images[0].shape\n nb_images = len(images)\n batch_input = np.zeros((nb_images, net_h, net_w, 3))\n\n # preprocess the input\n for i in range(nb_images):\n batch_input[i] = preprocess_input(images[i], net_h, net_w)\n\n # run the prediction\n batch_output = model.predict_on_batch(batch_input)\n batch_boxes = [None] * nb_images\n\n for i in range(nb_images):\n yolos = [batch_output[0][i], batch_output[1][i], batch_output[2][i]]\n boxes = []\n\n # decode the output of the network\n for j in range(len(yolos)):\n yolo_anchors = anchors[(2 - j) * 6:(3 - j) * 6] # config['model']['anchors']\n boxes += decode_netout(yolos[j], yolo_anchors, obj_thresh, net_h, net_w)\n\n # correct the sizes of the bounding boxes\n correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)\n\n # suppress non-maximal boxes\n do_nms(boxes, nms_thresh)\n\n batch_boxes[i] = boxes\n\n return batch_boxes\ndef compute_overlap(a, b):\n \"\"\"\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n Parameters\n ----------\n a: (N, 4) ndarray of float\n b: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])\n ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])\n\n iw = np.maximum(iw, 0)\n ih = np.maximum(ih, 0)\n\n ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih\n\n ua = np.maximum(ua, np.finfo(float).eps)\n\n intersection = iw * ih\n\n return intersection / ua \n \ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], recall, [1.]))\n mpre = np.concatenate(([0.], precision, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap \n\ndef _softmax(x, axis=-1):\n x = x - np.amax(x, axis, keepdims=True)\n e_x = np.exp(x)\n \n return e_x / e_x.sum(axis, keepdims=True)\n" ]
[ [ "numpy.amax", "numpy.expand_dims", "tensorflow.concat", "tensorflow.image.non_max_suppression", "numpy.cumsum", "numpy.concatenate", "numpy.exp", "numpy.where", "tensorflow.boolean_mask", "numpy.finfo", "tensorflow.gather", "numpy.argmax", "numpy.zeros", "numpy.append", "numpy.argsort", "numpy.array", "numpy.sum", "tensorflow.constant", "numpy.maximum", "scipy.special.expit", "tensorflow.reshape", "numpy.ones" ] ]
zentonllo/gcom
[ "3761ff4f2a68137ac196e75c8651260cb8c79e69" ]
[ "pr3/pruebas_alberto/mlp.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\nimport sys\nimport numpy as np\nimport mlpOptimizer as mlpo\n\n__author__ = \"Ignacio Casso, Daniel Gamo, Gwydion J. Martín, Alberto Terceño\"\n\n\nclass MLP(object):\n\n # Here are some appreciations about notation and the structure of vectors\n # and matrix:\n # N = data number\n # R = layers number (without the imput layer, as it has no activation\n # functions nor weights subindex will be used to name these layers,\n # with 0 being the input layer.\n # Dk = number of neurons on layer k\n\n # The weights' matrix W for each layer will have dimension (Dk, Dk+1)\n # Wij is the i-th weight of the j-th neuron on layer k+1. This decision is\n # forced because of the template, so:\n # to operate with a units vector on the matrix, you have to multiply by the\n # left and so both the activations and the units will be row vectors.\n\n # The matrix that group vectors with N different data (like the matrix x or\n # y) will have a row for each data, so they'll have dimension (N,?).\n\n # The lists of weights' and biases' matrix have the k-th layer data in the\n # (k-1)-th index. It's important to keep this phase shift in mind\n\n # self.nb_layers = R\n\n def __init__(self, K_list,\n activation_functions, diff_activation_functions,\n init_seed=None):\n\n self.K_list = K_list\n self.nb_layers = len(K_list) - 1 # = R\n\n # We suppose they're lists of R elements\n self.activation_functions = activation_functions\n # and that the k-th index represents the (k+1)-th layer\n self.diff_activation_functions = diff_activation_functions\n\n self.init_seed = init_seed\n\n self.weights_list = None # list of R (Dk,Dk+1) matrix\n self.biases_list = None # list of R row vectors of Dk+1 elements\n self.grad_w_list = None # list of R (Dk,Dk+1) matrix\n self.grad_b_list = None # list of R row vectors of Dk+1 elements\n\n self.activations = None # list of R+1 (N,Dk) matrix\n self.units = None # list of R+1 (N,Dk) matrix\n self.y = None # (N,Dr) matrix\n\n self.init_weights()\n\n# %% definition of activation functions and derivatives\n @staticmethod\n def sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\n @staticmethod\n def dsigmoid(z):\n return MLP.sigmoid(z) * (1 - MLP.sigmoid(z))\n\n @staticmethod\n def dtanh(z):\n return 1 - np.tanh(z)**2\n\n @staticmethod\n def relu(z):\n return np.maximum(z, 0)\n\n @staticmethod\n def drelu(z):\n z[z >= 0] = 1 # drelu(0)=1 by agreement\n z[z < 0] = 0\n return z\n\n @staticmethod\n def identity(z):\n return z\n\n @staticmethod\n def didentity(z): # it only works with numpy arrays\n return [1] * z.shape[0]\n\n @staticmethod\n def softmax(z):\n sum_exp = np.sum(np.exp(z))\n return np.exp(z) / sum_exp\n\n # %% cost functions\n @staticmethod\n def binary_cross_entropy(y, t_data):\n return -np.sum(t_data * np.log(y) + (1 - t_data) * np.log(1 - y))\n\n @staticmethod\n def softmax_cross_entropy(y, t_data):\n return -np.sum(t_data * np.log(y))\n\n @staticmethod\n def cost_L2(y, t_data):\n return 0.5 * np.sum((y - t_data)**2)\n\n # %% simple weights initialization\n\n def init_weights(self):\n\n if self.init_seed:\n np.random.seed(self.seed)\n\n weights_list = []\n biases_list = []\n\n for layer in range(self.nb_layers):\n new_W = np.random.randn(self.K_list[layer], self.K_list[layer + 1])\n new_b = np.zeros(self.K_list[layer + 1])\n weights_list.append(new_W)\n biases_list.append(new_b)\n\n self.weights_list = weights_list\n self.biases_list = biases_list\n\n # %% feed forward pass\n # x = (N,D0) matrix\n def 
get_activations_and_units(self, x):\n\n activations = [x]\n units = [x]\n z = x\n for i in range(self.nb_layers):\n # matrix + row vector, so it adds the vector to each of the matrix\n # rows\n a = z.dot(self.weights_list[i]) + self.biases_list[i]\n activations.append(a)\n z = self.activation_functions[i](a)\n units.append(z)\n\n self.activations = activations\n self.units = units\n self.y = z\n\n # %% backpropagation\n # This function calculates the error gradient for each of the data and\n # averages them. All the gradients are calculated at the same time using\n # (N,?) matrix instead of vectors.\n # We use : x = (N,D0) matrix, t = (N,Dr) matrix, delta_k = (N,Dk) matrix\n def get_gradients(self, x, t, beta=0):\n\n # Slightly different from the class notes due to the separation of bs\n # and Ws and the change of the index to name the weights.\n # The functions returns a list of shifted index (k-th index = (k+1)-th\n # layer gradients; the layer 0 (input) has no Ws)\n\n self.get_activations_and_units(x)\n\n N = x.shape[0]\n grad_w_list = [0] * self.nb_layers\n grad_b_list = [0] * self.nb_layers\n\n delta_k1 = None # delta value for the next layer\n\n ks = range(1, self.nb_layers + 1)\n ks.reverse()\n for k in ks: # r, ..., 1\n\n # we calculate the new delta values\n if (k < self.nb_layers):\n # weights of the (k+1)-th layer\n w = self.weights_list[k]\n # activation function derivative on layer k\n dh = self.diff_activation_functions[k - 1]\n # activations from layer k\n a = self.activations[k]\n delta_k = (delta_k1.dot(w.T)) * dh(a)\n else:\n # we can assume the derivative of En respect to the last\n # activations layer is y-t\n delta_k = self.y - t\n\n grad_wk = (np.einsum(\n 'ij,ik', self.units[k - 1], delta_k) / N) + (beta * self.weights_list[k - 1])\n grad_w_list[k - 1] = grad_wk\n\n grad_bk = np.sum(delta_k, axis=0) / N\n grad_b_list[k - 1] = grad_bk\n\n delta_k1 = delta_k\n\n ##\n\n self.grad_w_list = grad_w_list\n self.grad_b_list = grad_b_list\n\n # %%\n # training method for the neuron\n def train(self, x_data, t_data,\n epochs, batch_size,\n initialize_weights=False,\n method='SGD',\n epsilon=0.01,\n beta=0,\n gamma=0.9,\n print_cost=False):\n\n opt = mlpo.Optimizer(self, method, epsilon, beta, gamma)\n\n if initialize_weights:\n self.init_weights()\n\n nb_data = x_data.shape[0]\n index_list = np.arange(nb_data)\n nb_batches = int(nb_data / batch_size)\n\n for _ in range(epochs):\n np.random.shuffle(index_list)\n for batch in range(nb_batches):\n indexes = index_list[batch *\n batch_size:(batch + 1) * batch_size]\n opt.run(x_data[indexes], t_data[indexes])\n\n if print_cost:\n x_batch = x_data\n t_batch = t_data\n self.get_activations_and_units(x_batch)\n if self.activation_functions[-1] == MLP.sigmoid:\n sys.stdout.write('cost = %f\\r' %\n MLP.binary_cross_entropy(self.y, t_batch))\n sys.stdout.flush()\n elif self.activation_functions[-1] == MLP.softmax:\n sys.stdout.write('cost = %f\\r' %\n MLP.softmax_cross_entropy(\n self.y, t_batch))\n sys.stdout.flush()\n else:\n sys.stdout.write('cost = %f\\r' %\n MLP.cost_L2(self.y, t_batch))\n sys.stdout.flush()\n\n# %% let's experiment\n\n\nif __name__ == '__main__':\n\n # %% Create data\n # np.random.seed(5)\n nb_black = 50\n nb_red = 50\n nb_data = nb_black + nb_red\n x_data_black = np.random.randn(nb_black, 2) + np.array([0, 0])\n x_data_red = np.random.randn(nb_red, 2) + np.array([10, 10])\n\n x_data = np.vstack((x_data_black, x_data_red))\n t_data = np.asarray([0] * nb_black + [1] * nb_red).reshape(nb_data, 1)\n\n# %% Net 
structure\n D = x_data.shape[1] # initial dimension\n K = 1 # final dimension\n\n K_list = [D, K] # list of dimensions\n\n activation_functions = [MLP.sigmoid]\n diff_activation_functions = [MLP.dsigmoid]\n\n\n# %%\n mlp = MLP(K_list, activation_functions, diff_activation_functions)\n\n\n# %% Train begins\n mlp.train(x_data, t_data,\n epochs=1000, batch_size=10, initialize_weights=False, method='Nesterov', epsilon=0.1,\n print_cost=True)\n" ]
[ [ "numpy.log", "numpy.maximum", "numpy.random.seed", "numpy.einsum", "numpy.asarray", "numpy.arange", "numpy.random.shuffle", "numpy.random.randn", "numpy.exp", "numpy.tanh", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
KyunghyunLee/TSSL-BP
[ "4ae779cc4cf566adb5e05e7f02dd7de56642d087" ]
[ "functions/tsslbp.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as f\r\nfrom time import time \r\nimport global_v as glv\r\n\r\n\r\nclass TSSLBP(torch.autograd.Function): \r\n @staticmethod\r\n def forward(ctx, inputs, network_config, layer_config):\r\n shape = inputs.shape\r\n n_steps = shape[4] \r\n theta_m = 1/network_config['tau_m']\r\n tau_s = network_config['tau_s']\r\n theta_s = 1/tau_s\r\n threshold = layer_config['threshold']\r\n\r\n mem = torch.zeros(shape[0], shape[1], shape[2], shape[3]).cuda()\r\n syn = torch.zeros(shape[0], shape[1], shape[2], shape[3]).cuda()\r\n syns_posts = []\r\n mems = []\r\n mem_updates = []\r\n outputs = []\r\n for t in range(n_steps):\r\n mem_update = (-theta_m) * mem + inputs[..., t]\r\n mem += mem_update\r\n\r\n out = mem > threshold\r\n out = out.type(torch.float32)\r\n\r\n mems.append(mem)\r\n\r\n mem = mem * (1-out)\r\n outputs.append(out)\r\n mem_updates.append(mem_update)\r\n syn = syn + (out - syn) * theta_s\r\n syns_posts.append(syn)\r\n\r\n mems = torch.stack(mems, dim = 4)\r\n mem_updates = torch.stack(mem_updates, dim = 4)\r\n outputs = torch.stack(outputs, dim = 4)\r\n syns_posts = torch.stack(syns_posts, dim = 4)\r\n ctx.save_for_backward(mem_updates, outputs, mems, syns_posts, torch.tensor([threshold, tau_s, theta_m]))\r\n\r\n return syns_posts\r\n\r\n @staticmethod\r\n def backward(ctx, grad_delta):\r\n (delta_u, outputs, u, syns, others) = ctx.saved_tensors\r\n shape = grad_delta.shape\r\n n_steps = shape[4]\r\n threshold = others[0].item()\r\n tau_s = others[1].item()\r\n theta_m = others[2].item()\r\n\r\n th = 1/(4 * tau_s)\r\n\r\n grad = torch.zeros_like(grad_delta)\r\n\r\n syn_a = glv.syn_a.repeat(shape[0], shape[1], shape[2], shape[3], 1)\r\n partial_a = glv.syn_a/(-tau_s)\r\n partial_a = partial_a.repeat(shape[0], shape[1], shape[2], shape[3], 1)\r\n\r\n o = torch.zeros(shape[0], shape[1], shape[2], shape[3]).cuda()\r\n \r\n theta = torch.zeros(shape[0], shape[1], shape[2], shape[3]).cuda()\r\n for t in range(n_steps-1, -1, -1): \r\n time_end = n_steps\r\n time_len = time_end-t\r\n\r\n out = outputs[..., t]\r\n\r\n partial_u = torch.clamp(-1/delta_u[..., t], -8, 0) * out\r\n \r\n # current time is t_m \r\n partial_a_partial_u = partial_u.unsqueeze(-1).repeat(1, 1, 1, 1, time_len) * partial_a[..., 0:time_len]\r\n\r\n grad_tmp = torch.sum(partial_a_partial_u*grad_delta[..., t:time_end]*tau_s, dim=4) \r\n\r\n if t!=n_steps-1:\r\n grad_tmp += theta * u[..., t] * (-1) * theta_m * partial_u\r\n grad_tmp += theta * (1-theta_m) * (1-out)\r\n \r\n # current time is t_p\r\n theta = grad_tmp * out + theta * (1-out) * (1-theta_m)\r\n\r\n grad_a = torch.sum(syn_a[..., 0:time_len]*grad_delta[..., t:time_end], dim=-1)\r\n\r\n a = 0.2\r\n f = torch.clamp((-1 * u[..., t] + threshold) / a, -8, 8)\r\n f = torch.exp(f)\r\n f = f / ((1 + f) * (1 + f) * a)\r\n\r\n grad_a = grad_a * f\r\n\r\n syn = syns[..., t]\r\n\r\n grad_tmp[syn<th] = grad_a[syn<th]\r\n\r\n grad[..., t] = grad_tmp\r\n\r\n return grad, None, None\r\n \r\n" ]
[ [ "torch.zeros", "torch.sum", "torch.zeros_like", "torch.tensor", "torch.exp", "torch.stack", "torch.clamp" ] ]
mafemorris/pennylane
[ "49655591618af994e18d9558e1dbf23cf9da6ffd" ]
[ "pennylane/gradients/finite_difference.py" ]
[ "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nThis module contains functions for computing the finite-difference gradient\r\nof a quantum tape.\r\n\"\"\"\r\n# pylint: disable=protected-access,too-many-arguments\r\nimport functools\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom scipy.special import factorial\r\n\r\nimport pennylane as qml\r\n\r\nfrom .gradient_transform import gradient_transform\r\n\r\n\r\[email protected]_cache(maxsize=None)\r\ndef finite_diff_coeffs(n, approx_order, strategy):\r\n r\"\"\"Generate the finite difference shift values and corresponding\r\n term coefficients for a given derivative order, approximation accuracy,\r\n and strategy.\r\n\r\n Args:\r\n n (int): Positive integer specifying the order of the derivative. For example, ``n=1``\r\n corresponds to the first derivative, ``n=2`` the second derivative, etc.\r\n approx_order (int): Positive integer referring to the approximation order of the\r\n returned coefficients, e.g., ``approx_order=1`` corresponds to the\r\n first-order approximation to the derivative.\r\n strategy (str): One of ``\"forward\"``, ``\"center\"``, or ``\"backward\"``.\r\n For the ``\"forward\"`` strategy, the finite-difference shifts occur at the points\r\n :math:`x_0, x_0+h, x_0+2h,\\dots`, where :math:`h` is some small\r\n step size. The ``\"backwards\"`` strategy is similar, but in\r\n reverse: :math:`x_0, x_0-h, x_0-2h, \\dots`. Finally, the\r\n ``\"center\"`` strategy results in shifts symmetric around the\r\n unshifted point: :math:`\\dots, x_0-2h, x_0-h, x_0, x_0+h, x_0+2h,\\dots`.\r\n\r\n Returns:\r\n array[float]: A ``(2, N)`` array. The first row corresponds to the\r\n coefficients, and the second row corresponds to the shifts.\r\n\r\n **Example**\r\n\r\n >>> finite_diff_coeffs(n=1, approx_order=1, strategy=\"forward\")\r\n array([[-1., 1.],\r\n [ 0., 1.]])\r\n\r\n For example, this results in the linear combination:\r\n\r\n .. math:: \\frac{-y(x_0) + y(x_0 + h)}{h}\r\n\r\n where :math:`h` is the finite-difference step size.\r\n\r\n More examples:\r\n\r\n >>> finite_diff_coeffs(n=1, approx_order=2, strategy=\"center\")\r\n array([[-0.5, 0.5],\r\n [-1. , 1. ]])\r\n >>> finite_diff_coeffs(n=2, approx_order=2, strategy=\"center\")\r\n array([[-2., 1., 1.],\r\n [ 0., -1., 1.]])\r\n\r\n **Details**\r\n\r\n Consider a function :math:`y(x)`. We wish to approximate the :math:`n`-th\r\n derivative at point :math:`x_0`, :math:`y^{(n)}(x_0)`, by sampling the function\r\n at :math:`N<n` distinct points:\r\n\r\n .. math:: y^{(n)}(x_0) \\approx \\sum_{i=1}^N c_i y(x_i)\r\n\r\n where :math:`c_i` are coefficients, and :math:`x_i=x_0 + s_i` are the points we sample\r\n the function at.\r\n\r\n Consider the Taylor expansion of :math:`y(x_i)` around the point :math:`x_0`:\r\n\r\n .. 
math::\r\n\r\n y^{(n)}(x_0) \\approx \\sum_{i=1}^N c_i y(x_i)\r\n &= \\sum_{i=1}^N c_i \\left[ y(x_0) + y'(x_0)(x_i-x_0) + \\frac{1}{2} y''(x_0)(x_i-x_0)^2 + \\cdots \\right]\\\\\r\n & = \\sum_{j=0}^m y^{(j)}(x_0) \\left[\\sum_{i=1}^N \\frac{c_i s_i^j}{j!} + \\mathcal{O}(s_i^m) \\right],\r\n\r\n where :math:`s_i = x_i-x_0`. For this approximation to be satisfied, we must therefore have\r\n\r\n .. math::\r\n\r\n \\sum_{i=1}^N s_i^j c_i = \\begin{cases} j!, &j=n\\\\ 0, & j\\neq n\\end{cases}.\r\n\r\n Thus, to determine the coefficients :math:`c_i \\in \\{c_1, \\dots, c_N\\}` for particular\r\n shift values :math:`s_i \\in \\{s_1, \\dots, s_N\\}` and derivative order :math:`n`,\r\n we must solve this linear system of equations.\r\n \"\"\"\r\n if n < 1 or not isinstance(n, int):\r\n raise ValueError(\"Derivative order n must be a positive integer.\")\r\n\r\n if approx_order < 1 or not isinstance(approx_order, int):\r\n raise ValueError(\"Approximation order must be a positive integer.\")\r\n\r\n num_points = approx_order + 2 * np.floor((n + 1) / 2) - 1\r\n N = num_points + 1 if n % 2 == 0 else num_points\r\n\r\n if strategy == \"forward\":\r\n shifts = np.arange(N, dtype=np.float64)\r\n\r\n elif strategy == \"backward\":\r\n shifts = np.arange(-N + 1, 1, dtype=np.float64)\r\n\r\n elif strategy == \"center\":\r\n if approx_order % 2 != 0:\r\n raise ValueError(\"Centered finite-difference requires an even order approximation.\")\r\n\r\n N = num_points // 2\r\n shifts = np.arange(-N, N + 1, dtype=np.float64)\r\n\r\n else:\r\n raise ValueError(\r\n f\"Unknown strategy {strategy}. Must be one of 'forward', 'backward', 'center'.\"\r\n )\r\n\r\n # solve for the coefficients\r\n A = shifts ** np.arange(len(shifts)).reshape(-1, 1)\r\n b = np.zeros_like(shifts)\r\n b[n] = factorial(n)\r\n coeffs = np.linalg.solve(A, b)\r\n\r\n coeffs_and_shifts = np.stack([coeffs, shifts])\r\n\r\n # remove all small coefficients and shifts\r\n coeffs_and_shifts[np.abs(coeffs_and_shifts) < 1e-10] = 0\r\n\r\n # remove columns where the coefficients are 0\r\n coeffs_and_shifts = coeffs_and_shifts[:, ~np.all(coeffs_and_shifts == 0, axis=0)]\r\n\r\n # sort columns in ascending order according to abs(shift)\r\n coeffs_and_shifts = coeffs_and_shifts[:, np.argsort(np.abs(coeffs_and_shifts)[1])]\r\n return coeffs_and_shifts\r\n\r\n\r\ndef generate_shifted_tapes(tape, idx, shifts, multipliers=None):\r\n r\"\"\"Generate a list of tapes where the corresponding trainable parameter\r\n index has been shifted by the values given.\r\n\r\n Args:\r\n tape (.QuantumTape): input quantum tape\r\n idx (int): trainable parameter index to shift the parameter of\r\n shifts (Sequence[float or int]): sequence of shift values\r\n multipliers (Sequence[float or int]): Sequence of multiplier values to\r\n scale the parameter by. If not provided, the parameter will\r\n not be scaled.\r\n\r\n Returns:\r\n list[QuantumTape]: List of quantum tapes. Each tape has parameter\r\n ``idx`` shifted by consecutive values of ``shift``. 
The length\r\n of the returned list of tapes will match the length of ``shifts``.\r\n \"\"\"\r\n params = list(tape.get_parameters())\r\n tapes = []\r\n\r\n for i, s in enumerate(shifts):\r\n new_params = params.copy()\r\n shifted_tape = tape.copy(copy_operations=True)\r\n\r\n if multipliers is not None:\r\n m = multipliers[i]\r\n new_params[idx] = new_params[idx] * qml.math.convert_like(m, new_params[idx])\r\n\r\n new_params[idx] = new_params[idx] + qml.math.convert_like(s, new_params[idx])\r\n shifted_tape.set_parameters(new_params)\r\n tapes.append(shifted_tape)\r\n\r\n return tapes\r\n\r\n\r\n@gradient_transform\r\ndef finite_diff(\r\n tape,\r\n argnum=None,\r\n h=1e-7,\r\n approx_order=1,\r\n n=1,\r\n strategy=\"forward\",\r\n f0=None,\r\n validate_params=True,\r\n):\r\n r\"\"\"Transform a QNode to compute the finite-difference gradient of all gate\r\n parameters with respect to its inputs.\r\n\r\n Args:\r\n qnode (pennylane.QNode or .QuantumTape): quantum tape or QNode to differentiate\r\n argnum (int or list[int] or None): Trainable parameter indices to differentiate\r\n with respect to. If not provided, the derivatives with respect to all\r\n trainable parameters are returned.\r\n h (float): finite difference method step size\r\n approx_order (int): The approximation order of the finite-difference method to use.\r\n n (int): compute the :math:`n`-th derivative\r\n strategy (str): The strategy of the finite difference method. Must be one of\r\n ``\"forward\"``, ``\"center\"``, or ``\"backward\"``.\r\n For the ``\"forward\"`` strategy, the finite-difference shifts occur at the points\r\n :math:`x_0, x_0+h, x_0+2h,\\dots`, where :math:`h` is some small\r\n stepsize. The ``\"backwards\"`` strategy is similar, but in\r\n reverse: :math:`x_0, x_0-h, x_0-2h, \\dots`. Finally, the\r\n ``\"center\"`` strategy results in shifts symmetric around the\r\n unshifted point: :math:`\\dots, x_0-2h, x_0-h, x_0, x_0+h, x_0+2h,\\dots`.\r\n f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided,\r\n and the gradient recipe contains an unshifted term, this value is used,\r\n saving a quantum evaluation.\r\n validate_params (bool): Whether to validate the tape parameters or not. If ``True``,\r\n the ``Operation.grad_method`` attribute and the circuit structure will be analyzed\r\n to determine if the trainable parameters support the finite-difference method.\r\n If ``False``, the finite-difference method will be applied to all parameters.\r\n\r\n Returns:\r\n tensor_like or tuple[list[QuantumTape], function]:\r\n\r\n - If the input is a QNode, a tensor\r\n representing the output Jacobian matrix of size ``(number_outputs, number_gate_parameters)``\r\n is returned.\r\n\r\n - If the input is a tape, a tuple containing a list of generated tapes,\r\n in addition to a post-processing function to be applied to the\r\n evaluated tapes.\r\n\r\n **Example**\r\n\r\n This transform can be registered directly as the quantum gradient transform\r\n to use during autodifferentiation:\r\n\r\n >>> dev = qml.device(\"default.qubit\", wires=2)\r\n >>> @qml.qnode(dev, gradient_fn=qml.gradients.finite_diff)\r\n ... def circuit(params):\r\n ... qml.RX(params[0], wires=0)\r\n ... qml.RY(params[1], wires=0)\r\n ... qml.RX(params[2], wires=0)\r\n ... 
return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(0))\r\n >>> params = np.array([0.1, 0.2, 0.3], requires_grad=True)\r\n >>> qml.jacobian(circuit)(params)\r\n tensor([[-0.38751725, -0.18884792, -0.38355708],\r\n [ 0.69916868, 0.34072432, 0.69202365]], requires_grad=True)\r\n\r\n\r\n .. UsageDetails::\r\n\r\n This gradient transform can also be applied directly to :class:`QNode <pennylane.QNode>` objects:\r\n\r\n >>> @qml.qnode(dev)\r\n ... def circuit(params):\r\n ... qml.RX(params[0], wires=0)\r\n ... qml.RY(params[1], wires=0)\r\n ... qml.RX(params[2], wires=0)\r\n ... return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(0))\r\n >>> qml.gradients.finite_diff(circuit)(params)\r\n tensor([[-0.38751725, -0.18884792, -0.38355708],\r\n [ 0.69916868, 0.34072432, 0.69202365]], requires_grad=True)\r\n\r\n This quantum gradient transform can also be applied to low-level\r\n :class:`~.QuantumTape` objects. This will result in no implicit quantum\r\n device evaluation. Instead, the processed tapes, and post-processing\r\n function, which together define the gradient are directly returned:\r\n\r\n >>> with qml.tape.JacobianTape() as tape:\r\n ... qml.RX(params[0], wires=0)\r\n ... qml.RY(params[1], wires=0)\r\n ... qml.RX(params[2], wires=0)\r\n ... qml.expval(qml.PauliZ(0))\r\n ... qml.var(qml.PauliZ(0))\r\n >>> gradient_tapes, fn = qml.gradients.finite_diff(tape)\r\n >>> gradient_tapes\r\n [<JacobianTape: wires=[0, 1], params=3>,\r\n <JacobianTape: wires=[0, 1], params=3>,\r\n <JacobianTape: wires=[0, 1], params=3>,\r\n <JacobianTape: wires=[0, 1], params=3>]\r\n\r\n This can be useful if the underlying circuits representing the gradient\r\n computation need to be analyzed.\r\n\r\n The output tapes can then be evaluated and post-processed to retrieve\r\n the gradient:\r\n\r\n >>> dev = qml.device(\"default.qubit\", wires=2)\r\n >>> fn(qml.execute(gradient_tapes, dev, None))\r\n [[-0.38751721 -0.18884787 -0.38355704]\r\n [ 0.69916862 0.34072424 0.69202359]]\r\n \"\"\"\r\n if argnum is None and not tape.trainable_params:\r\n warnings.warn(\r\n \"Attempted to compute the gradient of a tape with no trainable parameters. \"\r\n \"If this is unintended, please mark trainable parameters in accordance with the \"\r\n \"chosen auto differentiation framework, or via the 'tape.trainable_params' property.\"\r\n )\r\n return [], lambda _: np.zeros([tape.output_dim, len(tape.trainable_params)])\r\n\r\n # TODO: replace the JacobianTape._grad_method_validation\r\n # functionality before deprecation.\r\n if validate_params:\r\n diff_methods = tape._grad_method_validation(\"numeric\")\r\n else:\r\n diff_methods = [\"F\" for i in tape.trainable_params]\r\n\r\n if all(g == \"0\" for g in diff_methods):\r\n return [], lambda _: np.zeros([tape.output_dim, len(tape.trainable_params)])\r\n\r\n gradient_tapes = []\r\n shapes = []\r\n c0 = None\r\n\r\n coeffs, shifts = finite_diff_coeffs(n=n, approx_order=approx_order, strategy=strategy)\r\n\r\n if 0 in shifts:\r\n # Finite difference formula includes a term with zero shift.\r\n\r\n if f0 is None:\r\n # Ensure that the unshifted tape is appended\r\n # to the gradient tapes, if not already.\r\n gradient_tapes.append(tape)\r\n\r\n # Store the unshifted coefficient. 
We know that\r\n # it will always be the first coefficient due to processing.\r\n c0 = coeffs[0]\r\n shifts = shifts[1:]\r\n coeffs = coeffs[1:]\r\n\r\n # TODO: replace the JacobianTape._choose_params_with_methods\r\n # functionality before deprecation.\r\n method_map = dict(tape._choose_params_with_methods(diff_methods, argnum))\r\n\r\n for i, _ in enumerate(tape.trainable_params):\r\n if i not in method_map or method_map[i] == \"0\":\r\n # parameter has zero gradient\r\n shapes.append(0)\r\n continue\r\n\r\n g_tapes = generate_shifted_tapes(tape, i, shifts * h)\r\n gradient_tapes.extend(g_tapes)\r\n shapes.append(len(g_tapes))\r\n\r\n def processing_fn(results):\r\n grads = []\r\n start = 1 if c0 is not None and f0 is None else 0\r\n r0 = f0 or results[0]\r\n\r\n for s in shapes:\r\n\r\n if s == 0:\r\n # parameter has zero gradient\r\n g = qml.math.zeros_like(results[0])\r\n grads.append(g)\r\n continue\r\n\r\n res = results[start : start + s]\r\n start = start + s\r\n\r\n # compute the linear combination of results and coefficients\r\n res = qml.math.stack(res)\r\n g = sum([c * r for c, r in zip(coeffs, res)])\r\n\r\n if c0 is not None:\r\n # add on the unshifted term\r\n g = g + c0 * r0\r\n\r\n grads.append(g / (h**n))\r\n\r\n # The following is for backwards compatibility; currently,\r\n # the device stacks multiple measurement arrays, even if not the same\r\n # size, resulting in a ragged array.\r\n # In the future, we might want to change this so that only tuples\r\n # of arrays are returned.\r\n for i, g in enumerate(grads):\r\n g = qml.math.convert_like(g, results[0])\r\n if hasattr(g, \"dtype\") and g.dtype is np.dtype(\"object\"):\r\n grads[i] = qml.math.hstack(g)\r\n\r\n return qml.math.T(qml.math.stack(grads))\r\n\r\n return gradient_tapes, processing_fn\r\n" ]
[ [ "numpy.linalg.solve", "numpy.abs", "numpy.arange", "numpy.stack", "numpy.dtype", "numpy.all", "numpy.zeros_like", "scipy.special.factorial", "numpy.floor" ] ]
Ovewh/DUST
[ "f54e3288419434407285eb652cb0b5b143ede8f9" ]
[ "DUST/plot/utils.py" ]
[ "import numpy as np\nfrom matplotlib.colors import ListedColormap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib as mpl\nfrom matplotlib.ticker import LogFormatterSciNotation\ndef _gen_log_clevs(dat_min, dat_max):\n \"\"\"Creates a logarithmic color scale.\"\"\"\n\n if dat_max > 0:\n dmx = int(np.round(np.log10(dat_max)))\n else:\n dmx = 1\n\n # TODO: What's the default value of dmn?\n if dat_min > 0:\n dmn = int(np.round(np.log10(dat_min)))\n elif dat_min == 0. or np.isnan(dat_min):\n dmn = dmx - 3\n\n # create equally spaced range\n # ERROR: dmn could be uninitialized\n if dmx == dmn:\n dmx = dmn + 1\n clevs = np.logspace(dmn, dmx, 100)\n\n return clevs\ndef _add_colorbar(im,cticks=None,label='', fmt='%d', log=False):\n \"\"\"Adds Colorbar Nicely to figure\"\"\"\n ax = im.axes\n fig = ax.figure\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"3.5%\", pad=0.05, axes_class=mpl.pyplot.Axes)\n if log:\n formatter = LogFormatterSciNotation(10, labelOnlyBase=False)\n else:\n formatter = fmt\n\n if isinstance(cticks,np.ndarray):\n fig.colorbar(im,ax=ax,cax=cax,label=label, format=formatter, ticks=cticks)\n else:\n fig.colorbar(im,ax=ax,cax=cax,label=label, format=formatter)\n\n\ndef _gen_flexpart_colormap(ctbfile=None, colors=None):\n \"\"\"Generate the ast colormap for FLEXPART.\"\"\"\n\n \n if ctbfile:\n try:\n colors = np.loadtxt(ctbfile)\n except:\n print(\"WARNING: cannot load ctbfile. using colors\")\n if colors:\n name = 'user_colormap'\n else:\n # AST Colorset for FLEXPART\n colors = [\n 1.0000000e+00, 1.0000000e+00, 1.0000000e+00,\n 9.9607843e-01, 9.1372549e-01, 1.0000000e+00,\n 9.8431373e-01, 8.2352941e-01, 1.0000000e+00,\n 9.6470588e-01, 7.1764706e-01, 1.0000000e+00,\n 9.3333333e-01, 6.0000000e-01, 1.0000000e+00,\n 8.9019608e-01, 4.4705882e-01, 1.0000000e+00,\n 8.3137255e-01, 2.0000000e-01, 1.0000000e+00,\n 7.5686275e-01, 0.0000000e+00, 1.0000000e+00,\n 6.6274510e-01, 0.0000000e+00, 1.0000000e+00,\n 5.4901961e-01, 0.0000000e+00, 1.0000000e+00,\n 4.0784314e-01, 0.0000000e+00, 1.0000000e+00,\n 2.4705882e-01, 0.0000000e+00, 1.0000000e+00,\n 7.4509804e-02, 0.0000000e+00, 1.0000000e+00,\n 0.0000000e+00, 2.8235294e-01, 1.0000000e+00,\n 0.0000000e+00, 4.8627451e-01, 1.0000000e+00,\n 0.0000000e+00, 6.3137255e-01, 1.0000000e+00,\n 0.0000000e+00, 7.4509804e-01, 1.0000000e+00,\n 0.0000000e+00, 8.4705882e-01, 1.0000000e+00,\n 0.0000000e+00, 9.3725490e-01, 1.0000000e+00,\n 0.0000000e+00, 1.0000000e+00, 9.7647059e-01,\n 0.0000000e+00, 1.0000000e+00, 8.9411765e-01,\n 0.0000000e+00, 1.0000000e+00, 8.0000000e-01,\n 0.0000000e+00, 1.0000000e+00, 6.9019608e-01,\n 0.0000000e+00, 1.0000000e+00, 5.6470588e-01,\n 0.0000000e+00, 1.0000000e+00, 4.0000000e-01,\n 0.0000000e+00, 1.0000000e+00, 0.0000000e+00,\n 3.9607843e-01, 1.0000000e+00, 0.0000000e+00,\n 5.6470588e-01, 1.0000000e+00, 0.0000000e+00,\n 6.9019608e-01, 1.0000000e+00, 0.0000000e+00,\n 7.9607843e-01, 1.0000000e+00, 0.0000000e+00,\n 8.9411765e-01, 1.0000000e+00, 0.0000000e+00,\n 9.7647059e-01, 1.0000000e+00, 0.0000000e+00,\n 1.0000000e+00, 9.4509804e-01, 0.0000000e+00,\n 1.0000000e+00, 8.7450980e-01, 0.0000000e+00,\n 1.0000000e+00, 7.9215686e-01, 0.0000000e+00,\n 1.0000000e+00, 7.0588235e-01, 0.0000000e+00,\n 1.0000000e+00, 6.0392157e-01, 0.0000000e+00,\n 1.0000000e+00, 4.8235294e-01, 0.0000000e+00,\n 1.0000000e+00, 3.1372549e-01, 0.0000000e+00,\n 1.0000000e+00, 0.0000000e+00, 1.4901961e-01,\n 1.0000000e+00, 0.0000000e+00, 3.3333333e-01,\n 1.0000000e+00, 0.0000000e+00, 4.4705882e-01,\n 
1.0000000e+00, 0.0000000e+00, 5.3725490e-01,\n 1.0000000e+00, 0.0000000e+00, 6.1176471e-01,\n 9.7647059e-01, 0.0000000e+00, 6.6666667e-01,\n 8.9411765e-01, 0.0000000e+00, 6.6666667e-01,\n 7.9607843e-01, 0.0000000e+00, 6.3921569e-01,\n 6.9019608e-01, 0.0000000e+00, 5.9215686e-01,\n 5.6470588e-01, 0.0000000e+00, 5.0980392e-01,\n 3.9607843e-01, 0.0000000e+00, 3.8039216e-01]\n colors = np.reshape(colors, (-1, 3))\n name = 'flexpart_cmap'\n cmap = ListedColormap(colors, name)\n return cmap" ]
[ [ "numpy.logspace", "numpy.isnan", "numpy.reshape", "matplotlib.ticker.LogFormatterSciNotation", "numpy.log10", "matplotlib.colors.ListedColormap", "numpy.loadtxt" ] ]
lab-midas/tcia-lesions
[ "2b8e6e6971c1ab4066e4168d79be73c2a4041ce5" ]
[ "misc.py" ]
[ "import numpy as np\nimport nibabel as nib\nfrom nibabel.orientations import ornt_transform, axcodes2ornt, inv_ornt_aff, apply_orientation, io_orientation, aff2axcodes\nimport pydicom\n\ndef reorient_nifti(img,\n target_orientation=('L','A','S'),\n verbose=False):\n new_ornt = axcodes2ornt(target_orientation)\n vox_array = img.get_fdata()\n affine = img.affine\n orig_ornt = io_orientation(img.affine)\n ornt_trans = ornt_transform(orig_ornt, new_ornt)\n orig_shape = vox_array.shape\n new_vox_array = apply_orientation(vox_array, ornt_trans)\n aff_trans = inv_ornt_aff(ornt_trans, orig_shape)\n new_affine = np.dot(affine, aff_trans)\n if verbose:\n print(f'{aff2axcodes(affine)} -> {aff2axcodes(new_affine)}')\n new_img = nib.Nifti1Image(new_vox_array, new_affine, img.header)\n return new_img\n\n\ndef conv_time(time_str):\n return (float(time_str[:2]) * 3600 + float(time_str[2:4]) * 60 + float(time_str[4:13]))\n\n\ndef calculate_suv_factor(dcm_path):\n ds = pydicom.dcmread(str(dcm_path))\n total_dose = ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose\n start_time = ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime\n half_life = ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife\n acq_time = ds.AcquisitionTime\n weight = ds.PatientWeight\n time_diff = conv_time(acq_time) - conv_time(start_time)\n act_dose = total_dose * 0.5 ** (time_diff / half_life)\n suv_factor = 1000 * weight / act_dose\n return suv_factor\n\n\ndef convert_pet(pet, suv_factor=1.0):\n affine = pet.affine\n pet_data = pet.get_fdata()\n pet_suv_data = (pet_data*suv_factor).astype(np.float32)\n pet_suv = nib.Nifti1Image(pet_suv_data, affine)\n return pet_suv " ]
[ [ "numpy.dot" ] ]
Prem547/ga-learner-dsmp-repo
[ "3bd613f4da5e8c81342aff02cb4f3f37a1f76a30" ]
[ "Prem547-/-greyatom-python-for-data-science/code.py" ]
[ "# --------------\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\n# code starts here\ndf = pd.read_csv(path)\nprint(df.shape)\nprint(df.head(5))\n\nX = df.loc[:, df.columns != 'list_price']\ny = df.iloc[:,1].values\nprint(X)\nprint(y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)\n# code ends here\n#data.iloc[[0,3,6,24], [0,5,6]] # 1st, 4th, 7th, 25th row + 1st 6th 7th columns\n\n\n# --------------\nimport matplotlib.pyplot as plt\n\n# code starts here \ncols = (X_train.columns)\nprint(cols.shape)\n#print(cols)\nfig, axes = plt.subplots(3,3, figsize=(20,10))\n\nfor i in range(0,3):\n for j in range(0,3):\n col = cols[i*3+j]\n axes[i,j].scatter(X_train[col],y_train)\n plt.show()\n# code ends here\n\n\n\n# --------------\n# Code starts here\ncorr = X_train.corr().abs()\nhigh_corr_var=np.where(corr>0.8)\n\n\n\nhigh_corr_var=[(corr.columns[X_train],corr.columns[y_train]) for X_train,y_train in zip(*high_corr_var) if X_train!=y_train and X_train<y_train]\n\nprint(high_corr_var)\n\n\n\n\nX_train.drop(['play_star_rating','val_star_rating'], 1 ,inplace=True)\nX_test.drop(['play_star_rating','val_star_rating'], 1 ,inplace=True)\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Code starts here\nregressor = LinearRegression()\n\nregressor.fit(X_train, y_train)\nprint(regressor)\n\ny_pred = regressor.predict(X_test)\nprint(y_pred)\n\nmse = mean_squared_error(y_test,y_pred) \nprint(mse)\n\nr2 = r2_score(y_test,y_pred)\nprint(r2)\n# Code ends here\n\n\n# --------------\n# Code starts here\nresidual = y_test - y_pred\n#print(residual)\n\nplt.hist(y_test, bins = 8)\nplt.xlabel(\"y_test - y_pred\")\nplt.ylabel(\"residual\")\nplt.show()\n\n# Code ends here\n\n\n" ]
[ [ "sklearn.cross_validation.train_test_split", "pandas.read_csv", "sklearn.metrics.r2_score", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_squared_error", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
szrlee/vae-anomaly-detector
[ "f3406da47abd2b5db72d0be5f51910143da3fe9e" ]
[ "eval.py" ]
[ "\"\"\"\nMain experiment\n\"\"\"\nimport json\nimport os\nimport argparse\nimport torch\nimport numpy as np\nimport pickle\nfrom torch.utils.data import DataLoader\nfrom configparser import ConfigParser\nfrom datetime import datetime\nfrom scipy.special import logsumexp\n\nfrom vae.vae import VAE\nfrom utils.data import SpamDataset\nfrom utils.feature_extractor import FeatureExtractor\nfrom constants import MODELS\nfrom utils.visualization import mean_confidence_interval\n\n\ndef argparser():\n \"\"\"\n Command line argument parser\n \"\"\"\n parser = argparse.ArgumentParser(description='VAE spam detector')\n parser.add_argument('--model', type=str, required=True)\n parser.add_argument(\n '--globals', type=str, default='./configs/globals.ini', \n help=\"Path to the configuration file containing the global variables \"\n \"e.g. the paths to the data etc. See configs/globals.ini for an \"\n \"example.\"\n )\n parser.add_argument(\n '--config', type=str, default=None,\n help=\"Id of the model configuration file. If this argument is not null, \"\n \"the system will look for the configuration file \"\n \"./configs/{args.model}/{args.model}{args.config}.ini\"\n )\n parser.add_argument(\n '--restore_filename', type=str, required=True, \n help=\"Path to a model checkpoint containing trained parameters. \" \n \"If provided, the model will load the trained parameters before \"\n \"resuming training or making a prediction. By default, models are \"\n \"saved in ./checkpoints/<args.model><args.config>/<date>/\"\n )\n return parser.parse_args()\n\n\ndef load_config(args):\n \"\"\"\n Load .INI configuration files\n \"\"\"\n config = ConfigParser()\n\n # Load global variable (e.g. paths)\n config.read(args.globals)\n\n # Path to the directory containing the model configurations\n model_config_dir = os.path.join(config['paths']['configs_directory'], '{}/'.format(args.model))\n\n # Load default model configuration\n default_model_config_filename = '{}.ini'.format(args.model)\n default_model_config_path = os.path.join(model_config_dir, default_model_config_filename)\n config.read(default_model_config_path)\n\n if args.config:\n model_config_filename = '{}{}.ini'.format(args.model, args.config)\n model_config_path = os.path.join(model_config_dir, model_config_filename)\n config.read(model_config_path)\n\n config.set('model', 'device', 'cuda' if torch.cuda.is_available() else 'cpu')\n return config\n\ndef eval(config, testloader):\n storage = {\n # 'll_precision': None, 'll_recall': None, \n 'log_densities': None, 'params': None,\n 'ground_truth': None\n }\n input_dim = testloader.dataset.input_dim_\n vae = VAE(input_dim, config, checkpoint_directory=None)\n vae.to(config['model']['device'])\n if args.restore_filename is not None:\n vae.restore_model(args.restore_filename, epoch=None)\n vae.eval()\n precisions, recalls, all_log_densities = [], [], []\n # z sample sizes: 100\n for i in range(100):\n print(\"evaluation round {}\".format(i))\n _, _, precision, recall, log_densities, ground_truth = vae.evaluate(testloader)\n precisions.append(precision)\n recalls.append(recall)\n all_log_densities.append(np.expand_dims(log_densities, axis=1))\n print(mean_confidence_interval(precisions))\n print(mean_confidence_interval(recalls))\n all_log_densities = np.concatenate(all_log_densities, axis=1)\n # log sum exponential\n storage['log_densities'] = logsumexp(all_log_densities, axis=1) - np.log(100)\n storage['ground_truth'] = ground_truth\n # storage['ll_precision'] = mean_confidence_interval(precisions)\n # 
storage['ll_recall'] = mean_confidence_interval(recalls)\n # storage['params'] = self._get_parameters(testloader)\n pkl_filename = './results/test/{}{}/{}.pkl'.format(config['model']['name'], \\\n config['model']['config_id'], args.restore_filename)\n os.makedirs(os.path.dirname(pkl_filename), exist_ok=True)\n with open(pkl_filename, 'wb') as _f:\n pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL)\n\nif __name__ == '__main__':\n args = argparser()\n config = load_config(args)\n\n # Get data path\n data_dir = config.get(\"paths\", \"data_directory\")\n test_data_file_name = config.get(\"paths\", \"test_data_file_name\")\n test_csv_path = os.path.join(data_dir, test_data_file_name)\n data_file_name = config.get(\"paths\", \"data_file_name\")\n corpus_csv_path = os.path.join(data_dir, data_file_name)\n \n # Set text processing function\n transformer = FeatureExtractor(config)\n raw_documents = transformer.get_raw_documents(corpus_csv_path)\n transformer.fit(raw_documents)\n transformer.log_vocabulary('data/test_vocab.txt')\n\n test_data = SpamDataset(\n test_csv_path,\n label2int=json.loads(config.get(\"data\", \"label2int\")),\n transform=transformer.vectorize)\n\n # No shuffle data in testset: to guarentee the same order of predictions\n # from differemnt models.\n testloader = DataLoader(\n test_data,\n batch_size=config.getint(\"training\", \"batch_size\"),\n shuffle=False,\n num_workers=0,\n pin_memory=False)\n\n eval(config, testloader)\n" ]
[ [ "numpy.log", "numpy.expand_dims", "numpy.concatenate", "torch.cuda.is_available", "scipy.special.logsumexp" ] ]
TinCodes/hit-prediction-code
[ "86320656210882b948492e34f3b88fd5b1d7440e" ]
[ "config/linear_regression_ll_filtered.py" ]
[ "import dbispipeline.result_handlers as result_handlers\nfrom dbispipeline.evaluators import GridEvaluator\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport common\n\nfrom dataloaders import MsdBbLoader\n\nimport evaluations\n\ndataloader = MsdBbLoader(\n hits_file_path='/storage/nas3/datasets/music/billboard/msd_bb_matches.csv',\n non_hits_file_path=\n '/storage/nas3/datasets/music/billboard/msd_bb_non_matches.csv',\n features_path='/storage/nas3/datasets/music/billboard',\n non_hits_per_hit=1,\n features=[\n *common.ll_filterd_list(),\n ],\n label='peak',\n nan_value=150,\n random_state=42,\n)\n\npipeline = Pipeline([\n ('scale', MinMaxScaler()),\n ('linreg', LinearRegression()),\n])\n\nevaluator = GridEvaluator(\n parameters={},\n grid_parameters=evaluations.grid_parameters(),\n)\n\nresult_handlers = [\n result_handlers.print_gridsearch_results,\n]\n" ]
[ [ "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.MinMaxScaler" ] ]
zhangyong2/tensorflow_nlp
[ "4cc3cc4abec27526336897f1c62cf904b48f2676", "4cc3cc4abec27526336897f1c62cf904b48f2676" ]
[ "nlp/segment/joint_bilstm_crf/model.py", "nlp/segment/idcnn/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport time\nimport pickle\nimport numpy as np\nimport random\n\nfrom nlp.segment.joint_bilstm_crf import losses\nfrom nlp.segment.joint_bilstm_crf.dataset import data_utils\nfrom nlp.segment.joint_bilstm_crf.layers import EmbeddingLayer, HiddenLayer, TimeDistributed, BiLSTM, Forward\nimport nlp.segment.joint_bilstm_crf.batch as Batch\n\n\nclass Model(object):\n def __init__(self, nums_chars, nums_tags, buckets_char, counts=None, batch_size=10,\n tag_scheme='BIES', word_vec=True, crf=1, ngram=None):\n self.nums_chars = nums_chars\n self.nums_tags = nums_tags\n self.buckets_char = buckets_char\n self.counts = counts\n self.tag_scheme = tag_scheme\n self.word_vec = word_vec\n self.crf = crf\n self.ngram = ngram\n self.emb_layer = None\n self.gram_layers = []\n self.batch_size = batch_size\n self.l_rate = None\n self.decay = None\n self.train_step = None\n self.saver = None\n self.decode_holders = None\n self.scores = None\n self.params = None\n self.updates = []\n self.bucket_dit = {}\n self.input_v = []\n self.input_w = []\n self.input_p = None\n self.output = []\n self.output_ = []\n self.output_p = []\n self.output_w = []\n self.output_w_ = []\n if self.crf > 0:\n self.transition_char = []\n for i in range(len(self.nums_tags)):\n self.transition_char.append( # 转移矩阵\n tf.get_variable('transitions_char' + str(i), [self.nums_tags[i] + 1, self.nums_tags[i] + 1]))\n\n while len(self.buckets_char) > len(self.counts):\n self.counts.append(1)\n\n self.real_batches = data_utils.get_real_batch(self.counts, self.batch_size)\n\n def model_graph(self, trained_model, scope, emb_dim, gru, rnn_dim, rnn_num, drop_out=0.5,\n emb=None, ng_embs=None):\n if trained_model is not None:\n param_dic = {}\n param_dic['nums_chars'] = self.nums_chars\n param_dic['nums_tags'] = self.nums_tags\n param_dic['tag_scheme'] = self.tag_scheme\n param_dic['word_vec'] = self.word_vec\n param_dic['crf'] = self.crf\n param_dic['emb_dim'] = emb_dim\n param_dic['gru'] = gru\n param_dic['rnn_dim'] = rnn_dim\n param_dic['rnn_num'] = rnn_num\n param_dic['drop_out'] = drop_out\n param_dic['buckets_char'] = self.buckets_char\n param_dic['ngram'] = self.ngram\n # print param_dic\n f_model = open(trained_model, 'wb')\n pickle.dump(param_dic, f_model)\n f_model.close()\n\n dr = tf.placeholder(tf.float32, [], name='drop_out_holder')\n self.drop_out = dr\n self.drop_out_v = drop_out\n if self.word_vec:\n self.emb_layer = EmbeddingLayer(self.nums_chars + 500, emb_dim, weights=emb, name='emb_layer')\n\n if self.ngram is not None:\n if ng_embs is not None:\n assert len(ng_embs) == len(self.ngram)\n else:\n ng_embs = [None for _ in range(len(self.ngram))]\n for i, n_gram in enumerate(self.ngram):\n self.gram_layers.append(EmbeddingLayer(n_gram + 1000 * (i + 2), emb_dim, weights=ng_embs[i],\n name=str(i + 2) + 'gram_layer'))\n\n with tf.variable_scope('BiRNN'):\n if gru:\n fw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)\n bw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)\n else:\n fw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim, state_is_tuple=True)\n bw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim, state_is_tuple=True)\n\n if rnn_num > 1:\n fw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([fw_rnn_cell] * rnn_num, state_is_tuple=True)\n bw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([bw_rnn_cell] * rnn_num, state_is_tuple=True)\n\n output_wrapper = TimeDistributed(HiddenLayer(rnn_dim * 2, self.nums_tags[0], activation='linear', name='hidden'), name='wrapper')\n\n #define model for each bucket\n for idx, bucket 
in enumerate(self.buckets_char):\n if idx == 1:\n scope.reuse_variables()\n t1 = time.time()\n\n input_v = tf.placeholder(tf.int32, [None, bucket], name='input_' + str(bucket))\n\n self.input_v.append([input_v])\n\n emb_set = []\n\n if self.word_vec:\n word_out = self.emb_layer(input_v)\n emb_set.append(word_out)\n\n if self.ngram is not None:\n for i in range(len(self.ngram)):\n input_g = tf.placeholder(tf.int32, [None, bucket], name='input_g' + str(i) + str(bucket))\n self.input_v[-1].append(input_g)\n gram_out = self.gram_layers[i](input_g)\n emb_set.append(gram_out)\n\n if len(emb_set) > 1:\n emb_out = tf.concat(emb_set, axis=2)\n emb_out = tf.unstack(emb_out)\n else:\n emb_out = emb_set[0]\n\n rnn_out = BiLSTM(rnn_dim, fw_cell=fw_rnn_cell, bw_cell=bw_rnn_cell, p=dr,\n name='BiLSTM' + str(bucket), scope='BiRNN')(emb_out, input_v)\n\n output = output_wrapper(rnn_out)\n output_c = tf.stack(output, axis=1)\n self.output.append([output_c])\n self.output_.append([tf.placeholder(tf.int32, [None, bucket], name='tags' + str(bucket))])\n self.bucket_dit[bucket] = idx\n print('Bucket %d, %f seconds' % (idx + 1, time.time() - t1))\n\n assert len(self.input_v) == len(self.output) and len(self.output) == len(self.output_) and len(\n self.output) == len(self.counts)\n self.params = tf.trainable_variables()\n self.saver = tf.train.Saver()\n\n def config(self, optimizer, decay, lr_v=None, momentum=None, clipping=False, max_gradient_norm=5.0):\n self.decay = decay\n print('Training preparation...')\n\n print('Defining loss...')\n loss = []\n if self.crf > 0:\n loss_function = losses.crf_loss\n for i in range(len(self.input_v)):\n bucket_loss = losses.loss_wrapper(self.output[i], self.output_[i], loss_function,\n transitions=self.transition_char, nums_tags=self.nums_tags,\n batch_size=self.real_batches[i])\n loss.append(bucket_loss)\n else:\n loss_function = losses.sparse_cross_entropy\n for output, output_ in zip(self.output, self.output_):\n bucket_loss = losses.loss_wrapper(output, output_, loss_function)\n loss.append(bucket_loss)\n\n l_rate = tf.placeholder(tf.float32, [], name='learning_rate_holder')\n self.l_rate = l_rate\n\n if optimizer == 'sgd':\n if momentum is None:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=l_rate)\n else:\n optimizer = tf.train.MomentumOptimizer(learning_rate=l_rate, momentum=momentum)\n elif optimizer == 'adagrad':\n assert lr_v is not None\n optimizer = tf.train.AdagradOptimizer(learning_rate=l_rate)\n elif optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=l_rate)\n else:\n raise Exception('optimiser error')\n\n self.train_step = []\n\n print('Computing gradients...')\n\n for idx, l in enumerate(loss):\n\n t2 = time.time()\n with tf.variable_scope(tf.get_variable_scope(), reuse=False):\n if clipping:\n gradients = tf.gradients(l, self.params)\n clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)\n train_step = optimizer.apply_gradients(zip(clipped_gradients, self.params))\n else:\n train_step = optimizer.minimize(l)\n\n print('Bucket %d, %f seconds' % (idx + 1, time.time() - t2))\n self.train_step.append(train_step)\n\n def decode_graph(self):\n self.decode_holders = []\n self.scores = []\n for bucket in self.buckets_char:\n decode_holders = []\n scores = []\n for nt in self.nums_tags:\n ob = tf.placeholder(tf.float32, [None, bucket, nt])\n trans = tf.placeholder(tf.float32, [nt + 1, nt + 1])\n nums_steps = ob.get_shape().as_list()[1]\n length = tf.placeholder(tf.int32, [None])\n b_size = 
tf.placeholder(tf.int32, [])\n small = -1000\n class_pad = tf.stack(small * tf.ones([b_size, nums_steps, 1]))\n observations = tf.concat([ob, class_pad], axis=2)\n b_vec = tf.tile(([small] * nt + [0]), [b_size])\n b_vec = tf.cast(b_vec, tf.float32)\n b_vec = tf.reshape(b_vec, [b_size, 1, -1])\n e_vec = tf.tile(([0] + [small] * nt), [b_size])\n e_vec = tf.cast(e_vec, tf.float32)\n e_vec = tf.reshape(e_vec, [b_size, 1, -1])\n observations = tf.concat([b_vec, observations, e_vec], axis=1)\n transitions = tf.reshape(tf.tile(trans, [b_size, 1]), [b_size, nt + 1, nt + 1])\n observations = tf.reshape(observations, [-1, nums_steps + 2, nt + 1, 1])\n observations = tf.transpose(observations, [1, 0, 2, 3])\n previous = observations[0, :, :, :]\n max_scores = []\n max_scores_pre = []\n alphas = [previous]\n for t in range(1, nums_steps + 2):\n previous = tf.reshape(previous, [-1, nt + 1, 1])\n current = tf.reshape(observations[t, :, :, :], [-1, 1, nt + 1])\n alpha_t = previous + current + transitions\n max_scores.append(tf.reduce_max(alpha_t, reduction_indices=1))\n max_scores_pre.append(tf.argmax(alpha_t, axis=1))\n alpha_t = tf.reshape(Forward.log_sum_exp(alpha_t, axis=1), [-1, nt + 1, 1])\n alphas.append(alpha_t)\n previous = alpha_t\n max_scores = tf.stack(max_scores, axis=1)\n max_scores_pre = tf.stack(max_scores_pre, axis=1)\n decode_holders.append([ob, trans, length, b_size])\n scores.append((max_scores, max_scores_pre))\n self.decode_holders.append(decode_holders)\n self.scores.append(scores)\n\n def train(self, t_x, t_y, v_x, v_y, idx2tag, idx2char, sess, epochs, trained_model, lr=0.05, decay=0.05,\n decay_step=1):\n lr_r = lr\n best_epoch = 0\n best_score = 0\n\n best_seg = 0\n best_pos = 0\n\n v_y = data_utils.merge_bucket(v_y)\n v_y = data_utils.unpad_zeros(v_y)\n\n gold = data_utils.decode_tags(v_y, idx2tag, self.tag_scheme)\n\n input_chars = data_utils.merge_bucket([v_x[0]])\n\n chars = data_utils.decode_chars(input_chars[0], idx2char)\n\n gold_out = data_utils.generate_output(chars, gold, self.tag_scheme)\n\n for epoch in range(epochs):\n print('epoch: %d' % (epoch + 1))\n t = time.time()\n if epoch % decay_step == 0 and decay > 0:\n lr_r = lr / (1 + decay * (epoch / decay_step))\n\n data_list = t_x + t_y\n\n samples = list(zip(*data_list))\n\n random.shuffle(samples)\n\n for sample in samples:\n c_len = len(sample[0][0])\n idx = self.bucket_dit[c_len]\n real_batch_size = self.real_batches[idx]\n model = self.input_v[idx] + self.output_[idx]\n Batch.train(sess=sess[0], model=model, batch_size=real_batch_size, config=self.train_step[idx],\n lr=self.l_rate, lrv=lr_r, dr=self.drop_out, drv=self.drop_out_v, data=list(sample),\n verbose=False)\n\n predictions = []\n\n for v_b_x in zip(*v_x):\n c_len = len(v_b_x[0][0])\n idx = self.bucket_dit[c_len]\n\n b_prediction = self.predict(data=v_b_x, sess=sess, model=self.input_v[idx] + self.output[idx],\n index=idx, batch_size=100)\n b_prediction = data_utils.decode_tags(b_prediction, idx2tag, self.tag_scheme)\n predictions.append(b_prediction)\n\n predictions = zip(*predictions)\n predictions = data_utils.merge_bucket(predictions)\n\n prediction_out = data_utils.generate_output(chars, predictions, self.tag_scheme)\n\n scores = data_utils.evaluator(prediction_out, gold_out, tag_scheme=self.tag_scheme)\n scores = np.asarray(scores)\n\n c_score = np.max(scores[:, 1]) * np.max(scores[:, 0])\n if c_score > best_score and epoch > 4:\n best_epoch = epoch + 1\n best_score = c_score\n best_seg = np.max(scores[:, 0])\n best_pos = np.max(scores[:, 1])\n 
self.saver.save(sess[0], trained_model, write_meta_graph=False)\n print('Time consumed: %d seconds' % int(time.time() - t))\n print('Training is finished!')\n print('Best segmentation score: %f' % best_seg)\n print('Best POS tag score: %f' % best_pos)\n print('Best epoch: %d' % best_epoch)\n\n def predict(self, data, sess, model, index=None, argmax=True, batch_size=100, ensemble=None,\n verbose=False):\n if self.crf:\n assert index is not None\n predictions = Batch.predict(sess=sess[0], decode_sess=sess[1], model=model,\n transitions=self.transition_char, crf=self.crf, scores=self.scores[index],\n decode_holders=self.decode_holders[index], argmax=argmax, batch_size=batch_size,\n data=data, dr=self.drop_out, ensemble=ensemble,\n verbose=verbose)\n else:\n predictions = Batch.predict(sess=sess[0], model=model, crf=self.crf, argmax=argmax, batch_size=batch_size,\n data=data, dr=self.drop_out, ensemble=ensemble, verbose=verbose)\n return predictions\n\n def define_updates(self, new_chars, emb_path, char2idx, new_grams=None, ng_emb_path=None, gram2idx=None):\n self.nums_chars += len(new_chars)\n\n if self.word_vec and emb_path is not None:\n old_emb_weights = self.emb_layer.embeddings\n emb_dim = old_emb_weights.get_shape().as_list()[1]\n new_emb = data_utils.get_new_embeddings(new_chars, emb_dim, emb_path)\n n_emb_sh = new_emb.get_shape().as_list()\n if len(n_emb_sh) > 1:\n new_emb_weights = tf.concat([old_emb_weights[:len(char2idx) - len(new_chars)], new_emb,\n old_emb_weights[len(char2idx):]], axis=0)\n assign_op = old_emb_weights.assign(new_emb_weights)\n self.updates.append(assign_op)\n\n if self.ngram is not None and ng_emb_path is not None:\n old_gram_weights = [ng_layer.embeddings for ng_layer in self.gram_layers]\n ng_emb_dim = old_gram_weights[0].get_shape().as_list()[1]\n new_ng_emb = data_utils.get_new_ng_embeddings(new_grams, ng_emb_dim, ng_emb_path)\n for i in range(len(old_gram_weights)):\n new_ng_weight = tf.concat([old_gram_weights[i][:len(gram2idx[i]) - len(new_grams[i])], new_ng_emb[i],\n old_gram_weights[i][len(gram2idx[i]):]], axis=0)\n assign_op = old_gram_weights[i].assign(new_ng_weight)\n self.updates.append(assign_op)\n\n def run_updates(self, sess, weight_path):\n self.saver.restore(sess, weight_path)\n for op in self.updates:\n sess.run(op)\n print('Loaded.')\n\n def test(self, sess, t_x, t_y, idx2tag, idx2char, outpath=None, ensemble=None, batch_size=200):\n\n t_y = data_utils.unpad_zeros(t_y)\n gold = data_utils.decode_tags(t_y, idx2tag, self.tag_scheme)\n chars = data_utils.decode_chars(t_x[0], idx2char)\n gold_out = data_utils.generate_output(chars, gold, self.tag_scheme)\n\n prediction = self.predict(data=t_x, sess=sess, model=self.input_v[0] + self.output[0], index=0,\n ensemble=ensemble, batch_size=batch_size)\n prediction = data_utils.decode_tags(prediction, idx2tag, self.tag_scheme)\n prediction_out = data_utils.generate_output(chars, prediction, self.tag_scheme)\n\n scores = data_utils.evaluator(prediction_out, gold_out, tag_scheme=self.tag_scheme, verbose=True)\n\n scores = np.asarray(scores)\n scores_f = scores[:, 1]\n best_idx = int(np.argmax(scores_f))\n\n c_score = scores[0]\n\n print('Best scores: ')\n print('Segmentation F-score: %f' % c_score[0])\n print('Segmentation Precision: %f' % c_score[2])\n print('Segmentation Recall: %f\\n' % c_score[3])\n\n print('Joint POS tagging F-score: %f' % c_score[1])\n print('Joint POS tagging Precision: %f' % c_score[4])\n print('Joint POS tagging Recall: %f' % c_score[5])\n\n if outpath is not None:\n if 
self.tag_scheme == 'parallel':\n final_out = prediction_out[best_idx + 1]\n elif self.tag_scheme == 'mul':\n final_out = prediction_out[best_idx]\n else:\n final_out = prediction_out[0]\n data_utils.printer(final_out, outpath)\n\n def tag(self, sess, r_x, idx2tag, idx2char, expected_scheme='BIES', outpath='out.txt', ensemble=None,\n batch_size=200, large_file=False):\n\n chars = data_utils.decode_chars(r_x[0], idx2char)\n\n prediction = self.predict(data=r_x, sess=sess, model=self.input_v[0] + self.output[0], index=0,\n ensemble=ensemble, batch_size=batch_size)\n prediction = data_utils.decode_tags(prediction, idx2tag, self.tag_scheme)\n prediction_out = data_utils.generate_output(chars, prediction, self.tag_scheme)\n\n scheme2idx_short = {'BI': 1, 'BIE': 2, 'BIES': 3, 'Voting': 4}\n scheme2idx_long = {'BIES': 0, 'long': 1}\n\n if len(prediction_out) > 2:\n final_out = prediction_out[scheme2idx_short[expected_scheme]]\n elif len(prediction_out) == 2:\n final_out = prediction_out[scheme2idx_long[expected_scheme]]\n else:\n final_out = prediction_out[0]\n if large_file:\n return final_out\n else:\n data_utils.printer(final_out, outpath)\n", "# -*- coding:utf-8 -*-\n\nimport os\nimport json\nimport shutil\nimport logging\nimport codecs\n\nimport tensorflow as tf\nfrom nlp.segment.idcnn.conlleval import return_report\n\n\ndef get_logger(log_file):\n logger = logging.getLogger(log_file)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n ch.setFormatter(formatter)\n fh.setFormatter(formatter)\n logger.addHandler(ch)\n logger.addHandler(fh)\n return logger\n\n\ndef test_ner(results, path):\n \"\"\"\n Run perl script to evaluate model\n \"\"\"\n output_file = os.path.join(path, \"ner_predict.txt\")\n with codecs.open(output_file, \"w\", \"utf-8\") as f:\n to_write = []\n for block in results:\n for line in block:\n to_write.append(line + \"\\n\")\n to_write.append(\"\\n\")\n\n f.writelines(str(line) for line in to_write)\n eval_lines = return_report(output_file)\n return eval_lines\n\n\ndef print_config(config, logger):\n \"\"\"\n Print configuration of the model\n \"\"\"\n for k, v in config.items():\n logger.info(\"{}:\\t{}\".format(k.ljust(15), v))\n\n\ndef make_path(params):\n \"\"\"\n Make folders for training and evaluation\n \"\"\"\n if not os.path.isdir(params.result_path):\n os.makedirs(params.result_path)\n if not os.path.isdir(params.ckpt_path):\n os.makedirs(params.ckpt_path)\n if not os.path.isdir(params.log_path):\n os.makedirs(params.log_path)\n if not os.path.isdir(params.vocab_path):\n os.makedirs(params.vocab_path)\n\n\ndef clean_and_make_path(params):\n \"\"\"\n Clean current folder\n remove saved model and training log\n \"\"\"\n if os.path.isdir(params.vocab_path):\n shutil.rmtree(params.vocab_path)\n os.mkdir(params.vocab_path)\n\n if os.path.isdir(params.ckpt_path):\n shutil.rmtree(params.ckpt_path)\n os.mkdir(params.ckpt_path)\n\n if os.path.isdir(params.result_path):\n shutil.rmtree(params.result_path)\n os.mkdir(params.result_path)\n\n if os.path.isdir(params.log_path):\n shutil.rmtree(params.log_path)\n os.mkdir(params.log_path)\n\n if os.path.isdir(\"__pycache__\"):\n shutil.rmtree(\"__pycache__\")\n\n if os.path.isdir(params.config_path):\n shutil.rmtree(params.config_path)\n os.mkdir(params.config_path)\n\n\ndef save_config(config, config_file):\n \"\"\"\n Save 
configuration of the model\n parameters are stored in json format\n \"\"\"\n with codecs.open(config_file, \"w\", encoding=\"utf8\") as f:\n json.dump(config, f, ensure_ascii=False, indent=4)\n\n\ndef load_config(config_file):\n \"\"\"\n Load configuration of the model\n parameters are stored in json format\n \"\"\"\n with codecs.open(config_file, encoding=\"utf8\") as f:\n return json.load(f)\n\n\ndef convert_to_text(line):\n \"\"\"\n Convert conll data to text\n \"\"\"\n to_print = []\n for item in line:\n\n try:\n if item[0] == \" \":\n to_print.append(\" \")\n continue\n word, gold, tag = item.split(\" \")\n if tag[0] in \"SB\":\n to_print.append(\"[\")\n to_print.append(word)\n if tag[0] in \"SE\":\n to_print.append(\"@\" + tag.split(\"-\")[-1])\n to_print.append(\"]\")\n except:\n print(list(item))\n return \"\".join(to_print)\n\n\ndef save_model(sess, model, path, logger):\n checkpoint_path = os.path.join(path, \"segment.ckpt\")\n model.saver.save(sess, checkpoint_path)\n logger.info(\"model saved\")\n\n\ndef create_model(session, Model_class, path, load_vec, config, id_to_char, logger):\n # create model, reuse parameters if exists\n model = Model_class(config)\n\n ckpt = tf.train.get_checkpoint_state(path)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n logger.info(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n logger.info(\"Created model with fresh parameters.\")\n session.run(tf.global_variables_initializer())\n if config[\"pre_emb\"]:\n emb_weights = session.run(model.char_lookup.read_value())\n emb_weights = load_vec(config[\"emb_file\"],id_to_char, config[\"char_dim\"], emb_weights)\n session.run(model.char_lookup.assign(emb_weights))\n logger.info(\"Load pre-trained embedding.\")\n return model\n\n\ndef result_to_sentence(string, tags):\n item = []\n word_name = \"\"\n idx = 0\n for char, tag in zip(string, tags):\n if tag[0] == \"S\":\n item.append(char)\n elif tag[0] == \"B\":\n word_name += char\n elif tag[0] == \"M\":\n word_name += char\n elif tag[0] == \"E\":\n word_name += char\n item.append(word_name)\n word_name = \"\"\n else:\n word_name = \"\"\n idx += 1\n return item" ]
[ [ "tensorflow.concat", "numpy.asarray", "tensorflow.stack", "tensorflow.cast", "numpy.max", "tensorflow.train.AdamOptimizer", "tensorflow.gradients", "numpy.argmax", "tensorflow.train.MomentumOptimizer", "tensorflow.nn.rnn_cell.MultiRNNCell", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.nn.rnn_cell.GRUCell", "tensorflow.argmax", "tensorflow.tile", "tensorflow.train.AdagradOptimizer", "tensorflow.unstack", "tensorflow.placeholder", "tensorflow.train.GradientDescentOptimizer", "tensorflow.reduce_max", "tensorflow.transpose", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.reshape", "tensorflow.ones", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ], [ "tensorflow.train.get_checkpoint_state", "tensorflow.global_variables_initializer", "tensorflow.train.checkpoint_exists" ] ]
VirtualRaven/WebGallery
[ "2d8df641292eb78c3a18219e05086fa7a4fd414f" ]
[ "website/generate.py" ]
[ "#!/usr/bin/env python3\nimport glob\nimport datetime\nimport hashlib\nimport re\nimport os\nimport json\nimport numpy as np\nimport base64\nfrom jinja2 import Template, Environment, FileSystemLoader\nimport jinja2.filters as filters\nimport datetime\nimport re\nfrom PIL import Image\nfrom PIL import ImageCms\nfrom PIL import ExifTags\nfrom libxmp import utils,XMPFiles,consts\n\nimport dropbox as db\nimport clone\nimport store\nfrom util import *\nfrom categories import sortbycategories,loadAndValidateCategories\nimport datetime\n\nviewerPath = \"view/{}.html\"\npathTemplate = \"img/thumbnails/{}_{}.jpg\"\n\ndef hashId(name,id):\n return base64.urlsafe_b64encode(hashlib.sha1((name+id).encode('utf-8')).digest()).decode('utf-8')\n\ntoLink = lambda x: addSlash(StripHTMLExt(x))\n\n\ndate2year = lambda x: datetime.datetime.strptime(x, \"%Y:%m:%d %H:%M:%S\").year\ndef sortByYears(inventory):\n newinventory = {}\n for pic in inventory:\n year = date2year(pic['date'])\n if not(year in newinventory):\n newinventory[year] = []\n newinventory[year].append(pic)\n return newinventory\n\n\nfilters.FILTERS['sortbyyears'] = sortByYears\nfilters.FILTERS['tolink'] = toLink\nfilters.FILTERS['date2year'] = date2year\nfilters.FILTERS['sortbycategories'] = sortbycategories\n\n\ndef addMeta(dropbox,token):\n need_download = False\n metadata = {}\n name = os.path.splitext(dropbox['name'])[0]\n hashedId = hashId(name,dropbox['id_stripped'])\n filename = \"img/meta/{}.json\".format(hashedId)\n try:\n with open(filename,'r') as f:\n metadata=json.loads(f.read())\n if metadata['dropbox']['rev'] != hashId(name,dropbox['content_hash']):\n need_download=True\n except FileNotFoundError:\n need_download=True\n \n if need_download:\n db.downloadFile(dropbox,token)\n metadata['dropbox'] = {\n 'id': hashedId,\n 'rev': hashId(name,dropbox['content_hash']),\n 'outdated': True\n }\n metadata['name'] = name\n with open(filename,'w') as f:\n json.dump(metadata,f)\n return metadata\n\n\n\ndef globFiles():\n ls = glob.glob('img/meta/*.json')\n if not ls:\n raise Exception('No image files found')\n else:\n return ls\n\ndef genThumbnails(id,img):\n sizes = [150,300,512,1024,3000]\n names = ['tiny', 'small','medium','large','huge']\n icc_profile=img.info.get('icc_profile')\n for (s,name) in zip(sizes,names):\n thumb = img.copy()\n thumb.thumbnail( (s,s) )\n path = pathTemplate.format(id,name)\n thumb.save(path,quality=85,optimize=True, icc_profile=icc_profile)\n yield (name, {\n 'path' : path,\n 'width': thumb.width,\n 'height': thumb.height\n })\n\n\ndef processImages():\n files = globFiles()\n numFiles = len(files)\n i = 1\n print('Processing images...')\n for meta in files:\n metafile=meta\n with open(meta,'r') as f:\n meta=json.load(f)\n\n\n if not meta['dropbox']['outdated']:\n print(\"({}/{}) [up to date]\\r\".format(i,numFiles),end='')\n yield meta\n\n else:\n meta['dropbox']['outdated']=False\n f = 'img/raw/{}.jpg'.format(meta['name'])\n try:\n with Image.open(f) as img:\n exif = img._getexif()\n xmpObj = XMPFiles(file_path=f, open_forupdate=False).get_xmp()\n xmp = utils.object_to_dict(xmpObj)\n\n if consts.XMP_NS_DC in xmp: \n xmp=xmp[consts.XMP_NS_DC]\n purlOrg={}\n for k,v,_ in xmp:\n purlOrg[k] = v\n purlTitleKey = \"dc:title[1]\"\n purlTitle=None\n if purlTitleKey in purlOrg:\n purlTitle = purlOrg[purlTitleKey]\n if purlTitle.strip() == \"\":\n purlTitle=None\n else:\n purlOrg=None\n displayname = meta['name'] \n if purlTitle:\n displayname=purlTitle\n print(\"Using XMP title {}\".format(displayname))\n\n w,h = 
img.size\n tag = lambda x : exif[TAGS_NR[x]] if TAGS_NR[x] in exif else None\n avg=np.round(np.mean(np.array(img),axis=(0,1)))\n id = meta['dropbox']['id']\n avghex= ('#%02x%02x%02x' % tuple(avg.astype(int)))\n date = tag('DateTimeOriginal')\n if not date:\n date = tag('DateTimeDigitized')\n if not date:\n date = tag('DateTime')\n if not date:\n raise 'Image file contains no date information!'\n obj= {\n 'name': meta['name'],\n 'displayname': displayname,\n 'dropbox': meta['dropbox'],\n 'date': date,\n 'xmp': xmpObj.serialize_to_str(),\n 'rating': tag('Rating'),\n 'view': viewerPath.format(id),\n 'Copyright': tag('Copyright'),\n 'colour': avghex,\n 'original': {\n 'path' : f,\n 'width': img.width,\n 'height': img.height\n }\n }\n for (n,o) in genThumbnails(id,img):\n obj[n]=o\n\n with open(metafile,'w') as f:\n json.dump(obj,f)\n yield obj\n print(\"({}/{}) [ ok ]\\r\".format(i,numFiles),end='')\n except FileNotFoundError as e:\n removeMeta(meta) # The meta data failed for some reason, \n # Remove it to force reload\n raise e\n i=i+1\n print('')\n\ndef genInventory():\n dateKey = lambda x : datetime.datetime.strptime(x['date'], \"%Y:%m:%d %H:%M:%S\")\n inventory = sorted(list(processImages()),key=dateKey,reverse=True)\n return inventory\n\ndef genHTML():\n print('Generating website...')\n loadAndValidateCategories()\n \n year =datetime.datetime.now().year\n inventory = genInventory()\n storeData = store.generateStore(inventory)\n websiteName =os.getenv('WEBSITE_URL')\n\n if not websiteName:\n websiteName = \"/\"\n with open(\"version.json\",'r') as f:\n versionObj = json.load(f)\n if versionObj['git']:\n gitSha = versionObj['git']\n else:\n gitSha = 'DEV'\n gAdId = os.getenv('G_ANALYTICS_ID')\n\n\n environment = Environment(loader=FileSystemLoader(\"templates/\"))\n for templateName in environment.list_templates(\".template\"):\n template = environment.get_template(templateName)\n filename = os.path.basename(templateName)\n hname = os.path.splitext(filename)[0]\n name,suffix = os.path.splitext(hname)\n\n \n if suffix == \".html\":\n if name == \"viewer\":\n for (i,img) in zip(range(0,len(inventory)),inventory):\n print(\"Generating view ({}/{})\\r\".format(i+1,len(inventory)),end='')\n prev = inventory[i-1]['view'] if i > 0 else None\n next = inventory[i+1]['view'] if i+1 < len(inventory) else None\n jsonPath= toJsonPath(img['view'])\n template.stream(pic=img,inventory=inventory,index=i,prev=prev,next=next,year=year,gitSha=gitSha,json=toLink(jsonPath)).dump(img['view'])\n with open(jsonPath ,'w') as jv:\n toSrc = lambda img : \"{} {}w\".format(toLink(img['path']),img['width'])\n obj = {\n 'name': img['displayname'],\n 'id' : name,\n 'colour': img['colour'],\n 'path': toLink(img['large']['path']),\n 'url': toLink(img['view']),\n 'srcset' : \"{},{}\".format(*list(map(lambda size : toSrc(img[size]),['large','huge']))),\n 'next': toLink(toJsonPath(next)),\n 'prev': toLink(toJsonPath(prev))\n }\n json.dump(obj,jv)\n print('')\n else:\n if name == 'store' and not(storeData):\n continue\n print(\"Generating \" + hname + \"...\")\n if gAdId:\n template.stream(inventory=inventory,year=year,websiteName=websiteName,gAdId=gAdId,gitSha=gitSha,storeData=storeData).dump(hname)\n else:\n template.stream(inventory=inventory,year=year,websiteName=websiteName,gitSha=gitSha,storeData=storeData).dump(hname)\n elif suffix == \".css\":\n print(\"WARN: Ignoring {}\".format(name))\n return inventory\n\n\n\n\n\ndef fetchDropbox():\n token = os.getenv('DROPBOX_API_TOKEN')\n foundMeta = []\n newMeta=[]\n removedMeta 
= []\n removePathNoFail('api/manifest.json')\n removePathNoFail('api/sitedata.json')\n \n for i in db.getFileMeta(token):\n if i['name'] == 'sitedata.json':\n db.downloadSitedata(i,token)\n else:\n meta= addMeta(i,token)\n foundMeta.append(meta)\n if meta['dropbox']['outdated']:\n newMeta.append(meta)\n \n\n for f in glob.glob('img/meta/*.json'):\n filename = os.path.basename(f)\n id = os.path.splitext(filename)[0]\n if id not in map(lambda x: x['dropbox']['id'],foundMeta):\n print('Purge metadata ' + filename)\n meta ={}\n with open(f,'r') as f:\n meta = json.load(f)\n removeMeta(meta)\n\n for f in glob.glob('img/raw/*.jpg'):\n filename = os.path.basename(f)\n name = os.path.splitext(filename)[0]\n if name not in map(lambda x : x['name'],foundMeta) :\n print('Purge image ' + name)\n removePathNoFail(f)\n try: \n with open('api/sitedata.json','r') as f:\n sitedata = json.load(f)\n except Exception as e:\n print(\"Failed do find sitedata.json in dropbox folder!\")\n raise\n\n inventory=genHTML()\n with open('api/manifest.json','w') as f:\n json.dump({\n 'last_update': datetime.datetime.now().isoformat(),\n 'host': os.getenv('HOSTNAME'),\n 'version': 4,\n 'img': {\n 'inventory': inventory,\n 'new': newMeta,\n 'removed': removedMeta\n }\n },f)\n\n\n\ndef main():\n try: \n\n if not os.getenv('MASTER_NODE_URL'):\n fetchDropbox()\n else:\n try:\n dropboxAvailable=False\n if os.getenv('DROPBOX_API_TOKEN'):\n dropboxAvailable=True\n manifest=clone.fetchWebsite(os.getenv('MASTER_NODE_URL'),dropboxAvailable)\n except Exception as e:\n if dropboxAvailable:\n print('Tried to clone master website but failed, reverting to dropbox')\n fetchDropbox()\n return\n else:\n raise e\n genHTML()\n with open('api/manifest.json','w') as f:\n json.dump(manifest,f)\n\n except Exception as e:\n clone.removePathNoFail('api/manifest.json')\n raise e\nmain()" ]
[ [ "numpy.array" ] ]
kalasuvaRahul/tensorflow
[ "2da3853684e5189c393b340c3c110d19b0f3875e" ]
[ "tensorflow/python/compat/compat.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 10, 22)\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibiltiy, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. 
This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018).\n month: A month (1 <= month <= 12) in year.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args :\n year: A year (e.g. 2018).\n month: A month (1 <= month <= 12) in year.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.\n\n Yields:\n Nothing.\n \"\"\"\n global _FORWARD_COMPATIBILITY_HORIZON\n try:\n old_compat_date = _FORWARD_COMPATIBILITY_HORIZON\n _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)\n yield\n finally:\n _FORWARD_COMPATIBILITY_HORIZON = old_compat_date\n" ]
[ [ "tensorflow.python.util.tf_export.tf_export" ] ]
aishikchakraborty/fairseq-synsem
[ "52e3a7d09a7bd9e8d66c996e3b7b69215302e8ee" ]
[ "evaluation/evaluate.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport random\nimport argparse\nimport csv\nimport os\nimport json\nfrom tqdm import tqdm\nimport time\nimport mmap\nimport _pickle as pickle\n\nparser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')\nparser.add_argument('--data', type=str, default='data/XNLI-1.0/',\n help='location of the data corpus')\nparser.add_argument('--model', type=str, default='cbow_model.pb',\n help='location of the model')\nparser.add_argument('--emb', type=str, default='../embeddings',\n help='location of the data corpus')\nparser.add_argument('--random_seed', type=int, default=13370,\n help='random seed')\nparser.add_argument('--numpy_seed', type=int, default=1337,\n help='numpy random seed')\nparser.add_argument('--torch_seed', type=int, default=133,\n help='pytorch random seed')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--gpu', type=int, default=0,\n help='use gpu x')\nparser.add_argument('--emb-type', type=int, default=0,\n help='embedding type')\nparser.add_argument('--lang', type=str, default='en',\n help='language to use for xnli')\nparser.add_argument('--hidden-dim', type=int, default=500,\n help='hidden dimension size')\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='batch size')\nparser.add_argument('--onnx-export', type=str, default='',\n help='path to export the final model in onnx format')\n\nargs = parser.parse_args()\n\nprint(args)\nif args.random_seed is not None:\n random.seed(args.random_seed)\nif args.numpy_seed is not None:\n np.random.seed(args.numpy_seed)\nif args.torch_seed is not None:\n torch.manual_seed(args.torch_seed)\n # Seed all GPUs with the same seed if available.\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.torch_seed)\n\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndevice = torch.device(\"cuda:\" + str(args.gpu) if args.cuda else \"cpu\")\n\nprint('Load vocab and embeddings...')\nvocab = torch.load(args.emb + '/vocab.pb')\nprint(vocab[:10])\nw2idx = {w:idx for idx, w in enumerate(vocab)}\n\nif args.emb_type == 0:\n emb = torch.load(args.emb + '/sem_matrix.pb')\nelif args.emb_type == 1:\n emb = torch.load(args.emb + '/' + args.lang + '_syn_matrix.pb')\nelif args.emb_type == 2:\n emb = torch.load(args.emb + '/emb_matrix.pb')\nelse:\n emb1 = torch.load(args.emb + '/sem_matrix.pb')\n emb2 = torch.load(args.emb + '/' + args.lang + '_syn_matrix.pb')\n emb = torch.cat((emb1, emb2), dim=1)\n\n\nembedding_dim = emb.size(1)\nprint(len(vocab))\n\nprint('Loaded vocab and embeddings')\n\ngold_label2idx = {'entailment': 0, 'contradiction': 1, 'neutral': 2}\n\ndef convert_seq_to_id(sent):\n words_list = sent.split()\n ids_list = []\n for w in words_list:\n if w in vocab:\n ids_list.append(w2idx[w])\n else:\n ids_list.append(w2idx['<unk>'])\n return ids_list\n\n\ndef get_num_lines(file_path):\n fp = open(file_path, \"r+\")\n buf = mmap.mmap(fp.fileno(), 0)\n lines = 0\n while buf.readline():\n lines += 1\n return lines\n\ndef load_dataset(mode='train'):\n s1, s2, label = [], [], []\n with open(os.path.join(args.data, 'xnli.' + mode + '.jsonl'), 'r') as f:\n for lines in tqdm(f, total=get_num_lines(os.path.join(args.data, 'xnli.' 
+ mode + '.jsonl'))):\n lines = json.loads(lines.rstrip('\\n'))\n if lines['language'] != args.lang:\n continue\n try:\n label.append(gold_label2idx[lines['gold_label']])\n s1.append(convert_seq_to_id(lines['sentence1']))\n s2.append(convert_seq_to_id(lines['sentence2']))\n except:\n continue\n\n return s1, s2, label\n\nprint('Loading dataset...')\n\n# if os.path.exists('data/multinli_train.pb') and os.path.exists('data/multinli_val.pb'):\n# train_data = pickle.load(open('data/multinli_train.pb', 'rb'))\n# val_data = pickle.load(open('data/multinli_val.pb', 'rb'))\n# # test_data = pickle.load(open('data/multinli_test.pb', 'rb'))\n#\n# train_s1 , train_s2, train_y = train_data[0], train_data[1], train_data[2]\n# val_s1 , val_s2, val_y = val_data[0], val_data[1], val_data[2]\n#\n# assert len(train_s1) == len(train_y), \"Length Mismatch\"\n# assert len(train_s2) == len(train_y), \"Length Mismatch\"\n#\n# assert len(val_s1) == len(val_y), \"Length Mismatch\"\n# assert len(val_s1) == len(val_y), \"Length Mismatch\"\n#\n#\n# # test_s1 , test_s2, test_y = test_data[0], test_data[1], test_data[2]\n# else:\ntest_s1 , test_s2, test_y = load_dataset('test')\nval_s1 , val_s2, val_y = load_dataset('dev')\n\nprint('Finished loading dataset.')\n\nclass CBOW(nn.Module):\n def __init__(self, num_labels, vocab_size, embedding_dim, pretrained_embed_path, pad_idx):\n super(CBOW, self).__init__()\n self.emb = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n self.l1 = nn.Linear(4*embedding_dim, args.hidden_dim)\n self.l2 = nn.Linear(args.hidden_dim, args.hidden_dim)\n self.l3 = nn.Linear(args.hidden_dim, args.hidden_dim)\n self.l4 = nn.Linear(args.hidden_dim, num_labels)\n self.relu = nn.ReLU()\n self.pad_idx = pad_idx\n\n # self.init_weights(pretrained_embed_path)\n\n def init_weights(self, pretrained_embed_path):\n # return\n self.emb.weight.data.copy_(pretrained_embed_path)\n self.emb.weight.requires_grad = False\n\n def forward(self, x1, x2):\n emb_out1 = self.emb(x1)\n mask1 = 1 - (x1 == self.pad_idx).float()\n batch_len = torch.sum(mask1, dim=1).unsqueeze(1)\n sentence_emb1 = torch.div(torch.sum(emb_out1, dim=1) , batch_len)\n\n emb_out2 = self.emb(x2)\n mask2 = 1 - (x2 == self.pad_idx).float()\n batch_len = torch.sum(mask2, dim=1).unsqueeze(1)\n sentence_emb2 = torch.div(torch.sum(emb_out2, dim=1) , batch_len)\n\n # sentence_emb = torch.cat((sentence_emb1, sentence_emb2), dim=1)\n sentence_emb = torch.cat((sentence_emb1, sentence_emb2, torch.abs(sentence_emb1-sentence_emb2), sentence_emb1*sentence_emb2), dim=1)\n\n out = self.l4(self.relu(self.l3(self.relu(self.l2(self.relu(self.l1(sentence_emb)))))))\n return out\n\ncbow = CBOW(len(gold_label2idx.keys()), len(vocab), embedding_dim, emb, w2idx['<pad>']).to(device)\ncbow = torch.load(args.model)\n# cbow.init_weights(emb)\noptimizer = optim.Adam(cbow.parameters(), lr=1e-4)\ncriterion = nn.CrossEntropyLoss()\n\ndef pad_sequences(s):\n pad_token = w2idx['<pad>']\n # print(s)\n lengths = [len(s1) for s1 in s]\n longest_sent = max(lengths)\n padded_X = np.ones((args.batch_size, longest_sent), dtype=np.int64) * pad_token\n for i, x_len in enumerate(lengths):\n sequence = s[i]\n padded_X[i, 0:x_len] = sequence[:x_len]\n # print(padded_X)\n return padded_X\n\n\ndef evaluate(data_source):\n cbow.eval()\n total_loss = 0\n total_acc = 0\n\n data_s1 = data_source[0]\n data_s2 = data_source[1]\n data_y = data_source[2]\n\n num_iterations = len(data_y) // args.batch_size\n with torch.no_grad():\n for i in range(num_iterations):\n\n batch_s1 = 
pad_sequences(data_s1[i * args.batch_size : (i+1) * args.batch_size])\n batch_s2 = pad_sequences(data_s2[i * args.batch_size : (i+1) * args.batch_size])\n batch_y = data_y[i * args.batch_size : (i+1) * args.batch_size]\n\n batch_s1 = torch.LongTensor(batch_s1).to(device)\n batch_s2 = torch.LongTensor(batch_s2).to(device)\n batch_y = torch.LongTensor(batch_y).to(device)\n\n predictions = cbow(batch_s1, batch_s2)\n\n predictions_np = np.argmax(F.softmax(predictions, dim=1).cpu().detach().numpy(), axis=1)\n acc = np.mean(predictions_np == batch_y.cpu().numpy())\n\n loss = criterion(predictions, batch_y)\n # print(loss)\n\n total_loss += loss.item()\n total_acc += acc\n\n curr_loss = total_loss / num_iterations\n curr_acc = total_acc / num_iterations\n\n print('|lr {:02.10f} | loss {:5.2f} | acc {:5.2f}' \\\n .format(optimizer.param_groups[0]['lr'], curr_loss, curr_acc))\n\n return curr_loss, curr_acc\n\nevaluate((test_s1, test_s2, test_y))\n" ]
[ [ "torch.abs", "torch.nn.CrossEntropyLoss", "torch.LongTensor", "torch.nn.functional.softmax", "numpy.random.seed", "torch.load", "torch.cat", "torch.manual_seed", "torch.sum", "torch.nn.Embedding", "numpy.ones", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.nn.ReLU" ] ]
seqsense/f-pointnet
[ "d20bb1393dfee0c969f02fc30dfbdcb756317020" ]
[ "infer/prep_data_infer.py" ]
[ "import os\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__)) # '../frustum-pointnets/infer'\nROOT_DIR = os.path.dirname(BASE_DIR) # '../frustum-pointnets'\nsys.path.append(ROOT_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'mayavi'))\n#sys.path.append(os.path.join(ROOT_DIR, 'train'))\n\nif os.path.exists('/opt/ros/kinetic/lib/python2.7/dist-packages/'):\n sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\n\nimport argparse\nimport time\nimport cv2\nimport numpy as np\nimport mxnet as mx\nimport importlib\nimport matplotlib.pyplot as plt\nimport gluoncv\nimport kitti.kitti_util as utils\nfrom train.test import test_from_rgb_detection, get_session_and_ops\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\nparser.add_argument('--model', default='frustum_pointnets_v1', help='Model name [default: frustum_pointnets_v1]')\nparser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size for inference [default: 32]')\nparser.add_argument('--output', default='test_results', help='output file/folder name [default: test_results]')\nparser.add_argument('--data_path', default=None, help='frustum dataset pickle filepath [default: None]')\nparser.add_argument('--from_rgb_detection', action='store_true', help='test from dataset files from rgb detection.')\nparser.add_argument('--idx_path', default=None, help='filename of txt where each line is a data idx, used for rgb detection -- write <id>.txt for all frames. [default: None]')\nparser.add_argument('--dump_result', action='store_true', help='If true, also dump results to .pickle file')\nFLAGS = parser.parse_args()\n\n# Set training configurations\nBATCH_SIZE = FLAGS.batch_size\nMODEL_PATH = FLAGS.model_path\nGPU_INDEX = FLAGS.gpu\nNUM_POINT = FLAGS.num_point # 1024\nMODEL = importlib.import_module(FLAGS.model)\nNUM_CLASSES = 2\nNUM_CHANNEL = 4\n\n#raw_input = input()\n\nclass calib_infer():\n ''' Calibration matrices and utils\n 3d XYZ in <label>.txt are in rect camera coord.\n 2d box xy are in image2 coord\n Points in <lidar>.bin are in Velodyne coord.\n\n y_image2 = P^2_rect * x_rect\n y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo\n x_ref = Tr_velo_to_cam * x_velo\n x_rect = R0_rect * x_ref\n\n P^2_rect = [f^2_u, 0, c^2_u, -f^2_u b^2_x;\n 0, f^2_v, c^2_v, -f^2_v b^2_y;\n 0, 0, 1, 0]\n = K * [1|t]\n\n image2 coord:\n ----> x-axis (u)\n |\n |\n v y-axis (v)\n\n velodyne coord:\n front x, left y, up z\n\n rect/ref camera coord:\n right x, down y, front z\n '''\n def __init__(self, calib_dir):\n calibs = self.read_calib_file(calib_dir)\n # Tr_velo_to_cam [4, 4]\n self.V2C = np.zeros([3, 4])\n self.V2C[:, :3] = np.reshape(calibs['R'], [3, 3])\n self.V2C[:, 3:4] = np.reshape(calibs['T'], [3, 1])\n self.C2V = utils.inverse_rigid_trans(self.V2C)\n # P2\n self.P = np.reshape(calibs['P_rect_02'], [3,4])\n # R0\n self.R0 = np.reshape(calibs['R_rect_00'], [3,3])\n # Camera intrinsics and extrinsics\n self.c_u = self.P[0, 2]\n self.c_v = self.P[1, 2]\n self.f_u = self.P[0, 0]\n self.f_v = self.P[1, 1]\n self.b_x = self.P[0, 3] / (-self.f_u) # relative\n self.b_y = self.P[1, 3] / (-self.f_v)\n\n def read_calib_file(self, calib_dir):\n data = {}\n cam_to_cam_file = os.path.join(calib_dir, 'calib_cam_to_cam.txt')\n 
velo_to_cam_file = os.path.join(calib_dir, 'calib_velo_to_cam.txt')\n with open(cam_to_cam_file, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line)==0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n with open(velo_to_cam_file, 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line)==0: continue\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data\n\n def cart2hom(self, pts_3d):\n ''' Input: nx3 points in Cartesian\n Oupput: nx4 points in Homogeneous by pending 1\n '''\n n = pts_3d.shape[0]\n pts_3d_hom = np.hstack((pts_3d, np.ones((n,1))))\n return pts_3d_hom\n\n # ===========================\n # ------- 3d to 3d ----------\n # ===========================\n def project_velo_to_ref(self, pts_3d_velo):\n pts_3d_velo = self.cart2hom(pts_3d_velo) # nx4\n return np.dot(pts_3d_velo, np.transpose(self.V2C))\n\n def project_ref_to_velo(self, pts_3d_ref):\n pts_3d_ref = self.cart2hom(pts_3d_ref) # nx4\n return np.dot(pts_3d_ref, np.transpose(self.C2V))\n\n def project_rect_to_ref(self, pts_3d_rect):\n ''' Input and Output are nx3 points '''\n return np.transpose(np.dot(np.linalg.inv(self.R0), np.transpose(pts_3d_rect)))\n\n def project_ref_to_rect(self, pts_3d_ref):\n ''' Input and Output are nx3 points '''\n return np.transpose(np.dot(self.R0, np.transpose(pts_3d_ref)))\n\n def project_rect_to_velo(self, pts_3d_rect):\n ''' Input: nx3 points in rect camera coord.\n Output: nx3 points in velodyne coord.\n '''\n pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)\n return self.project_ref_to_velo(pts_3d_ref)\n\n def project_velo_to_rect(self, pts_3d_velo):\n pts_3d_ref = self.project_velo_to_ref(pts_3d_velo)\n return self.project_ref_to_rect(pts_3d_ref)\n\n # ===========================\n # ------- 3d to 2d ----------\n # ===========================\n def project_rect_to_image(self, pts_3d_rect):\n ''' Input: nx3 points in rect camera coord.\n Output: nx2 points in image2 coord.\n '''\n pts_3d_rect = self.cart2hom(pts_3d_rect)\n pts_2d = np.dot(pts_3d_rect, np.transpose(self.P)) # nx3\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n return pts_2d[:, 0:2]\n\n def project_velo_to_image(self, pts_3d_velo):\n ''' Input: nx3 points in velodyne coord.\n Output: nx2 points in image2 coord.\n '''\n pts_3d_rect = self.project_velo_to_rect(pts_3d_velo)\n return self.project_rect_to_image(pts_3d_rect)\n\n # ===========================\n # ------- 2d to 3d ----------\n # ===========================\n def project_image_to_rect(self, uv_depth):\n ''' Input: nx3 first two channels are uv, 3rd channel\n is depth in rect camera coord.\n Output: nx3 points in rect camera coord.\n '''\n n = uv_depth.shape[0]\n x = ((uv_depth[:, 0] - self.c_u) * uv_depth[:, 2]) / self.f_u + self.b_x\n y = ((uv_depth[:, 1] - self.c_v) * uv_depth[:, 2]) / self.f_v + self.b_y\n pts_3d_rect = np.zeros((n, 3))\n pts_3d_rect[:, 0] = x\n pts_3d_rect[:, 1] = y\n pts_3d_rect[:, 2] = uv_depth[:, 2]\n return pts_3d_rect\n\n def project_image_to_velo(self, uv_depth):\n pts_3d_rect = self.project_image_to_rect(uv_depth)\n return self.project_rect_to_velo(pts_3d_rect)\n\n\nclass kitti_object_infer():\n def __init__(self, 
root_dir):\n self.root_dir = root_dir\n self.num_samples = 109\n\n self.image_dir = os.path.join(self.root_dir, 'image_02/data')\n self.lidar_dir = os.path.join(self.root_dir, 'velodyne_points/data')\n self.calib_dir = os.path.join(self.root_dir, '2011_09_26_calib/2011_09_26')\n # self.image_dir = os.path.join(self.root_dir, 'image_02\\\\data')\n # self.calib_dir = os.path.join(self.root_dir, '2011_09_26_calib\\\\2011_09_26')\n # self.lidar_dir = os.path.join(self.root_dir, 'velodyne_points\\\\data')\n\n def __len__(self):\n return self.num_samples\n\n def get_image(self, idx):\n assert(idx < self.num_samples)\n img_filename = os.path.join(self.image_dir, '%010d.png'%(idx))\n #img_filename = os.path.join(self.image_dir, '0000000000.png')\n print('filename: ', img_filename)\n return utils.load_image(img_filename), img_filename\n\n def get_lidar(self, idx):\n assert(idx < self.num_samples)\n lidar_filename = os.path.join(self.lidar_dir, '%010d.bin'%(idx))\n #lidar_filename = os.path.join(self.lidar_dir, '0000000000.bin')\n return utils.load_velo_scan(lidar_filename)\n\n def get_calibration(self):\n return calib_infer(self.calib_dir)\n\ndef get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax,\n return_more=False, clip_distance=2.0):\n ''' Filter lidar points, keep those in image FOV '''\n pts_2d = calib.project_velo_to_image(pc_velo)\n fov_inds = (pts_2d[:, 0] < xmax) & (pts_2d[:, 0] >= xmin) & \\\n (pts_2d[:, 1] < ymax) & (pts_2d[:, 1] >= ymin)\n fov_inds = fov_inds & (pc_velo[:, 0] > clip_distance)\n imgfov_pc_velo = pc_velo[fov_inds, :]\n if return_more:\n return imgfov_pc_velo, pts_2d, fov_inds\n else:\n return imgfov_pc_velo # [m, 4]\n\ndef show_lidar(pc_velo, calib, fig, img_fov=False, img_width=None, img_height=None):\n ''' Show all LiDAR points.\n Draw 3d box in LiDAR point cloud (in velo coord system) '''\n if 'mlab' not in sys.modules: import mayavi.mlab as mlab\n from viz_util import draw_lidar\n\n #mlab.clf(fig)\n print(('All point num: ', pc_velo.shape[0]))\n #fig = mlab.figure(figure=None, bgcolor=(0,0,0), fgcolor=None, engine=None, size=(1000, 500))\n if img_fov:\n pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0,\n img_width, img_height)\n print(('FOV point num: ', pc_velo.shape[0]))\n draw_lidar(pc_velo, fig=fig)\n mlab.show(30)\n\ndef show_lidar_on_image(pc_velo, img, calib, img_width, img_height):\n ''' Project LiDAR points to image '''\n imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(pc_velo,\n calib, 0, 0, img_width, img_height, True)\n imgfov_pts_2d = pts_2d[fov_inds,:]\n imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)\n\n cmap = plt.cm.get_cmap('hsv', 256)\n cmap = np.array([cmap(i) for i in range(256)])[:,:3]*255\n\n for i in range(imgfov_pts_2d.shape[0]):\n depth = imgfov_pc_rect[i,2]\n color = cmap[int(640.0/depth),:]\n cv2.circle(img, (int(np.round(imgfov_pts_2d[i,0])),\n int(np.round(imgfov_pts_2d[i,1]))),\n 2, color=tuple(color), thickness=-1)\n #Image.fromarray(img).show()\n cv2.imshow('lidar on image', img)\n cv2.waitKey(30)\n return img\n\ndef transform_bbox_inverse(bbox_lists, img_ori_shape, img_shape):\n # 将yolo得出的bbox映射回原图像\n # img_shape: (w, h, c) , inputs of YOLO\n # img_ori_shape: (w, h, c), origin image shape\n w_ori, h_ori, _ = img_ori_shape\n w, h, _ = img_shape\n scale_w = w_ori / w\n scale_h = h_ori / h\n bbox_lists[:, 0] *= scale_w\n bbox_lists[:, 2] *= scale_w\n bbox_lists[:, 1] *= scale_h\n bbox_lists[:, 3] *= scale_h\n\n bbox_lists = bbox_lists.astype(int)\n return bbox_lists\n\ndef get_2d_box_yolo(img, 
net):\n '''\n :param img: ndarray, BGR\n net: gluoncv model_zoo\n\n :return: NDArray (mxnet)\n class_IDs: [batch, 100, 1], 使用时仅用{'Car_6': 0, 'Pedestrian_14': 1, 'Cyclist_1': 2}\n 0 1 2 3 4 5 6 7 8 9 10\n aeroplane bicycle bird boat bottle bus car cat chair cow diningtable\n 11 12 13 14 15 16 17 18 19\n dog horse motorbike person pottedplant sheep sofa train tvmonitor\n\n scores: [batch, 100, 1]\n bounding_boxes: [batch, 100, 4], [xmin, ymin, xmax, ymax]\n '''\n #net = gluoncv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=True)\n img_ori = img\n img = mx.nd.array(img[:, :, ::-1])\n x, img = gluoncv.data.transforms.presets.yolo.transform_test(img, short = 512)\n class_IDs, scores, bounding_boxs = net(x.as_in_context(mx.gpu(0)))\n\n # 选出检测到的物体\n class_IDs, scores, bounding_boxs = class_IDs.asnumpy(), scores.asnumpy(), bounding_boxs.asnumpy()\n class_id_index = np.where(class_IDs > -1)\n class_IDs = class_IDs[class_id_index]\n scores = scores[class_id_index]\n bounding_boxs = bounding_boxs[:, :len(class_IDs), :].squeeze(0)\n\n # 去掉车、人、自行车以外的物体\n class_id_index = [i for i, e in enumerate(class_IDs) if e in [6, 14, 1]]\n class_IDs = class_IDs[class_id_index]\n scores = scores[class_id_index]\n bounding_boxs = bounding_boxs[class_id_index, :]\n bounding_boxs = transform_bbox_inverse(bounding_boxs, img_ori.shape, img.shape)\n\n\n return class_IDs, scores, bounding_boxs\n\ndef extract_data(dataset, net, data_idx):\n type_whitelist = [6, 14, 1] # 6:car 14: person 1:bicycle\n id_list = []\n box2d_list = [] # [xmin,ymin,xmax,ymax]\n type_list = []\n prob_list = []\n input_list = [] # channel number = 4, xyz,intensity in rect camera coord\n frustum_angle_list = []\n\n img, _ = dataset.get_image(data_idx)\n calib = dataset.get_calibration()\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])\n\n pc_rect[:, 3] = pc_velo[:, 3]\n img_height, img_width, img_channel = img.shape\n det_type_list, det_prob_list, det_box2d_list = get_2d_box_yolo(img, net) # 0.8s\n show_image_with_2d_boxes(img, det_box2d_list)\n\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov( \\\n pc_velo[:, 0:3], calib, 0, 0, img_width, img_height, True)\n\n for obj_idx in range(len(det_type_list)):\n if det_type_list[obj_idx] not in type_whitelist : continue\n\n box2d = det_box2d_list[obj_idx]\n xmin, ymin, xmax, ymax = box2d\n box_fov_inds = (pc_image_coord[:, 0] < xmax) & \\\n (pc_image_coord[:, 0] >= xmin) & \\\n (pc_image_coord[:, 1] < ymax) & \\\n (pc_image_coord[:, 1] >= ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds, :]\n #print('pc_in_fov: ', pc_in_box_fov.shape[0])\n if pc_in_box_fov.shape[0] == 0:\n continue\n # get frustum angle\n box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])\n uvdepth = np.zeros((1, 3))\n uvdepth[0, 0:2] = box2d_center\n uvdepth[0, 2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],\n box2d_center_rect[0, 0])\n\n id_list.append(obj_idx)\n box2d_list.append(np.array([xmin,ymin,xmax,ymax]))\n input_list.append(pc_in_box_fov)\n type_list.append(det_type_list[obj_idx])\n prob_list.append(det_prob_list[obj_idx])\n frustum_angle_list.append(frustum_angle)\n\n data = {}\n data['id_list'] = id_list\n data['box2d'] = box2d_list\n data['pc_in_box'] = input_list\n data['type'] = type_list\n data['prob'] = prob_list\n data['frustum_angle'] = frustum_angle_list\n 
return data\n\ndef rotate_pc_along_y(pc, rot_angle):\n '''\n Input:\n pc: numpy array (N,C), first 3 channels are XYZ\n z is facing forward, x is left ward, y is downward\n rot_angle: rad scalar\n Output:\n pc: updated pc with XYZ rotated\n '''\n cosval = np.cos(rot_angle)\n sinval = np.sin(rot_angle)\n rotmat = np.array([[cosval, -sinval],[sinval, cosval]])\n pc[:,[0,2]] = np.dot(pc[:,[0,2]], np.transpose(rotmat))\n return pc\n\nclass frustum_data_infer():\n def __init__(self, data, npoints, random_flip = False, random_shift = False,\n rotate_to_center = False, one_hot = False):\n self.npoints = npoints\n self.random_flip = random_flip\n self.random_shift = random_shift\n self.rotate_to_center = rotate_to_center\n self.one_hot = one_hot\n\n self.id_list = data['id_list']\n self.box2d_list = data['box2d']\n self.input_list = data['pc_in_box']\n self.type_list = data['type']\n self.frustum_angle_list = data['frustum_angle']\n self.prob_list = data['prob']\n\n def __len__(self):\n return len(self.input_list)\n\n def __getitem__(self, index):\n rot_angle = self.get_center_view_rot_angle(index)\n # Compute one hot vector\n type2onehotclass = {'6': 0, '14': 1, '1': 2}\n if self.one_hot:\n cls_type = str(int(self.type_list[index]))\n # print('cls_type: ', cls_type)\n assert (cls_type in ['6', '14', '1'])\n one_hot_vec = np.zeros((3))\n one_hot_vec[type2onehotclass[cls_type]] = 1\n\n # Get point cloud\n if self.rotate_to_center:\n point_set = self.get_center_view_point_set(index)\n else:\n point_set = self.input_list[index]\n\n # Resample\n if point_set.shape[0] > 0:\n choice = np.random.choice(point_set.shape[0], self.npoints, replace=True)\n point_set = point_set[choice, :]\n\n if self.one_hot:\n return point_set, rot_angle, self.prob_list[index], one_hot_vec\n else:\n return point_set, rot_angle, self.prob_list[index]\n\n def get_center_view_rot_angle(self, index):\n ''' Get the frustum rotation angle, it isshifted by pi/2 so that it\n can be directly used to adjust GT heading angle '''\n return np.pi/2.0 + self.frustum_angle_list[index]\n\n def get_center_view_point_set(self, index):\n ''' Frustum rotation of point clouds.\n NxC points with first 3 channels as XYZ\n z is facing forward, x is left ward, y is downward\n '''\n # Use np.copy to avoid corrupting original data\n point_set = np.copy(self.input_list[index])\n return rotate_pc_along_y(point_set, \\\n self.get_center_view_rot_angle(index))\n\ndef show_image_with_2d_boxes(img, box_list):\n for box in box_list:\n cv2.rectangle(img, (int(box[0]),int(box[1])),\n (int(box[2]),int(box[3])), (0,255,0), 2)\n cv2.imshow('img_with_box', img)\n cv2.waitKey(30)\n\ndef demo():\n if 'mlab' not in sys.modules: import mayavi.mlab as mlab\n from viz_util import draw_gt_boxes3d\n\n dataset = kitti_object_infer('/media/vdc/backup/database_backup/Chris/f-pointnet/2011_09_26_drive_0001_sync')\n calibs = dataset.get_calibration()\n #calibs = calib_infer('/media/vdc/backup/database_backup/Chris/f-pointnet/2011_09_26_drive_0001_sync/2011_09_26_calib/2011_09_26')\n #dataset = kitti_object_infer('D:\\\\Detectron_Data\\\\2011_09_26_drive_0001_sync')\n net = gluoncv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=True, ctx=mx.gpu(0))\n sess, ops = get_session_and_ops(batch_size=BATCH_SIZE, num_point=NUM_POINT)\n fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 500))\n for i in range(len(dataset)) :\n time1 = time.time()\n data = extract_data(dataset, net, i) # 0.9s\n TEST_DATASET = frustum_data_infer(data, 1024, 
rotate_to_center=True, one_hot=True) # us级\n box_3d_list = test_from_rgb_detection(TEST_DATASET, sess, ops, FLAGS.output+'.pickle', FLAGS.output) # 0.1s\n time2 = time.time()\n print('time: ', time2 - time1)\n\n mlab.clf(fig)\n img, _ = dataset.get_image(i)\n pc = dataset.get_lidar(i)[:, 0:3]\n show_lidar(pc, calibs, fig, img_fov=True, img_width=img.shape[1], img_height=img.shape[0])\n\n box3d_pts_3d_velo_list = []\n for box_3d in box_3d_list:\n box3d_pts_3d_velo = calibs.project_rect_to_velo(box_3d)\n box3d_pts_3d_velo_list.append(box3d_pts_3d_velo)\n draw_gt_boxes3d(box3d_pts_3d_velo_list, fig)\n '''\n img, _ = dataset.get_image(i)\n print('img: ', img.shape)\n pc = dataset.get_lidar(i)[:, 0:3]\n cv2.imshow('0', img)\n #show_lidar(pc, calibs, fig, img_fov = False, img_width = img.shape[1], img_height = img.shape[0])\n #show_lidar_on_image(pc, img, calibs, img_width=img.shape[1], img_height=img.shape[0])\n #cv2.waitKey(1)\n class_IDs, scores, bounding_boxs = get_2d_box_yolo(img, net)\n print('shape: ', class_IDs.shape, scores.shape, bounding_boxs.shape)\n '''\n if i % 10 == 0:\n input()\n input()\n\nif __name__ == '__main__':\n print('start')\n demo()\n" ]
[ [ "matplotlib.pyplot.cm.get_cmap", "numpy.random.choice", "numpy.reshape", "numpy.linalg.inv", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.ones", "numpy.copy", "numpy.round", "numpy.zeros_like", "numpy.transpose", "numpy.array", "numpy.where", "numpy.zeros" ] ]
VicHug27/web-scraping-challenge
[ "f2ee3a7eeabeca473cbf0e712a17e79e8eec45d6" ]
[ "scrape_mars.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Import dependencies and setup\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nimport requests\nimport pymongo\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\n# In[2]:\n\n\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\n# ### Step1 Scraping\n\n# In[3]:\n\n\n#NASA Mars News\nurl = \"https://mars.nasa.gov/news/\"\nbrowser.visit(url)\n\nhtml = browser.html\nsoup = bs(html, 'html.parser')\n\n\n# In[4]:\n\n\n# Search for news title\nt_results = soup.find_all('div', class_='content_title')\n\n# Search for paragraph text under news titles\np_results = soup.find_all('div', class_='article_teaser_body')\n\n# Extract first title and paragraph, and assign to variables\ntitle = t_results[0].text\nparagraph = p_results[0].text\n\nprint(title)\nprint(paragraph)\n\n\n# In[5]:\n\n\n#JPL Mars image\nurl = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'\nbrowser.visit(url)\n\nhtml = browser.html\nsoup = bs(html, 'html.parser')\n \nfeatured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/image/featured/mars2.jpg'\nprint(featured_image_url )\n\n\n# In[6]:\n\n\n#Mars Facts\nurl = 'https://space-facts.com/mars/'\nfacts = pd.read_html(\"https://space-facts.com/mars/\")[0]\nprint(facts)\n\n\n# In[7]:\n\n\n#Mars Hemispheres\nurl = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\nbrowser.visit(url)\n\n\n# ### Step2 MongoDB and Flask\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_html" ] ]
manzar96/st7
[ "8dac6fa3497e5a3594766a232a9e8436120e9563" ]
[ "experiments/task71/features_extraction/train_pca_clf.py" ]
[ "import pickle\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nfrom core.utils.parser import get_feat_parser\n\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC,LinearSVC,LinearSVR\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom xgboost import XGBClassifier\n# from EvoDAG.model import EvoDAG,EvoDAGE\n# from EvoMSA.base import EvoMSA\nfrom sklearn.decomposition import PCA\n\nparser = get_feat_parser()\noptions = parser.parse_args()\n\nif options.features is None:\n raise IOError(\"Enter features!\")\n\ndict = pickle.load(open(options.features, \"rb\"))\n\n\nfeats=[]\nhumor = []\nfor key in dict.keys():\n value = dict[key]\n feats.append(value[0].tolist())\n humor.append(value[1].tolist())\nfeats = np.array(feats)\nhumor = np.array(humor)\n\n\n\nif options.clf == 'GaussianProc':\n clf = GaussianProcessClassifier()\nelif options.clf == \"SVC\":\n clf = SVC()\nelif options.clf == \"LinearSVC\":\n clf = LinearSVC(max_iter=10000,dual=False)\nelif options.clf == \"DecisionTree\":\n clf = DecisionTreeClassifier()\nelif options.clf == \"RandomForest\":\n clf = RandomForestClassifier()\nelif options.clf == \"AdaBoost\":\n clf = AdaBoostClassifier(n_estimators=100)\nelif options.clf == \"XGBoost\":\n clf = XGBClassifier()\nelif options.clf == \"KNN\":\n clf = KNeighborsClassifier(n_neighbors=5)\nelif options.clf == \"GaussianNB\":\n clf = GaussianNB()\nelif options.clf == \"RBF\":\n kernel = 1.0 * RBF(1.0)\n clf = GaussianProcessClassifier(kernel=kernel, random_state=0)\nelif options.clf == \"EvoDAGE\":\n clf = EvoDAGE(n_estimators=30, n_jobs=4)\nelif options.clf == \"EvoDAG\":\n clf = EvoDAG()\n# elif options.clf == \"EvoMSA\":\n# clf = EvoMSa(Emo=True, lang='es')\nelse:\n raise IOError(\"Please select a valid clf!\")\n\n# perform kfold cross-validation with k=5\nkf = KFold(n_splits=2)\npca = PCA(n_components=2)\n\nf1 = []\nacc = []\n\nfor train_index, test_index in kf.split(humor):\n X_train,X_test = feats[train_index], feats[test_index]\n X_train = pca.fit_transform(X_train)\n X_test = pca.transform(X_test)\n y_train,y_test = humor[train_index],humor[test_index]\n clf.fit(X_train, y_train)\n pred = clf.predict(X_test)\n f1.append(f1_score(y_test, pred))\n acc.append(accuracy_score(y_test, pred))\n\nprint(\"F1-score: \",np.mean(f1))\nprint(\"Accuracy score: \",np.mean(acc))\n\nif options.clf == 'GaussianProc':\n clf = GaussianProcessClassifier()\nelif options.clf == \"SVC\":\n clf = SVC()\nelif options.clf == \"LinearSVC\":\n clf = LinearSVC(max_iter=10000,dual=False)\nelif options.clf == \"DecisionTree\":\n clf = DecisionTreeClassifier()\nelif options.clf == \"RandomForest\":\n clf = RandomForestClassifier()\nelif options.clf == \"AdaBoost\":\n clf = AdaBoostClassifier()\nelif options.clf == \"XGBoost\":\n clf = XGBClassifier()\nelif options.clf == \"KNN\":\n clf = KNeighborsClassifier(n_neighbors=5)\nelif options.clf == \"GaussianNB\":\n clf = GaussianNB()\nelif options.clf == \"RBF\":\n kernel = 1.0 * RBF(1.0)\n clf = GaussianProcessClassifier(kernel=kernel, random_state=0)\nelif options.clf == \"EvoDAGE\":\n clf = EvoDAGE(n_estimators=30, n_jobs=4)\nelif options.clf == 
\"EvoDAG\":\n clf = EvoDAG()\n# elif options.clf == \"EvoMSA\":\n# clf = EvoMSa(Emo=True, lang='es')\nelse:\n raise IOError(\"Please select a valid clf!\")\n\npca = PCA(n_components=3)\nfeats = pca.fit_transform(feats)\n\nclf.fit(feats,humor)\nif not os.path.exists(options.ckpt):\n os.makedirs(options.ckpt)\npickle.dump(clf, open(os.path.join(options.ckpt,\"{}.pth\".format(options.clf)),\n \"wb\"))\npickle.dump(pca, open(os.path.join(options.ckpt,\"pca.pkl\"),\"wb\"))" ]
[ [ "sklearn.naive_bayes.GaussianNB", "sklearn.ensemble.RandomForestClassifier", "sklearn.gaussian_process.kernels.RBF", "sklearn.model_selection.KFold", "sklearn.neighbors.KNeighborsClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier", "numpy.mean", "sklearn.svm.SVC", "sklearn.gaussian_process.GaussianProcessClassifier", "sklearn.svm.LinearSVC", "sklearn.metrics.f1_score", "numpy.array", "sklearn.decomposition.PCA", "sklearn.metrics.accuracy_score" ] ]
shrekris-anyscale/ray
[ "d36fd77548e1d83d6510b478f72ac668434458af", "d36fd77548e1d83d6510b478f72ac668434458af" ]
[ "python/ray/train/tests/test_huggingface_trainer.py", "python/ray/air/tests/test_huggingface_predictor.py" ]
[ "import pandas as pd\nimport pytest\nfrom unittest.mock import patch\nfrom ray.train.huggingface.huggingface_utils import TrainReportCallback\n\nfrom transformers import (\n AutoConfig,\n AutoModelForCausalLM,\n AutoTokenizer,\n Trainer,\n TrainingArguments,\n)\nfrom transformers.trainer_callback import TrainerState\n\nimport ray.data\nfrom ray.train.huggingface import HuggingFaceTrainer\nfrom ray.air.predictors.integrations.huggingface import HuggingFacePredictor\nfrom ray.air.batch_predictor import BatchPredictor\n\nfrom ray.train.tests._huggingface_data import train_data, validation_data\n\n# 16 first rows of tokenized wikitext-2-raw-v1 training & validation\ntrain_df = pd.read_json(train_data)\nvalidation_df = pd.read_json(validation_data)\nprompts = pd.DataFrame(\n [\"Complete me\", \"And me\", \"Please complete\"], columns=[\"sentences\"]\n)\n\n# We are only testing Casual Language Modelling here\n\nmodel_checkpoint = \"sshleifer/tiny-gpt2\"\ntokenizer_checkpoint = \"sgugger/gpt2-like-tokenizer\"\n\n\[email protected]\ndef ray_start_4_cpus():\n address_info = ray.init(num_cpus=4)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\ndef train_function(train_dataset, eval_dataset=None, **config):\n model_config = AutoConfig.from_pretrained(model_checkpoint)\n model = AutoModelForCausalLM.from_config(model_config)\n training_args = TrainingArguments(\n f\"{model_checkpoint}-wikitext2\",\n evaluation_strategy=\"epoch\",\n num_train_epochs=config.get(\"epochs\", 3),\n learning_rate=2e-5,\n weight_decay=0.01,\n disable_tqdm=True,\n no_cuda=True,\n save_strategy=config.get(\"save_strategy\", \"no\"),\n )\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n )\n return trainer\n\n\[email protected](\"save_strategy\", [\"no\", \"epoch\"])\ndef test_e2e(ray_start_4_cpus, save_strategy):\n ray_train = ray.data.from_pandas(train_df)\n ray_validation = ray.data.from_pandas(validation_df)\n scaling_config = {\"num_workers\": 2, \"use_gpu\": False}\n trainer = HuggingFaceTrainer(\n trainer_init_per_worker=train_function,\n trainer_init_config={\"epochs\": 4, \"save_strategy\": save_strategy},\n scaling_config=scaling_config,\n datasets={\"train\": ray_train, \"evaluation\": ray_validation},\n )\n result = trainer.fit()\n\n assert result.metrics[\"epoch\"] == 4\n assert result.metrics[\"training_iteration\"] == 4\n assert result.checkpoint\n\n trainer2 = HuggingFaceTrainer(\n trainer_init_per_worker=train_function,\n trainer_init_config={\"epochs\": 5}, # this will train for 1 epoch: 5 - 4 = 1\n scaling_config=scaling_config,\n datasets={\"train\": ray_train, \"evaluation\": ray_validation},\n resume_from_checkpoint=result.checkpoint,\n )\n result2 = trainer2.fit()\n\n assert result2.metrics[\"epoch\"] == 5\n assert result2.metrics[\"training_iteration\"] == 1\n assert result2.checkpoint\n\n predictor = BatchPredictor.from_checkpoint(\n result2.checkpoint,\n HuggingFacePredictor,\n task=\"text-generation\",\n tokenizer=AutoTokenizer.from_pretrained(tokenizer_checkpoint),\n )\n\n predictions = predictor.predict(ray.data.from_pandas(prompts))\n assert predictions.count() == 3\n\n\ndef test_reporting():\n reports = []\n\n def _fake_report(**kwargs):\n reports.append(kwargs)\n\n with patch(\"ray.train.report\", _fake_report):\n state = TrainerState()\n report_callback = TrainReportCallback()\n report_callback.on_epoch_begin(None, state, None)\n state.epoch = 0.5\n 
report_callback.on_log(None, state, None, logs={\"log1\": 1})\n state.epoch = 1\n report_callback.on_log(None, state, None, logs={\"log2\": 1})\n report_callback.on_epoch_end(None, state, None)\n report_callback.on_epoch_begin(None, state, None)\n state.epoch = 1.5\n report_callback.on_log(None, state, None, logs={\"log1\": 1})\n state.epoch = 2\n report_callback.on_log(None, state, None, logs={\"log2\": 1})\n report_callback.on_epoch_end(None, state, None)\n report_callback.on_train_end(None, state, None)\n\n assert len(reports) == 2\n assert \"log1\" in reports[0]\n assert \"log2\" in reports[0]\n assert reports[0][\"epoch\"] == 1\n assert \"log1\" in reports[1]\n assert \"log2\" in reports[1]\n assert reports[1][\"epoch\"] == 2\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n", "import os\nimport pandas as pd\nimport pytest\n\nfrom transformers import (\n AutoConfig,\n AutoModelForCausalLM,\n AutoTokenizer,\n)\nfrom transformers.pipelines import pipeline\n\nimport ray\nfrom ray.air.preprocessor import Preprocessor\nfrom ray.air.predictors.integrations.huggingface import HuggingFacePredictor\n\nprompts = pd.DataFrame(\n [\"Complete me\", \"And me\", \"Please complete\"], columns=[\"sentences\"]\n)\n\n# We are only testing Casual Language Modeling here\n\nmodel_checkpoint = \"sshleifer/tiny-gpt2\"\ntokenizer_checkpoint = \"sgugger/gpt2-like-tokenizer\"\n\n\[email protected]\ndef ray_start_runtime_env():\n # Requires at least torch 1.11 to pass\n # TODO update torch version in requirements instead\n runtime_env = {\"pip\": [\"torch==1.11.0\"]}\n address_info = ray.init(runtime_env=runtime_env)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\nclass DummyPreprocessor(Preprocessor):\n def transform_batch(self, df):\n self._batch_transformed = True\n return df\n\n\ndef test_predict(tmpdir, ray_start_runtime_env):\n @ray.remote\n def test(use_preprocessor):\n os.chdir(tmpdir)\n if use_preprocessor:\n preprocessor = DummyPreprocessor()\n else:\n preprocessor = None\n model_config = AutoConfig.from_pretrained(model_checkpoint)\n model = AutoModelForCausalLM.from_config(model_config)\n predictor = HuggingFacePredictor(\n pipeline=pipeline(\n task=\"text-generation\",\n model=model,\n tokenizer=AutoTokenizer.from_pretrained(tokenizer_checkpoint),\n ),\n preprocessor=preprocessor,\n )\n\n predictions = predictor.predict(prompts)\n\n assert len(predictions) == 3\n if preprocessor:\n assert hasattr(predictor.preprocessor, \"_batch_transformed\")\n\n ray.get(test.remote(use_preprocessor=True))\n ray.get(test.remote(use_preprocessor=False))\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n" ]
[ [ "pandas.read_json", "pandas.DataFrame" ], [ "pandas.DataFrame" ] ]
stciaischoolrnn/Practical-Time-Series-Analysis
[ "72eeabbcf2a3af742b2a114026cfd841b0ea9184" ]
[ "Chapter03/Chapter_3_tripleExponentialSmoothing.py" ]
[ "# Load modules\nfrom __future__ import print_function\nimport os\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# Change working Directory\nos.chdir('C:/Users/pp9596/Documents/02 ZSP/00 PACKT/Book - Practical Time-Series Analysis/Avishek')\n\n#read the data from into a pandas.DataFrame\nwisc_emp = pd.read_csv('datasets/wisconsin-employment-time-series.csv')\n\n#Let's find out the shape of the DataFrame\nprint('Shape of the DataFrame:', wisc_emp.shape)\n\n#Let's see first 10 rows of it\nwisc_emp.head()\n\n# plot the wisconsin employment dataset\nwisc_emp.plot()\n\n# Capture seasonality component\ndef initialize_T(x, seasonLength):\n total=0.0\n for i in range(seasonLength):\n total+=float(x[i+seasonLength]-x[i])/seasonLength\n return total\n\ninitialize_T(wisc_emp['Employment'], 12)\n\n# Initialize seasonal trend\ndef initialize_seasonalilty(x, seasonLength):\n seasons={}\n seasonsMean=[]\n num_season=int(len(x)/seasonLength)\n # Compute season average\n for i in range(num_season):\n seasonsMean.append(sum(x[seasonLength*i:seasonLength*i+seasonLength])/float(seasonLength))\n \n # compute season intial values\n for i in range(seasonLength):\n tot=0.0\n for j in range(num_season):\n tot+=x[seasonLength*j+i]-seasonsMean[j]\n seasons[i]=tot/num_season\n return seasons\n \ninitialize_seasonalilty(wisc_emp['Employment'], 12) \n \n\n# Triple Exponential Smoothing Forecast\ndef triple_exp_smoothing(x, seasonLength, alpha, beta, gamma, h):\n yhat=[]\n S = initialize_seasonalilty(x, seasonLength)\n for i in range(len(x)+h):\n if i == 0:\n F = x[0]\n T = initialize_T(x, seasonLength)\n yhat.append(x[0])\n continue\n if i >= len(x):\n m = i - len(x) + 1\n yhat.append((F + m*T) + S[i%seasonLength])\n else:\n obsval = x[i]\n F_last, F= F, alpha*(obsval-S[i%seasonLength]) + (1-alpha)*(F+T)\n T = beta * (F-F_last) + (1-beta)*T\n S[i%seasonLength] = gamma*(obsval-F) + (1-gamma)*S[i%seasonLength]\n yhat.append(F+T+S[i%seasonLength])\n return yhat\n\n# Triple exponential smoothing\nwisc_emp['TES']=triple_exp_smoothing(wisc_emp['Employment'], 12, 0.4, 0.6, 0.2, 0)\n\n### Plot Single Exponential Smoothing forecasted value\nfig = plt.figure(figsize=(5.5, 5.5))\nax = fig.add_subplot(2,1,1)\nwisc_emp['Employment'].plot(ax=ax)\nax.set_title('Beer Production')\nax = fig.add_subplot(2,1,2)\nwisc_emp['TES'].plot(ax=ax, color='r')\nax.set_title('Triple Smoothing Forecast')\nplt.savefig('plots/ch2/B07887_03_14.png', format='png', dpi=300)" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
jhurreaq/py-pde
[ "42cd3e9cc45793840ecfe244e606c39b13502658" ]
[ "pde/fields/base.py" ]
[ "\"\"\"\nDefines base classes of fields, which are discretized on grids\n\n.. codeauthor:: David Zwicker <[email protected]>\n\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nimport json\nimport logging\nimport warnings\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nfrom inspect import isabstract\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport numpy as np\n\nfrom ..grids.base import DimensionError, DomainError, GridBase, discretize_interval\nfrom ..grids.boundaries.axes import BoundariesData\nfrom ..grids.cartesian import CartesianGridBase\nfrom ..tools.cache import cached_method\nfrom ..tools.docstrings import fill_in_docstring\nfrom ..tools.misc import Number, number_array\nfrom ..tools.numba import jit, make_array_constructor\nfrom ..tools.plotting import (\n PlotReference,\n napari_add_layers,\n napari_viewer,\n plot_on_axes,\n)\nfrom ..tools.typing import ArrayLike, NumberOrArray\n\nif TYPE_CHECKING:\n from .scalar import ScalarField # @UnusedImport\n\n\nTField = TypeVar(\"TField\", bound=\"FieldBase\")\n\n\nclass RankError(TypeError):\n \"\"\"error indicating that the field has the wrong rank\"\"\"\n\n\nclass FieldBase(metaclass=ABCMeta):\n \"\"\"abstract base class for describing (discretized) fields\"\"\"\n\n _subclasses: Dict[str, Type[FieldBase]] = {} # all classes inheriting from this\n _grid: GridBase # the grid on which the field is defined\n __data_all: np.ndarray # the data on the grid including ghost points\n _data_valid: np.ndarray # the valid data without ghost points\n _label: Optional[str]\n\n def __init__(\n self,\n grid: GridBase,\n data: np.ndarray,\n *,\n label: Optional[str] = None,\n ):\n \"\"\"\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n Grid defining the space on which this field is defined\n data (:class:`~numpy.ndarray`, optional):\n Field values at the support points of the grid and the ghost cells\n label (str, optional):\n Name of the field\n \"\"\"\n self._grid = grid\n self._data_all = data\n self.label = label\n self._logger = logging.getLogger(self.__class__.__name__)\n\n def __init_subclass__(cls, **kwargs): # @NoSelf\n \"\"\"register all subclassess to reconstruct them later\"\"\"\n super().__init_subclass__(**kwargs)\n cls._subclasses[cls.__name__] = cls\n\n @property\n def data(self) -> np.ndarray:\n \"\"\":class:`~numpy.ndarray`: discretized data at the support points\"\"\"\n return self._data_valid\n\n @data.setter\n def data(self, value: NumberOrArray) -> None:\n \"\"\"set the valid data of the field\n\n Args:\n value:\n The value of the valid data. If a scalar is supplied all data points get\n the same value. 
The value of ghost cells are not changed.\n \"\"\"\n if isinstance(value, FieldBase):\n # copy data into current field\n self.assert_field_compatible(value, accept_scalar=True)\n self._data_valid[:] = value.data\n else:\n self._data_valid[:] = value\n\n @property\n def _idx_valid(self) -> Tuple[slice, ...]:\n \"\"\"tuple: slices to extract valid data from full data\"\"\"\n idx_comp = (slice(None),) * (self.__data_all.ndim - self.grid.num_axes)\n return idx_comp + self.grid._idx_valid\n\n @property\n def _data_all(self) -> np.ndarray:\n \"\"\":class:`~numpy.ndarray`: the full data including ghost cells\"\"\"\n return self.__data_all\n\n @_data_all.setter\n def _data_all(self, value: NumberOrArray) -> None:\n \"\"\"set the full data including ghost cells\n\n Args:\n value:\n The value of the full data including those for ghost cells. If a scalar\n is supplied all data points get the same value.\n \"\"\"\n if not self.writeable:\n raise ValueError(\"assignment destination is read-only\")\n\n if np.isscalar(value):\n # supplied value is a scalar\n self.__data_all[:] = value\n\n elif isinstance(value, np.ndarray):\n # check the shape of the supplied array\n if value.shape[-self.grid.num_axes :] != self.grid._shape_full:\n raise ValueError(\n f\"Supplied data has wrong shape: {value.shape} is not compatible with \"\n f\"{self.grid._shape_full}\"\n )\n # actually set the data\n self.__data_all = value\n\n else:\n raise TypeError(f\"Cannot set field values to {value}\")\n\n # set reference to valid data\n self._data_valid = self.__data_all[self._idx_valid]\n\n @property\n def _data_flat(self) -> np.ndarray:\n \"\"\":class:`~numpy.ndarray`: flat version of discretized data with ghost cells\"\"\"\n # flatten the first dimension of the internal data\n full_shape = tuple(s + 2 for s in self.grid.shape)\n return self._data_all.reshape(-1, *full_shape)\n\n @_data_flat.setter\n def _data_flat(self, value: np.ndarray) -> None:\n \"\"\"set the full data including ghost cells from a flattened array\"\"\"\n # simply set the data -> this might need to be overwritten\n self._data_all = value\n\n @property\n def writeable(self) -> bool:\n \"\"\"bool: whether the field data can be changed or not\"\"\"\n return not hasattr(self, \"_data_all\") or self._data_all.flags.writeable\n\n @writeable.setter\n def writeable(self, value: bool) -> None:\n \"\"\"set whether the field data can be changed or not\"\"\"\n self._data_all.flags.writeable = value\n self._data_valid.flags.writeable = value\n\n @property\n def label(self) -> Optional[str]:\n \"\"\"str: the name of the field\"\"\"\n return self._label\n\n @label.setter\n def label(self, value: str = None):\n \"\"\"set the new label of the field\"\"\"\n if value is None or isinstance(value, str):\n self._label = value\n else:\n raise TypeError(\"Label must be a string or None\")\n\n @classmethod\n def from_state(\n cls, attributes: Dict[str, Any], data: np.ndarray = None\n ) -> FieldBase:\n \"\"\"create a field from given state.\n\n Args:\n attributes (dict):\n The attributes that describe the current instance\n data (:class:`~numpy.ndarray`, optional):\n Data values at the support points of the grid defining the field\n \"\"\"\n # base class was chosen => select correct class from attributes\n class_name = attributes.pop(\"class\")\n\n if class_name == cls.__name__:\n raise RuntimeError(f\"Cannot reconstruct abstract class `{class_name}`\")\n\n # call possibly overwritten classmethod from subclass\n return cls._subclasses[class_name].from_state(attributes, data)\n\n 
@classmethod\n def from_file(cls, filename: str) -> FieldBase:\n \"\"\"create field by reading file\n\n Args:\n filename (str): Path to the file being read\n \"\"\"\n import h5py\n\n from .collection import FieldCollection\n\n with h5py.File(filename, \"r\") as fp:\n if \"class\" in fp.attrs:\n # this should be a field collection\n assert json.loads(fp.attrs[\"class\"]) == \"FieldCollection\"\n obj = FieldCollection._from_hdf_dataset(fp)\n\n elif len(fp) == 1:\n # a single field is stored in the data\n dataset = fp[list(fp.keys())[0]] # retrieve only dataset\n obj = cls._from_hdf_dataset(dataset) # type: ignore\n\n else:\n raise RuntimeError(\n \"Multiple data fields were found in the \"\n \"file but no FieldCollection is expected\"\n )\n return obj\n\n @classmethod\n def _from_hdf_dataset(cls, dataset) -> FieldBase:\n \"\"\"construct a field by reading data from an hdf5 dataset\"\"\"\n # copy attributes from hdf\n attributes = dict(dataset.attrs)\n\n # determine class\n class_name = json.loads(attributes.pop(\"class\"))\n field_cls = cls._subclasses[class_name]\n\n # unserialize the attributes\n attributes = field_cls.unserialize_attributes(attributes)\n return field_cls.from_state(attributes, data=dataset)\n\n @property\n def grid(self) -> GridBase:\n \"\"\"GridBase: The grid on which the field is defined\"\"\"\n return self._grid\n\n def to_file(self, filename: str, **kwargs):\n r\"\"\"store field in a file\n\n The extension of the filename determines what format is being used. If\n it ends in `.h5` or `.hdf`, the Hierarchical Data Format is used. The\n other supported format are images, where only the most typical formats\n are supported.\n\n Args:\n filename (str):\n Path where the data is stored\n metadata (dict):\n A dictionary of additional information that is stored with the file.\n Note that not all formats support metadata.\n \\**kwargs:\n Additional parameters may be supported for some formats\n \"\"\"\n extension = Path(filename).suffix.lower()\n\n if extension in {\".hdf\", \".hdf5\", \".he5\", \".h5\"}:\n # save data in hdf5 format\n import h5py\n\n with h5py.File(filename, \"w\") as fp:\n self._write_hdf_dataset(fp, **kwargs)\n\n elif extension in {\".png\", \".jpg\", \".jpeg\", \".tif\", \".pdf\", \".svg\"}:\n # save data as an image\n self._write_to_image(filename, **kwargs)\n\n else:\n raise ValueError(f\"Do not know how to save data to `*{extension}`\")\n\n def _write_hdf_dataset(self, hdf_path, key: str = \"data\"):\n \"\"\"write data to a given hdf5 path `hdf_path`\"\"\"\n # write the data\n dataset = hdf_path.create_dataset(key, data=self.data)\n\n # write attributes\n for key, value in self.attributes_serialized.items():\n dataset.attrs[key] = value\n\n def _write_to_image(self, filename: str, **kwargs):\n \"\"\"write data to image\n\n Args:\n filename (str): The path to the image that will be created\n \"\"\"\n raise NotImplementedError(f\"Cannot save {self.__class__.__name__} as an image\")\n\n @abstractmethod\n def copy(\n self: TField,\n *,\n label: str = None,\n dtype=None,\n ) -> TField:\n pass\n\n def assert_field_compatible(self, other: FieldBase, accept_scalar: bool = False):\n \"\"\"checks whether `other` is compatible with the current field\n\n Args:\n other (FieldBase):\n The other field this one is compared to\n accept_scalar (bool, optional):\n Determines whether it is acceptable that `other` is an instance of\n :class:`~pde.fields.ScalarField`.\n \"\"\"\n from .scalar import ScalarField # @Reimport\n\n # check whether they are the same class\n 
is_scalar = accept_scalar and isinstance(other, ScalarField)\n class_compatible = self.__class__ == other.__class__ or is_scalar\n if not class_compatible:\n raise TypeError(\"Fields are incompatible\")\n\n # check whether the associated grids are identical\n if not self.grid.compatible_with(other.grid):\n raise ValueError(\"Grids incompatible\")\n\n @property\n def dtype(self):\n \"\"\"returns the numpy dtype of the underlying data\"\"\"\n # this property is necessary to support np.iscomplexobj for DataFieldBases\n return self.data.dtype\n\n @property\n def is_complex(self) -> bool:\n \"\"\"bool: whether the field contains real or complex data\"\"\"\n return np.iscomplexobj(self.data) # type: ignore\n\n @property\n def attributes(self) -> Dict[str, Any]:\n \"\"\"dict: describes the state of the instance (without the data)\"\"\"\n return {\n \"class\": self.__class__.__name__,\n \"grid\": self.grid,\n \"label\": self.label,\n }\n\n @property\n def attributes_serialized(self) -> Dict[str, str]:\n \"\"\"dict: serialized version of the attributes\"\"\"\n results = {}\n for key, value in self.attributes.items():\n if key == \"grid\":\n results[key] = value.state_serialized\n else:\n results[key] = json.dumps(value)\n return results\n\n @classmethod\n def unserialize_attributes(cls, attributes: Dict[str, str]) -> Dict[str, Any]:\n \"\"\"unserializes the given attributes\n\n Args:\n attributes (dict):\n The serialized attributes\n\n Returns:\n dict: The unserialized attributes\n \"\"\"\n # base class was chosen => select correct class from attributes\n class_name = json.loads(attributes[\"class\"])\n\n if class_name == cls.__name__:\n raise RuntimeError(f\"Cannot reconstruct abstract class `{class_name}`\")\n\n # call possibly overwritten classmethod from subclass\n return cls._subclasses[class_name].unserialize_attributes(attributes)\n\n def __eq__(self, other):\n \"\"\"test fields for equality, ignoring the label\"\"\"\n if not isinstance(other, self.__class__):\n return NotImplemented\n return self.grid == other.grid and np.array_equal(self.data, other.data)\n\n def _unary_operation(self: TField, op: Callable) -> TField:\n \"\"\"perform an unary operation on this field\n\n Args:\n op (callable):\n A function calculating the result\n\n Returns:\n FieldBase: An field that contains the result of the operation.\n \"\"\"\n data = op(self.data)\n result = self.copy(dtype=data.dtype)\n result.data = data\n return result\n\n @property\n def real(self: TField) -> TField:\n \"\"\":class:`FieldBase`: Real part of the field\"\"\"\n return self._unary_operation(np.real)\n\n @property\n def imag(self: TField) -> TField:\n \"\"\":class:`FieldBase`: Imaginary part of the field\"\"\"\n return self._unary_operation(np.imag)\n\n def conjugate(self: TField) -> TField:\n \"\"\"returns complex conjugate of the field\"\"\"\n return self._unary_operation(np.conjugate)\n\n def __neg__(self):\n \"\"\"return the negative of the current field\"\"\"\n return self._unary_operation(np.negative)\n\n def _binary_operation(\n self, other, op: Callable, scalar_second: bool = True\n ) -> FieldBase:\n \"\"\"perform a binary operation between this field and `other`\n\n Args:\n other (number of FieldBase):\n The second term of the operator\n op (callable):\n A binary function calculating the result\n scalar_second (bool):\n Flag determining whether the second operator must be a scalar\n\n Returns:\n FieldBase: An field that contains the result of the operation. 
If\n `scalar_second == True`, the type of FieldBase is the same as `self`\n \"\"\"\n # determine the dtype of the output\n\n if isinstance(other, FieldBase):\n # right operator is a field\n from .scalar import ScalarField # @Reimport\n\n # determine the dtype of the result of the operation\n dtype = np.result_type(self.data, other.data)\n\n if scalar_second:\n # right operator must be a scalar or scalar field\n if not isinstance(other, ScalarField):\n raise TypeError(\"Right operator must be a scalar field\")\n self.grid.assert_grid_compatible(other.grid)\n result: FieldBase = self.copy(dtype=dtype)\n\n elif isinstance(self, ScalarField):\n # left operator is a scalar field (right can be tensor)\n self.grid.assert_grid_compatible(other.grid)\n result = other.copy(dtype=dtype)\n\n else:\n # left operator is tensor and right one might be anything\n self.assert_field_compatible(other, accept_scalar=True)\n result = self.copy(dtype=dtype)\n\n op(self.data, other.data, out=result.data)\n\n else:\n # the second operator is a number or a numpy array\n dtype = np.result_type(self.data, other)\n result = self.copy(dtype=dtype)\n op(self.data, other, out=result.data)\n\n return result\n\n def _binary_operation_inplace(\n self: TField, other, op_inplace: Callable, scalar_second: bool = True\n ) -> TField:\n \"\"\"perform an in-place binary operation between this field and `other`\n\n Args:\n other (number of FieldBase):\n The second term of the operator\n op_inplace (callable):\n A binary function storing its result in the first argument\n scalar_second (bool):\n Flag determining whether the second operator must be a scalar.\n\n Returns:\n FieldBase: The field `self` with updated data\n \"\"\"\n if isinstance(other, FieldBase):\n # right operator is a field\n from .scalar import ScalarField # @Reimport\n\n if scalar_second:\n # right operator must be a scalar\n if not isinstance(other, ScalarField):\n raise TypeError(\"Right operator must be a scalar field\")\n self.grid.assert_grid_compatible(other.grid)\n else:\n # left operator is tensor and right one might be anything\n self.assert_field_compatible(other, accept_scalar=True)\n\n # operators only affect the valid data and do not touch the ghost cells\n op_inplace(self.data, other.data, out=self.data)\n\n else:\n # the second operator is a number or a numpy array\n op_inplace(self.data, other, out=self.data)\n\n return self\n\n def __add__(self, other) -> FieldBase:\n \"\"\"add two fields\"\"\"\n return self._binary_operation(other, np.add, scalar_second=False)\n\n __radd__ = __add__\n\n def __iadd__(self: TField, other) -> TField:\n \"\"\"add `other` to the current field\"\"\"\n return self._binary_operation_inplace(other, np.add, scalar_second=False)\n\n def __sub__(self, other) -> FieldBase:\n \"\"\"subtract two fields\"\"\"\n return self._binary_operation(other, np.subtract, scalar_second=False)\n\n def __rsub__(self, other) -> FieldBase:\n \"\"\"subtract two fields\"\"\"\n return self._binary_operation(\n other, lambda x, y, out: np.subtract(y, x, out=out), scalar_second=False\n )\n\n def __isub__(self: TField, other) -> TField:\n \"\"\"add `other` to the current field\"\"\"\n return self._binary_operation_inplace(other, np.subtract, scalar_second=False)\n\n def __mul__(self, other) -> FieldBase:\n \"\"\"multiply field by value\"\"\"\n return self._binary_operation(other, np.multiply, scalar_second=False)\n\n __rmul__ = __mul__\n\n def __imul__(self: TField, other) -> TField:\n \"\"\"multiply field by value\"\"\"\n return 
self._binary_operation_inplace(other, np.multiply, scalar_second=False)\n\n def __truediv__(self, other) -> FieldBase:\n \"\"\"divide field by value\"\"\"\n return self._binary_operation(other, np.true_divide, scalar_second=True)\n\n def __itruediv__(self: TField, other) -> TField:\n \"\"\"divide field by value\"\"\"\n return self._binary_operation_inplace(other, np.true_divide, scalar_second=True)\n\n def __pow__(self, exponent: float) -> FieldBase:\n \"\"\"raise data of the field to a certain power\"\"\"\n if not np.isscalar(exponent):\n raise NotImplementedError(\"Only scalar exponents are supported\")\n return self._binary_operation(exponent, np.power, scalar_second=True)\n\n def __ipow__(self: TField, exponent: float) -> TField:\n \"\"\"raise data of the field to a certain power in-place\"\"\"\n if not np.isscalar(exponent):\n raise NotImplementedError(\"Only scalar exponents are supported\")\n self.data **= exponent\n return self\n\n def apply(\n self: TField, func: Callable, out: Optional[TField] = None, label: str = None\n ) -> TField:\n \"\"\"applies a function to the data and returns it as a field\n\n Args:\n func (callable or str):\n The (vectorized) function being applied to the data or the name\n of an operator that is defined for the grid of this field.\n out (FieldBase, optional):\n Optional field into which the data is written\n label (str, optional):\n Name of the returned field\n\n Returns:\n Field with new data. This is stored at `out` if given.\n \"\"\"\n if out is None:\n out = self.copy(label=label)\n out.data = func(self.data)\n else:\n self.assert_field_compatible(out)\n out.data[:] = func(self.data)\n if label:\n out.label = label\n return out\n\n @abstractmethod\n def get_line_data(\n self, scalar: str = \"auto\", extract: str = \"auto\"\n ) -> Dict[str, Any]:\n pass\n\n @abstractmethod\n def get_image_data(self) -> Dict[str, Any]:\n pass\n\n @abstractmethod\n def plot(self, *args, **kwargs):\n pass\n\n @abstractmethod\n def _get_napari_data(self, **kwargs) -> Dict[str, Dict[str, Any]]:\n pass\n\n def plot_interactive(self, viewer_args: Dict[str, Any] = None, **kwargs):\n \"\"\"create an interactive plot of the field using :mod:`napari`\n\n For a detailed description of the launched program, see the\n `napari webpage <http://napari.org/>`_.\n\n Args:\n viewer_args (dict):\n Arguments passed to :class:`napari.viewer.Viewer` to affect the viewer.\n **kwargs:\n Extra arguments passed to the plotting function\n \"\"\"\n if viewer_args is None:\n viewer_args = {}\n\n if self.grid.num_axes == 1:\n raise RuntimeError(\n \"Interactive plotting needs at least 2 spatial dimensions\"\n )\n\n with napari_viewer(self.grid, **viewer_args) as viewer:\n napari_add_layers(viewer, self._get_napari_data(**kwargs))\n\n\nTDataField = TypeVar(\"TDataField\", bound=\"DataFieldBase\")\n\n\nclass DataFieldBase(FieldBase, metaclass=ABCMeta):\n \"\"\"abstract base class for describing fields of single entities\"\"\"\n\n rank: int # the rank of the tensor field\n\n def __init__(\n self,\n grid: GridBase,\n data: Optional[Union[ArrayLike, str]] = \"zeros\",\n *,\n label: str = None,\n dtype=None,\n with_ghost_cells: bool = False,\n ):\n \"\"\"\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n Grid defining the space on which this field is defined.\n data (Number or :class:`~numpy.ndarray`, optional):\n Field values at the support points of the grid. The flag\n `with_ghost_cells` determines whether this data array contains values\n for the ghost cells, too. 
The resulting field will contain real data\n unless the `data` argument contains complex values. Special values are\n \"zeros\" or None, initializing the field with zeros, and \"empty\", just\n allocating memory with unspecified values.\n label (str, optional):\n Name of the field\n dtype (numpy dtype):\n The data type of the field. All the numpy dtypes are supported. If\n omitted, it will be determined from `data` automatically.\n with_ghost_cells (bool):\n Indicates whether the ghost cells are included in data\n \"\"\"\n if isinstance(data, self.__class__):\n # special case where a DataFieldBase is supplied\n data_arr = number_array(data._data_all, dtype=dtype, copy=True)\n super().__init__(grid, data=data_arr, label=label)\n\n elif with_ghost_cells:\n # use full data without copying (unless necessary)\n if data is None or isinstance(data, str):\n raise ValueError(\"`data` must be supplied if with_ghost_cells==True\")\n data_arr = number_array(data, dtype=dtype, copy=False)\n super().__init__(grid, data=data_arr, label=label)\n\n else:\n # ghost cells are not supplied => allocate full array and write valid data\n full_shape = (grid.dim,) * self.rank + tuple(s + 2 for s in grid.shape)\n\n if data is None:\n # fill full data with zeros by default\n data_arr = np.zeros(full_shape, dtype=dtype)\n super().__init__(grid, data=data_arr, label=label)\n\n elif isinstance(data, str):\n # allocate empty data\n if data == \"empty\":\n data_arr = np.empty(full_shape, dtype=dtype)\n elif data == \"zeros\":\n data_arr = np.zeros(full_shape, dtype=dtype)\n elif data == \"ones\":\n data_arr = np.ones(full_shape, dtype=dtype)\n else:\n raise ValueError(f\"Unknown data '{data}'\")\n super().__init__(grid, data=data_arr, label=label)\n\n elif isinstance(data, DataFieldBase):\n # copy the full data from the supplied field\n grid.assert_grid_compatible(data.grid)\n data_arr = number_array(data._data_all, dtype=dtype, copy=True)\n super().__init__(grid, data=data_arr, label=label)\n\n else:\n # initialize empty data and set the valid data\n data_arr = number_array(data, dtype=dtype, copy=False)\n empty_data = np.empty(full_shape, dtype=data_arr.dtype)\n super().__init__(grid, data=empty_data, label=label)\n self.data = data_arr\n\n def __repr__(self) -> str:\n \"\"\"return instance as string\"\"\"\n class_name = self.__class__.__name__\n result = f\"{class_name}(grid={self.grid!r}, data={self.data}\"\n if self.label:\n result += f', label=\"{self.label}\"'\n return result + \")\"\n\n def __str__(self) -> str:\n \"\"\"return instance as string\"\"\"\n result = (\n f\"{self.__class__.__name__}(grid={self.grid}, \"\n f\"data=Array{self.data.shape}\"\n )\n if self.label:\n result += f', label=\"{self.label}\"'\n return result + \")\"\n\n @classmethod\n def random_uniform(\n cls,\n grid: GridBase,\n vmin: float = 0,\n vmax: float = 1,\n label: Optional[str] = None,\n rng: np.random.Generator = None,\n ):\n \"\"\"create field with uniform distributed random values\n\n These values are uncorrelated in space.\n\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n Grid defining the space on which this field is defined\n vmin (float):\n Smallest possible random value\n vmax (float):\n Largest random value\n label (str, optional):\n Name of the field\n rng (:class:`~numpy.random.Generator`):\n Random number generator (default: :func:`~numpy.random.default_rng()`)\n \"\"\"\n if rng is None:\n rng = np.random.default_rng()\n\n shape = (grid.dim,) * cls.rank + grid.shape\n data = rng.uniform(vmin, vmax, size=shape)\n 
return cls(grid, data=data, label=label)\n\n @classmethod\n def random_normal(\n cls,\n grid: GridBase,\n mean: float = 0,\n std: float = 1,\n scaling: str = \"physical\",\n label: Optional[str] = None,\n rng: np.random.Generator = None,\n ):\n \"\"\"create field with normal distributed random values\n\n These values are uncorrelated in space.\n\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n Grid defining the space on which this field is defined\n mean (float):\n Mean of the Gaussian distribution\n std (float):\n Standard deviation of the Gaussian distribution\n scaling (str):\n Determines how the values are scaled. Possible choices are\n 'none' (values are drawn from a normal distribution with\n given mean and standard deviation) or 'physical' (the variance\n of the random number is scaled by the inverse volume of the grid\n cell; this is useful for physical quantities, which vary less in\n larger volumes).\n label (str, optional):\n Name of the field\n rng (:class:`~numpy.random.Generator`):\n Random number generator (default: :func:`~numpy.random.default_rng()`)\n \"\"\"\n if rng is None:\n rng = np.random.default_rng()\n\n if scaling == \"none\":\n noise_scale = std\n elif scaling == \"physical\":\n noise_scale = std / np.sqrt(grid.cell_volumes)\n else:\n raise ValueError(f\"Unknown noise scaling {scaling}\")\n\n shape = (grid.dim,) * cls.rank + grid.shape\n data = mean + noise_scale * rng.normal(size=shape)\n return cls(grid, data=data, label=label)\n\n @classmethod\n def random_harmonic(\n cls,\n grid: GridBase,\n modes: int = 3,\n harmonic=np.cos,\n axis_combination=np.multiply,\n label: Optional[str] = None,\n rng: np.random.Generator = None,\n ):\n r\"\"\"create a random field build from harmonics\n\n The resulting fields will be highly correlated in space and can thus\n serve for testing differential operators.\n\n With the default settings, the resulting field :math:`c_i(\\mathbf{x})`\n is given by\n\n .. math::\n c_i(\\mathbf{x}) = \\prod_{\\alpha=1}^N \\sum_{j=1}^M a_{ij\\alpha}\n \\cos\\left(\\frac{2 \\pi x_\\alpha}{j L_\\alpha}\\right) \\;,\n\n where :math:`N` is the number of spatial dimensions, each with length\n :math:`L_\\alpha`, :math:`M` is the number of modes given by `modes`, and\n :math:`a_{ij\\alpha}` are random amplitudes, chosen from a uniform\n distribution over the interval [0, 1].\n\n Note that the product could be replaced by a sum when\n `axis_combination = numpy.add` and the :math:`\\cos()` could be any other\n function given by the parameter `harmonic`.\n\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n Grid defining the space on which this field is defined\n modes (int):\n Number :math:`M` of harmonic modes\n harmonic (callable):\n Determines which harmonic function is used. Typical values are\n :func:`numpy.sin` and :func:`numpy.cos`, which basically relate\n to different boundary conditions applied at the grid boundaries.\n axis_combination (callable):\n Determines how values from different axis are combined. 
Typical\n choices are :func:`numpy.multiply` and :func:`numpy.add`\n resulting in products and sums of the values along axes,\n respectively.\n label (str, optional):\n Name of the field\n rng (:class:`~numpy.random.Generator`):\n Random number generator (default: :func:`~numpy.random.default_rng()`)\n \"\"\"\n if rng is None:\n rng = np.random.default_rng()\n\n tensor_shape = (grid.dim,) * cls.rank\n\n data = np.empty(tensor_shape + grid.shape)\n # determine random field for each component\n for index in np.ndindex(*tensor_shape):\n data_axis = []\n # random harmonic function along each axis\n for i in range(len(grid.axes)):\n # choose wave vectors\n ampl = rng.random(size=modes) # amplitudes\n x = discretize_interval(0, 2 * np.pi, grid.shape[i])[0]\n data_axis.append(\n sum(a * harmonic(n * x) for n, a in enumerate(ampl, 1))\n )\n # full dataset is product of values along axes\n data[index] = functools.reduce(axis_combination.outer, data_axis)\n\n return cls(grid, data=data, label=label)\n\n @classmethod\n def random_colored(\n cls,\n grid: GridBase,\n exponent: float = 0,\n scale: float = 1,\n label: Optional[str] = None,\n rng: np.random.Generator = None,\n ):\n r\"\"\"create a field of random values with colored noise\n\n The spatially correlated values obey\n\n .. math::\n \\langle c_i(\\boldsymbol k) c_j(\\boldsymbol k’) \\rangle =\n \\Gamma^2 |\\boldsymbol k|^\\nu \\delta_{ij}\n \\delta(\\boldsymbol k - \\boldsymbol k’)\n\n in spectral space. The special case :math:`\\nu = 0` corresponds to white\n noise. Note that the components of vector or tensor fields are\n uncorrelated.\n\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n Grid defining the space on which this field is defined\n exponent (float):\n Exponent :math:`\\nu` of the power spectrum\n scale (float):\n Scaling factor :math:`\\Gamma` determining noise strength\n label (str, optional):\n Name of the field\n rng (:class:`~numpy.random.Generator`):\n Random number generator (default: :func:`~numpy.random.default_rng()`)\n \"\"\"\n # get function making colored noise\n from ..tools.spectral import make_colored_noise\n\n make_noise = make_colored_noise(\n grid.shape, dx=grid.discretization, exponent=exponent, scale=scale, rng=rng\n )\n\n # create random fields for each tensor component\n tensor_shape = (grid.dim,) * cls.rank\n data = np.empty(tensor_shape + grid.shape)\n # determine random field for each component\n for index in np.ndindex(*tensor_shape):\n data[index] = make_noise()\n\n return cls(grid, data=data, label=label)\n\n @classmethod\n def get_class_by_rank(cls, rank: int) -> Type[DataFieldBase]:\n \"\"\"return a :class:`DataFieldBase` subclass describing a field with a given rank\n\n Args:\n rank (int): The rank of the tensor field\n \"\"\"\n for field_cls in cls._subclasses.values():\n if (\n issubclass(field_cls, DataFieldBase)\n and not isabstract(field_cls)\n and field_cls.rank == rank\n ):\n return field_cls\n raise RuntimeError(f\"Could not find field class for rank {rank}\")\n\n @classmethod\n def from_state(\n cls, attributes: Dict[str, Any], data: np.ndarray = None\n ) -> DataFieldBase:\n \"\"\"create a field from given state.\n\n Args:\n attributes (dict):\n The attributes that describe the current instance\n data (:class:`~numpy.ndarray`, optional):\n Data values at the support points of the grid defining the field\n \"\"\"\n if \"class\" in attributes:\n class_name = attributes.pop(\"class\")\n assert class_name == cls.__name__\n\n # create the instance from the attributes\n return 
cls(attributes.pop(\"grid\"), data=data, **attributes)\n\n def copy(\n self: TDataField,\n *,\n label: str = None,\n dtype=None,\n ) -> TDataField:\n \"\"\"return a copy of the data, but not of the grid\n\n Args:\n label (str, optional):\n Name of the returned field\n dtype (numpy dtype):\n The data type of the field. If omitted, it will be determined from\n `data` automatically or the dtype of the current field is used.\n \"\"\"\n if label is None:\n label = self.label\n\n return self.__class__(\n self.grid,\n data=np.array(self._data_all, dtype=dtype, copy=True),\n label=label,\n dtype=dtype,\n with_ghost_cells=True,\n )\n\n @property\n def data_shape(self) -> Tuple[int, ...]:\n \"\"\"tuple: the shape of the data at each grid point\"\"\"\n return (self.grid.dim,) * self.rank\n\n @classmethod\n def unserialize_attributes(cls, attributes: Dict[str, str]) -> Dict[str, Any]:\n \"\"\"unserializes the given attributes\n\n Args:\n attributes (dict):\n The serialized attributes\n\n Returns:\n dict: The unserialized attributes\n \"\"\"\n results = {}\n for key, value in attributes.items():\n if key == \"grid\":\n results[key] = GridBase.from_state(value)\n else:\n results[key] = json.loads(value)\n return results\n\n def _write_to_image(self, filename: str, **kwargs):\n r\"\"\"write data to image\n\n Args:\n filename (str):\n The path to the image that will be created\n \\**kwargs:\n Additional keyword arguments that affect the image. For\n instance, some fields support a `scalar` argument that\n determines how they are converted to a scalar. Non-Cartesian\n grids might support a `performance_goal` argument to influence\n how an image is created from the raw data. Finally, the\n remaining arguments are are passed to\n :func:`matplotlib.pyplot.imsave` to affect the appearance.\n \"\"\"\n import matplotlib.pyplot as plt\n\n # obtain image data\n get_image_args = {}\n for key in [\"performance_goal\", \"scalar\"]:\n if key in kwargs:\n get_image_args[key] = kwargs.pop(key)\n img = self.get_image_data(**get_image_args)\n\n kwargs.setdefault(\"cmap\", \"gray\")\n plt.imsave(filename, img[\"data\"].T, origin=\"lower\", **kwargs)\n\n def _make_interpolator_scipy(\n self, method: str = \"linear\", fill: Number = None, **kwargs\n ) -> Callable[[np.ndarray, np.ndarray], NumberOrArray]:\n r\"\"\"returns a function that can be used to interpolate values.\n\n This uses :class:`scipy.interpolate.RegularGridInterpolator` and thus supports\n extra options supplied by keyword arguments. Note that this interpolator does\n not respect periodic boundary conditions, yet.\n\n Args:\n method (str):\n The method used for interpolation. Currently, \"linear\" and \"nearest\" are\n supported by :class:`~scipy.interpolate.RegularGridInterpolator`.\n fill (Number, optional):\n Determines how values out of bounds are handled. 
If `None`, a\n `ValueError` is raised when out-of-bounds points are requested.\n Otherwise, the given value is returned.\n \\**kwargs: All keyword arguments are forwarded to\n :class:`~scipy.interpolate.RegularGridInterpolator`\n\n Returns:\n A function which returns interpolated values when called with\n arbitrary positions within the space of the grid.\n \"\"\"\n from scipy import interpolate\n\n coords_src = self.grid.axes_coords\n grid_dim = len(self.grid.axes)\n\n if self.rank == 0:\n # scalar field => data layout is already usable\n data = self.data\n revert_shape = False\n else:\n # spatial dimensions need to come first => move data to last axis\n assert self.data.shape[:-grid_dim] == self.data_shape\n idx = (slice(None),) + (slice(1, -1),) * self.grid.num_axes\n data_flat = self._data_flat[idx]\n data_flat = np.moveaxis(data_flat, 0, -1)\n new_shape = self.grid.shape + (-1,)\n data = data_flat.reshape(new_shape)\n assert data.shape[-1] == self.grid.dim ** self.rank\n revert_shape = True\n\n # set the fill behavior\n if fill is None:\n kwargs[\"bounds_error\"] = True\n else:\n kwargs[\"bounds_error\"] = False\n kwargs[\"fill_value\"] = fill\n\n # prepare the interpolator\n intp = interpolate.RegularGridInterpolator(\n coords_src, data, method=method, **kwargs\n )\n\n # determine under which conditions the axes can be squeezed\n if grid_dim == 1:\n scalar_dim = 0\n else:\n scalar_dim = 1\n\n # introduce wrapper function to process arrays\n def interpolator(point: np.ndarray, **kwargs) -> NumberOrArray:\n \"\"\"return the interpolated value at the position `point`\"\"\"\n point = np.atleast_1d(point)\n # apply periodic boundary conditions to grid point\n point = self.grid.normalize_point(point, reflect=False)\n out = intp(point, **kwargs)\n if point.ndim == scalar_dim or point.ndim == point.size == 1:\n out = out[0]\n if revert_shape:\n # revert the shuffling of spatial and local axes\n out = np.moveaxis(out, point.ndim - 1, 0)\n out = out.reshape(self.data_shape + point.shape[:-1])\n\n return out # type: ignore\n\n return interpolator # type: ignore\n\n @fill_in_docstring\n def _make_interpolator_numba(\n self, bc: Optional[BoundariesData] = \"natural\", fill: Number = None\n ) -> Callable[[np.ndarray, Optional[np.ndarray]], np.ndarray]:\n \"\"\"return a compiled interpolator\n\n This interpolator respects boundary conditions and can thus interpolate\n values in the whole grid volume. However, close to corners, the\n interpolation might not be optimal, in particular for periodic grids.\n\n Args:\n bc:\n The boundary conditions applied to the field.\n {ARG_BOUNDARIES_OPTIONAL}\n fill (Number, optional):\n Determines how values out of bounds are handled. 
If `None`, a\n `ValueError` is raised when out-of-bounds points are requested.\n Otherwise, the given value is returned.\n\n Returns:\n A function which returns interpolated values when called with\n arbitrary positions within the space of the grid.\n \"\"\"\n grid = self.grid\n grid_dim = len(grid.axes)\n data_shape = self.data_shape\n\n dim_error_msg = (\n f\"Dimension of interpolation point does not match grid dimension {grid_dim}\"\n )\n\n # convert `fill` to dtype of data\n if fill is not None:\n if self.rank == 0:\n fill = self.data.dtype.type(fill)\n else:\n fill = np.broadcast_to(fill, self.data_shape).astype(self.data.dtype)\n\n if bc is None:\n # use the full array and assume BCs are set via ghost points\n interpolate_single = grid.make_interpolator_full_compiled(fill=fill)\n # extract information about the data field\n get_data_array = make_array_constructor(self._data_all)\n\n else:\n # create an interpolator that sets the boundary conditions\n interpolate_single = grid.make_interpolator_compiled(\n bc=bc, rank=self.rank, fill=fill\n )\n # extract information about the data field\n get_data_array = make_array_constructor(self.data)\n\n @jit\n def interpolator(point: np.ndarray, data: np.ndarray = None) -> np.ndarray:\n \"\"\"return the interpolated value at the position `point`\n\n Args:\n point (:class:`~numpy.ndarray`):\n The list of points. This point coordinates should be given\n along the last axis, i.e., the shape should be `(..., dim)`.\n data (:class:`~numpy.ndarray`, optional):\n The discretized field values. If omitted, the data of the current\n field is used, which should be the default. However, this option can\n be useful to interpolate other fields defined on the same grid\n without recreating the interpolator. If a data array is supplied, it\n needs to be the valid data points when boundary conditions were\n specified. Otherwise, the full data, including the ghost points,\n needs to be given.\n\n Returns:\n :class:`~numpy.ndarray`: The interpolated values at the points\n \"\"\"\n # check input\n point = np.atleast_1d(point)\n if point.shape[-1] != grid_dim:\n raise DimensionError(dim_error_msg)\n point_shape = point.shape[:-1]\n\n # reconstruct data field from memory address\n if data is None:\n data = get_data_array()\n\n # interpolate at every valid point\n out = np.empty(data_shape + point_shape, dtype=data.dtype)\n for idx in np.ndindex(*point_shape):\n out[(...,) + idx] = interpolate_single(data, point[idx])\n\n return out\n\n # store a reference to the data so it is not garbage collected too early\n interpolator._data = self._data_all\n\n return interpolator # type: ignore\n\n @cached_method()\n def make_interpolator(\n self,\n backend: str = \"numba\",\n method: str = \"linear\",\n fill: Number = None,\n **kwargs,\n ) -> Callable[[np.ndarray, np.ndarray], NumberOrArray]:\n r\"\"\"returns a function that can be used to interpolate values.\n\n Args:\n backend (str):\n The accepted values `scipy` and `numba` determine the backend that is\n used for the interpolation.\n method (str):\n Determines the method being used for interpolation. Typical values that\n are \"nearest\" and \"linear\", but the supported values depend on the\n chosen `backend`.\n fill (Number, optional):\n Determines how values out of bounds are handled. 
If `None`, a\n `ValueError` is raised when out-of-bounds points are requested.\n Otherwise, the given value is returned.\n \\**kwargs:\n Additional keyword arguments are passed to the individual\n interpolator methods and can be used to further affect the\n behavior.\n\n The scipy implementations use scipy.interpolate.RegularGridInterpolator\n and thus do not respect boundary conditions. Additional keyword\n arguments are directly forwarded to the constructor of\n `RegularGridInterpolator`.\n\n The numba implementation respect boundary conditions, which can be set\n using the `bc` keywords argument. Supported values are the same as for\n the operators, e.g., the Laplacian. If no boundary conditions are\n specified, natural boundary conditions are assumed, which are periodic\n conditions for periodic axes and Neumann conditions otherwise.\n\n Returns:\n A function which returns interpolated values when called with\n arbitrary positions within the space of the grid.\n \"\"\"\n if backend == \"scipy\":\n return self._make_interpolator_scipy(method=method, fill=fill, **kwargs)\n elif backend == \"numba\":\n if method != \"linear\":\n raise NotImplementedError(\n \"The numba backend currently only supports linear interpolation\"\n )\n return self._make_interpolator_numba(fill=fill, **kwargs)\n else:\n raise ValueError(f\"Unknown backend `{backend}`\")\n\n def interpolate(\n self,\n point: np.ndarray,\n *,\n backend: str = \"numba\",\n method: str = \"linear\",\n fill: Number = None,\n **kwargs,\n ) -> NumberOrArray:\n r\"\"\"interpolate the field to points between support points\n\n Args:\n point (:class:`~numpy.ndarray`):\n The points at which the values should be obtained. This is given in grid\n coordinates.\n backend (str):\n The accepted values \"scipy\" and \"numba\" determine the backend that is\n used for the interpolation.\n method (str):\n Determines the method being used for interpolation. Typical values that\n are \"nearest\" and \"linear\", but the supported values depend on the\n chosen `backend`.\n fill (Number, optional):\n Determines how values out of bounds are handled. If `None`, a\n `ValueError` is raised when out-of-bounds points are requested.\n Otherwise, the given value is returned.\n \\**kwargs:\n Additional keyword arguments are forwarded to the method\n :meth:`DataFieldBase.make_interpolator`.\n\n Returns:\n :class:`~numpy.ndarray`: the values of the field\n \"\"\"\n interpolator = self.make_interpolator(\n backend=backend, method=method, fill=fill, **kwargs\n )\n return interpolator(np.asarray(point)) # type: ignore\n\n def interpolate_to_grid(\n self: TDataField,\n grid: GridBase,\n *,\n backend: str = \"numba\",\n method: str = \"linear\",\n fill: Number = None,\n label: Optional[str] = None,\n ) -> TDataField:\n \"\"\"interpolate the data of this field to another grid.\n\n Args:\n grid (:class:`~pde.grids.base.GridBase`):\n The grid of the new field onto which the current field is\n interpolated.\n backend (str):\n The accepted values \"scipy\" and \"numba\" determine the backend that is\n used for the interpolation.\n method (str):\n Determines the method being used for interpolation. Typical values that\n are \"nearest\" and \"linear\", but the supported values depend on the\n chosen `backend`.\n fill (Number, optional):\n Determines how values out of bounds are handled. 
If `None`, a\n `ValueError` is raised when out-of-bounds points are requested.\n Otherwise, the given value is returned.\n label (str, optional):\n Name of the returned field\n\n Returns:\n Field of the same rank as the current one.\n \"\"\"\n if self.grid.dim != grid.dim:\n raise DimensionError(\n f\"Grid dimensions are incompatible \"\n f\"({self.grid.dim:d} != {grid.dim:d})\"\n )\n\n # determine the points at which data needs to be calculated\n if isinstance(grid, CartesianGridBase):\n # convert to a Cartesian grid\n points = self.grid.point_from_cartesian(grid.cell_coords)\n\n elif self.grid.__class__ is grid.__class__:\n # convert within the same grid class\n points = grid.cell_coords\n\n else:\n # this type of interpolation is not supported\n grid_in = self.grid.__class__.__name__\n grid_out = grid.__class__.__name__\n raise NotImplementedError(f\"Cannot convert {grid_in} to {grid_out}\")\n\n # interpolate the data to the grid\n data = self.interpolate(points, backend=backend, method=method, fill=fill)\n return self.__class__(grid, data, label=label)\n\n def add_interpolated(self, point: np.ndarray, amount: ArrayLike) -> None:\n \"\"\"deprecated alias of method `insert`\"\"\"\n # this was deprecated on 2021-02-23\n warnings.warn(\n \"`add_interpolated` is deprecated. Use `insert` instead\",\n DeprecationWarning,\n )\n self.insert(point, amount)\n\n def insert(self, point: np.ndarray, amount: ArrayLike) -> None:\n \"\"\"adds an (integrated) value to the field at an interpolated position\n\n Args:\n point (:class:`~numpy.ndarray`):\n The point inside the grid where the value is added. This is\n given in grid coordinates.\n amount (Number or :class:`~numpy.ndarray`):\n The amount that will be added to the field. The value describes\n an integrated quantity (given by the field value times the\n discretization volume). 
This is important for consistency with\n different discretizations and in particular grids with\n non-uniform discretizations.\n \"\"\"\n point = np.atleast_1d(point)\n amount = np.broadcast_to(amount, self.data_shape)\n grid = self.grid\n grid_dim = len(grid.axes)\n\n if point.size != grid_dim or point.ndim != 1:\n raise DimensionError(f\"Dimension mismatch for point {point}\")\n\n # determine the grid coordinates next to the chosen points\n low = np.array(grid.axes_bounds)[:, 0]\n c_l, d_l = np.divmod((point - low) / grid.discretization - 0.5, 1.0)\n c_l = c_l.astype(np.intc) # support points to the left of the chosen points\n w_l = 1 - d_l # weights of the low point\n w_h = d_l # weights of the high point\n\n # apply periodic boundary conditions to grid coordinates\n c_h = c_l + 1 # support points to the right of the chosen points\n for ax in np.flatnonzero(grid.periodic):\n c_l[..., ax] %= grid.shape[ax]\n c_h[..., ax] %= grid.shape[ax]\n\n # determine the valid points and the total weight in first iteration\n total_weight = 0\n cells = []\n for i in np.ndindex(*((2,) * grid_dim)):\n coords = np.choose(i, [c_l, c_h])\n if np.all(coords >= 0) and np.all(coords < grid.shape):\n weight = np.prod(np.choose(i, [w_l, w_h]))\n total_weight += weight\n cells.append((tuple(coords), weight))\n\n if total_weight == 0:\n raise DomainError(\"Point lies outside grid\")\n\n # alter each point in second iteration\n for coords, weight in cells:\n chng = weight * amount / (total_weight * grid.cell_volumes[coords])\n self.data[(Ellipsis,) + coords] += chng\n\n @fill_in_docstring\n def get_boundary_values(\n self, axis: int, upper: bool, bc: Optional[BoundariesData] = \"natural\"\n ) -> NumberOrArray:\n \"\"\"get the field values directly on the specified boundary\n\n Args:\n axis (int):\n The axis perpendicular to the boundary\n upper (bool):\n Whether the boundary is at the upper side of the axis\n bc:\n The boundary conditions applied to the field.\n {ARG_BOUNDARIES_OPTIONAL}\n\n Returns:\n :class:`~numpy.ndarray`: The discretized values on the boundary\n \"\"\"\n interpolator = self.make_interpolator(bc=bc)\n points = self.grid._boundary_coordinates(axis, upper)\n return interpolator(points) # type: ignore\n\n @fill_in_docstring\n def make_get_boundary_values(\n self, axis: int, upper: bool, bc: Optional[BoundariesData] = \"natural\"\n ) -> Callable[[Optional[np.ndarray], Optional[np.ndarray]], NumberOrArray]:\n \"\"\"make a function calculating field values on the specified boundary\n\n Args:\n axis (int):\n The axis perpendicular to the boundary\n upper (bool):\n Whether the boundary is at the upper side of the axis\n bc:\n The boundary conditions applied to the field. {ARG_BOUNDARIES}\n\n Returns:\n callable: A function returning the values on the boundary. The\n function has the signature `(data=None, out=None)`, which allows\n specifying an input and an output :class:`~numpy.ndarray`. If `data`\n is omitted, the data of the current field is used. The resulting\n interpolation is written to `out` if it is present. Otherwise, a new\n array is created.\n \"\"\"\n interpolator = self.make_interpolator(bc=bc)\n points = self.grid._boundary_coordinates(axis, upper)\n\n # TODO: use jit_allocated_out with pre-calculated shape\n\n @jit\n def get_boundary_values(\n data: np.ndarray = None, out: np.ndarray = None\n ) -> NumberOrArray:\n \"\"\"interpolate the field at the boundary\n\n Args:\n data (:class:`~numpy.ndarray`, optional):\n The data values that are used for interpolation. 
The data of\n the current field is used if `data = None`.\n out (:class:`~numpy.ndarray`, optional):\n The array into which the interpolated results are written. A\n new array is created if `out = None`.\n\n Returns:\n :class:`~numpy.ndarray`: The interpolated values on the boundary.\n \"\"\"\n res = interpolator(points, data) # type: ignore\n if out is None:\n return res\n else:\n # the following just copies the data from res to out. It is a\n # workaround for a bug in numba existing up to at least version 0.49\n out[...] = res[()] # type: ignore\n return out\n\n return get_boundary_values # type: ignore\n\n @fill_in_docstring\n def set_ghost_cells(self, bc: BoundariesData, *, args=None) -> None:\n \"\"\"set the boundary values on virtual points for all boundaries\n\n Args:\n bc (str or list or tuple or dict):\n The boundary conditions applied to the field.\n {ARG_BOUNDARIES}\n args:\n Additional arguments that might be supported by special boundary\n conditions.\n \"\"\"\n bcs = self.grid.get_boundary_conditions(bc, rank=self.rank)\n bcs.set_ghost_cells(self._data_all, args=args)\n\n @abstractproperty\n def integral(self) -> NumberOrArray:\n pass\n\n @abstractmethod\n def to_scalar(\n self, scalar: str = \"auto\", *, label: Optional[str] = None\n ) -> \"ScalarField\":\n pass\n\n @property\n def average(self) -> NumberOrArray:\n \"\"\"determine the average of data\n\n This is calculated by integrating each component of the field over space\n and dividing by the grid volume\n \"\"\"\n return self.integral / self.grid.volume\n\n @property\n def fluctuations(self) -> NumberOrArray:\n \"\"\":class:`~numpy.ndarray`: fluctuations over the entire space.\n\n The fluctuations are defined as the standard deviation of the data scaled by the\n cell volume. This definition makes the fluctuations independent of the\n discretization. It corresponds to the physical scaling available in the\n :func:`~DataFieldBase.random_normal`.\n\n Returns:\n :class:`~numpy.ndarray`: A tensor with the same rank of the field,\n specifying the fluctuations of each component of the tensor field\n individually. Consequently, a simple scalar is returned for a\n :class:`~pde.fields.scalar.ScalarField`.\n \"\"\"\n scaled_data = self.data * np.sqrt(self.grid.cell_volumes)\n axes = tuple(range(self.rank, self.data.ndim))\n return np.std(scaled_data, axis=axes) # type: ignore\n\n @property\n def magnitude(self) -> float:\n \"\"\"float: determine the magnitude of the field.\n\n This is calculated by getting a scalar field using the default arguments of the\n :func:`to_scalar` method, averaging the result over the whole grid, and taking\n the absolute value.\n \"\"\"\n if self.rank == 0:\n return abs(self.average) # type: ignore\n elif self.rank > 0:\n return abs(self.to_scalar().average) # type: ignore\n else:\n raise AssertionError(\"Rank must be non-negative\")\n\n @fill_in_docstring\n def _apply_operator(\n self,\n operator: str,\n bc: Optional[\"BoundariesData\"],\n out: Optional[DataFieldBase] = None,\n *,\n label: str = None,\n **kwargs,\n ) -> DataFieldBase:\n r\"\"\"apply an operator and return result as a field\n\n Args:\n operator (str):\n An identifier determining the registered on the grid.\n bc:\n The boundary conditions applied to the field.\n {ARG_BOUNDARIES_OPTIONAL}\n out (ScalarField, optional):\n Optional scalar field to which the result is written.\n label (str, optional):\n Name of the returned field\n **kwargs:\n Additional arguments affecting how the operator behaves.\n\n Returns:\n Field with new data. 
This is stored at `out` if given.\n \"\"\"\n # get information about the operator\n operator_info = self.grid._get_operator_info(operator)\n out_cls = self.get_class_by_rank(operator_info.rank_out)\n\n # prepare the output field\n if out is None:\n out = out_cls(self.grid, data=\"empty\", label=label, dtype=self.dtype)\n elif not isinstance(out, out_cls):\n raise RankError(f\"`out` must be a {out_cls.__name__}\")\n else:\n self.grid.assert_grid_compatible(out.grid)\n if label is not None:\n out.label = label\n\n if bc is None:\n # apply the operator without imposing boundary conditions\n op_raw = self.grid.make_operator_no_bc(operator_info, **kwargs)\n op_raw(self._data_all, out.data)\n else:\n # apply the operator with boundary conditions\n op_with_bcs = self.grid.make_operator(operator_info, bc=bc, **kwargs)\n out.data[:] = op_with_bcs(self.data)\n\n return out\n\n def smooth(\n self: TDataField,\n sigma: float = 1,\n *,\n out: Optional[TDataField] = None,\n label: str = None,\n ) -> TDataField:\n \"\"\"applies Gaussian smoothing with the given standard deviation\n\n This function respects periodic boundary conditions of the underlying\n grid, using reflection when no periodicity is specified.\n\n sigma (float):\n Gives the standard deviation of the smoothing in real length units\n (default: 1)\n out (FieldBase, optional):\n Optional field into which the smoothed data is stored. Setting this\n to the input field enables in-place smoothing.\n label (str, optional):\n Name of the returned field\n\n Returns:\n Field with smoothed data. This is stored at `out` if given.\n \"\"\"\n from scipy import ndimage\n\n # allocate memory for storing output\n if out is None:\n out = self.__class__(self.grid, label=self.label)\n else:\n self.assert_field_compatible(out)\n\n # apply Gaussian smoothing for each axis\n data_in = self.data # use the field data as input\n data_out = out.data # write to the output\n for axis in range(-len(self.grid.axes), 0):\n sigma_dx = sigma / self.grid.discretization[axis]\n mode = \"wrap\" if self.grid.periodic[axis] else \"reflect\"\n ndimage.gaussian_filter1d(\n data_in, sigma=sigma_dx, axis=axis, output=data_out, mode=mode\n )\n data_in = data_out # use this smoothed data as input for next axis\n\n # return the data in the correct field class\n if label:\n out.label = label\n return out\n\n def get_line_data(\n self, scalar: str = \"auto\", extract: str = \"auto\"\n ) -> Dict[str, Any]:\n \"\"\"return data for a line plot of the field\n\n Args:\n scalar (str or int):\n The method for extracting scalars as described in\n :meth:`DataFieldBase.to_scalar`.\n extract (str):\n The method used for extracting the line data. 
See the docstring\n of the grid method `get_line_data` to find supported values.\n\n Returns:\n dict: Information useful for performing a line plot of the field\n \"\"\"\n # turn field into scalar field\n scalar_data = self.to_scalar(scalar).data\n\n # extract the line data\n data = self.grid.get_line_data(scalar_data, extract=extract)\n if \"label_y\" in data and data[\"label_y\"]:\n if self.label:\n data[\"label_y\"] = f\"{self.label} ({data['label_y']})\"\n else:\n data[\"label_y\"] = self.label\n return data\n\n def get_image_data(\n self, scalar: str = \"auto\", transpose: bool = False, **kwargs\n ) -> Dict[str, Any]:\n r\"\"\"return data for plotting an image of the field\n\n Args:\n scalar (str or int):\n The method for extracting scalars as described in\n :meth:`DataFieldBase.to_scalar`.\n transpose (bool):\n Determines whether the transpose of the data should is plotted\n \\**kwargs:\n Additional parameters are forwarded to `grid.get_image_data`\n\n Returns:\n dict: Information useful for plotting an image of the field\n \"\"\"\n # turn field into scalar field\n scalar_data = self.to_scalar(scalar).data\n\n # remove imaginary parts\n if self.is_complex:\n self._logger.warning(\"Only the absolute value of complex data is shown\")\n scalar_data = abs(scalar_data)\n\n # extract the image data\n data = self.grid.get_image_data(scalar_data, **kwargs) # type: ignore\n data[\"title\"] = self.label\n\n if transpose:\n # adjust image data such that the transpose is plotted\n data[\"data\"] = data[\"data\"].T\n data[\"label_x\"], data[\"label_y\"] = data[\"label_y\"], data[\"label_x\"]\n\n return data\n\n def get_vector_data(self, **kwargs) -> Dict[str, Any]:\n r\"\"\"return data for a vector plot of the field\n\n Args:\n \\**kwargs: Additional parameters are forwarded to\n `grid.get_image_data`\n\n Returns:\n dict: Information useful for plotting an vector field\n \"\"\"\n raise NotImplementedError()\n\n def _plot_line(\n self,\n ax,\n scalar: str = \"auto\",\n extract: str = \"auto\",\n ylabel: str = None,\n **kwargs,\n ) -> PlotReference:\n r\"\"\"visualize a field using a 1d line plot\n\n Args:\n ax (:class:`matplotlib.axes.Axes`):\n Figure axes to be used for plotting.\n scalar (str or int):\n The method for extracting scalars as described in\n :meth:`DataFieldBase.to_scalar`.\n extract (str):\n The method used for extracting the line data.\n ylabel (str):\n Label of the y-axis. 
If omitted, the label is chosen\n automatically from the data field.\n \\**kwargs:\n Additional keyword arguments are passed to\n :func:`matplotlib.pyplot.plot`\n\n Returns:\n :class:`PlotReference`: Instance that contains information to update\n the plot with new data later.\n \"\"\"\n # obtain data for the plot\n line_data = self.get_line_data(scalar=scalar, extract=extract)\n\n # warn if there is an imaginary part\n if np.any(np.iscomplex(line_data[\"data_y\"])):\n self._logger.warning(\"Only the real part of the complex data is shown\")\n\n # do the plot\n (line2d,) = ax.plot(line_data[\"data_x\"], line_data[\"data_y\"].real, **kwargs)\n\n # set some default properties\n ax.set_xlabel(line_data[\"label_x\"])\n if ylabel is None:\n ylabel = line_data.get(\"label_y\", self.label)\n if ylabel:\n ax.set_ylabel(ylabel)\n\n return PlotReference(ax, line2d, {\"scalar\": scalar, \"extract\": extract})\n\n def _update_line_plot(self, reference: PlotReference) -> None:\n \"\"\"update a line plot with the current field values\n\n Args:\n reference (:class:`PlotReference`):\n The reference to the plot that is updated\n \"\"\"\n import matplotlib as mpl\n\n # obtain data for the plot\n scalar = reference.parameters.get(\"scalar\", \"auto\")\n extract = reference.parameters.get(\"extract\", \"auto\")\n line_data = self.get_line_data(scalar=scalar, extract=extract)\n\n line2d = reference.element\n if isinstance(line2d, mpl.lines.Line2D):\n # update old plot\n line2d.set_xdata(line_data[\"data_x\"])\n line2d.set_ydata(line_data[\"data_y\"].real)\n\n else:\n raise ValueError(f\"Unsupported plot reference {reference}\")\n\n def _plot_image(\n self,\n ax,\n colorbar: bool = True,\n scalar: str = \"auto\",\n transpose: bool = False,\n **kwargs,\n ) -> PlotReference:\n r\"\"\"visualize a field using a 2d density plot\n\n Args:\n ax (:class:`matplotlib.axes.Axes`):\n Figure axes to be used for plotting.\n colorbar (bool):\n Determines whether a colorbar is shown\n scalar (str or int):\n The method for extracting scalars as described in\n :meth:`DataFieldBase.to_scalar`.\n transpose (bool):\n Determines whether the transpose of the data should is plotted\n \\**kwargs:\n Additional keyword arguments that affect the image. For instance, some\n fields support a `scalar` argument that determines how they are\n converted to a scalar. Non-Cartesian grids might support a\n `performance_goal` argument to influence how an image is created from\n the raw data. 
Finally, the remaining arguments are are passed to\n :func:`matplotlib.pyplot.imshow` to affect the appearance.\n\n Returns:\n :class:`PlotReference`: Instance that contains information to update\n the plot with new data later.\n \"\"\"\n # obtain image data with appropriate parameters\n data_kws = {}\n for arg in [\"performance_goal\", \"scalar\", \"transpose\"]:\n if arg in kwargs:\n data_kws[arg] = kwargs.pop(arg)\n data = self.get_image_data(scalar, transpose, **data_kws)\n\n if ax is None:\n import matplotlib.pyplot as plt\n\n # create new figure\n ax = plt.subplots()[1]\n\n # plot the image\n kwargs.setdefault(\"origin\", \"lower\")\n kwargs.setdefault(\"interpolation\", \"none\")\n axes_image = ax.imshow(data[\"data\"].T, extent=data[\"extent\"], **kwargs)\n\n # set some default properties\n ax.set_xlabel(data[\"label_x\"])\n ax.set_ylabel(data[\"label_y\"])\n ax.set_title(data.get(\"title\", self.label))\n\n if colorbar:\n from ..tools.plotting import add_scaled_colorbar\n\n add_scaled_colorbar(axes_image, ax=ax)\n\n parameters = {\"scalar\": scalar, \"transpose\": transpose}\n return PlotReference(ax, axes_image, parameters)\n\n def _update_image_plot(self, reference: PlotReference) -> None:\n \"\"\"update an image plot with the current field values\n\n Args:\n reference (:class:`PlotReference`):\n The reference to the plot that is updated\n \"\"\"\n # obtain image data\n p = reference.parameters\n data = self.get_image_data(\n scalar=p.get(\"scalar\", \"auto\"), transpose=p.get(\"transpose\", False)\n )\n\n # update the axes image\n reference.element.set_data(data[\"data\"].T)\n # adjust the colorbar limits\n reference.element.set_clim(data[\"data\"].min(), data[\"data\"].max())\n\n def _plot_vector(\n self,\n ax,\n method: str = \"quiver\",\n transpose: bool = False,\n max_points: int = 16,\n **kwargs,\n ) -> PlotReference:\n r\"\"\"visualize a field using a 2d vector plot\n\n Args:\n ax (:class:`matplotlib.axes.Axes`):\n Figure axes to be used for plotting.\n method (str):\n Plot type that is used. This can be either `quiver` or\n `streamplot`.\n transpose (bool):\n Determines whether the transpose of the data should be plotted.\n max_points (int):\n The maximal number of points that is used along each axis. 
This\n argument is only used for quiver plots.\n \\**kwargs:\n Additional keyword arguments are passed to\n :func:`matplotlib.pyplot.quiver` or\n :func:`matplotlib.pyplot.streamplot`.\n\n Returns:\n :class:`PlotReference`: Instance that contains information to update\n the plot with new data later.\n \"\"\"\n # store the parameters of this plot for later updating\n parameters = {\n \"method\": method,\n \"transpose\": transpose,\n \"kwargs\": kwargs,\n }\n\n if method == \"quiver\":\n # plot vector field using a quiver plot\n data = self.get_vector_data(transpose=transpose, max_points=max_points)\n parameters[\"max_points\"] = max_points # only save for quiver plot\n element = ax.quiver(\n data[\"x\"], data[\"y\"], data[\"data_x\"].T, data[\"data_y\"].T, **kwargs\n )\n\n elif method == \"streamplot\":\n # plot vector field using a streamplot\n data = self.get_vector_data(transpose=transpose)\n element = ax.streamplot(\n data[\"x\"], data[\"y\"], data[\"data_x\"].T, data[\"data_y\"].T, **kwargs\n )\n\n else:\n raise ValueError(f\"Vector plot `{method}` is not supported.\")\n\n # set some default properties of the plot\n ax.set_aspect(\"equal\")\n ax.set_xlabel(data[\"label_x\"])\n ax.set_ylabel(data[\"label_y\"])\n ax.set_title(data.get(\"title\", self.label))\n\n return PlotReference(ax, element, parameters)\n\n def _update_vector_plot(self, reference: PlotReference) -> None:\n \"\"\"update a vector plot with the current field values\n\n Args:\n reference (:class:`PlotReference`):\n The reference to the plot that is updated\n \"\"\"\n # extract general parameters\n method = reference.parameters.get(\"method\", \"quiver\")\n transpose = reference.parameters.get(\"transpose\", False)\n\n if method == \"quiver\":\n # update the data of a quiver plot\n max_points = reference.parameters.get(\"max_points\")\n data = self.get_vector_data(transpose=transpose, max_points=max_points)\n reference.element.set_UVC(data[\"data_x\"], data[\"data_y\"])\n\n elif method == \"streamplot\":\n # update a streamplot by redrawing it completely\n ax = reference.ax\n kwargs = reference.parameters.get(\"kwargs\", {})\n data = self.get_vector_data(transpose=transpose)\n # remove old streamplot\n ax.cla()\n # update with new streamplot\n reference.element = ax.streamplot(\n data[\"x\"], data[\"y\"], data[\"data_x\"].T, data[\"data_y\"].T, **kwargs\n )\n\n else:\n raise ValueError(f\"Vector plot `{method}` is not supported.\")\n\n def _update_plot(self, reference: PlotReference) -> None:\n \"\"\"update a plot with the current field values\n\n Args:\n reference (:class:`PlotReference`):\n The reference to the plot to updated\n \"\"\"\n import matplotlib as mpl\n\n # update the plot based on the given reference\n el = reference.element\n if isinstance(el, mpl.lines.Line2D):\n self._update_line_plot(reference)\n elif isinstance(el, mpl.image.AxesImage):\n self._update_image_plot(reference)\n elif isinstance(el, (mpl.quiver.Quiver, mpl.streamplot.StreamplotSet)):\n self._update_vector_plot(reference)\n else:\n raise ValueError(f\"Unknown plot element {el.__class__.__name__}\")\n\n @plot_on_axes(update_method=\"_update_plot\")\n def plot(self, kind: str = \"auto\", **kwargs) -> PlotReference:\n r\"\"\"visualize the field\n\n Args:\n kind (str):\n Determines the visualizations. Supported values are `image`,\n `line`, `vector`, or `interactive`. 
Alternatively, `auto`\n determines the best visualization based on the field itself.\n {PLOT_ARGS}\n \\**kwargs:\n All additional keyword arguments are forwarded to the actual\n plotting function.\n\n Returns:\n :class:`PlotReference`: Instance that contains information to update\n the plot with new data later.\n \"\"\"\n # determine the correct kind of plotting\n if kind == \"auto\":\n # determine best plot for this field\n if (\n isinstance(self, DataFieldBase)\n and self.rank == 1\n and self.grid.dim == 2\n ):\n kind = \"vector\"\n elif len(self.grid.shape) == 1:\n kind = \"line\"\n else:\n kind = \"image\"\n\n elif kind == \"quiver\":\n kind = \"vector\"\n kwargs[\"method\"] = \"quiver\"\n\n elif kind == \"streamplot\":\n kind = \"vector\"\n kwargs[\"method\"] = \"streamplot\"\n\n # do the actual plotting\n if kind == \"image\":\n reference = self._plot_image(**kwargs)\n elif kind == \"line\":\n reference = self._plot_line(**kwargs)\n elif kind == \"vector\":\n reference = self._plot_vector(**kwargs)\n else:\n raise ValueError(\n f\"Unsupported plot `{kind}`. Possible choices are `image`, `line`, \"\n \"`vector`, or `auto`.\"\n )\n\n return reference\n\n def _get_napari_layer_data(\n self, scalar: str = \"auto\", args: Dict[str, Any] = None\n ) -> Dict[str, Any]:\n \"\"\"returns data for plotting on a single napari layer\n\n Args:\n scalar (str):\n Indicates how the scalar field is generated; see `to_scalar`\n args (dict):\n Additional arguments returned in the result, which affect how the layer\n is shown.\n\n Returns:\n dict: all the information necessary to plot this field\n \"\"\"\n result = {} if args is None else args.copy()\n\n result.setdefault(\"scale\", self.grid.discretization)\n result.setdefault(\"rgb\", False)\n result[\"type\"] = \"image\"\n result[\"data\"] = self.to_scalar(scalar).data\n return result\n\n def _get_napari_data(self, **kwargs) -> Dict[str, Dict[str, Any]]:\n r\"\"\"returns data for plotting this field\n\n Args:\n \\**kwargs: all arguments are forwarded to `_get_napari_layer_data`\n\n Returns:\n dict: all the information necessary to plot this field\n \"\"\"\n name = \"Field\" if self.label is None else self.label\n return {name: self._get_napari_layer_data(**kwargs)}\n\n\ndef _get_field_class_by_rank(rank: int) -> Type[DataFieldBase]:\n \"\"\"return a field class associated with a certain rank\n\n Args:\n rank (int): The rank of the tensor field\n \"\"\"\n # deprecated on 2021-09-17\n warnings.warn(\"Use DataFieldBase.get_class_by_rank instead.\", DeprecationWarning)\n return DataFieldBase.get_class_by_rank(rank)\n" ]
[ [ "numpy.sqrt", "numpy.asarray", "scipy.ndimage.gaussian_filter1d", "numpy.all", "numpy.iscomplexobj", "numpy.moveaxis", "numpy.random.default_rng", "scipy.interpolate.RegularGridInterpolator", "numpy.subtract", "numpy.flatnonzero", "numpy.atleast_1d", "numpy.std", "numpy.choose", "numpy.zeros", "matplotlib.pyplot.imsave", "numpy.iscomplex", "numpy.divmod", "numpy.array", "numpy.array_equal", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.result_type", "numpy.broadcast_to", "numpy.isscalar", "numpy.ndindex", "numpy.empty" ] ]
cnvrg/keras
[ "7f9bea44d5d4512fe21d0263d00fd39a9fb5c671" ]
[ "tests/keras/engine/test_training.py" ]
[ "import pytest\r\nimport numpy as np\r\nfrom numpy.testing import assert_allclose\r\n\r\nfrom keras.layers import Dense, Dropout\r\nfrom keras.engine.topology import merge, Input\r\nfrom keras.engine.training import Model, check_loss_and_target_compatibility\r\nfrom keras.models import Sequential\r\nfrom keras import backend as K\r\nfrom keras.utils.test_utils import keras_test\r\nfrom keras.callbacks import LambdaCallback\r\n\r\n\r\n@keras_test\r\ndef test_model_methods():\r\n a = Input(shape=(3,), name='input_a')\r\n b = Input(shape=(3,), name='input_b')\r\n\r\n a_2 = Dense(4, name='dense_1')(a)\r\n dp = Dropout(0.5, name='dropout')\r\n b_2 = dp(b)\r\n\r\n model = Model([a, b], [a_2, b_2])\r\n\r\n optimizer = 'rmsprop'\r\n loss = 'mse'\r\n loss_weights = [1., 0.5]\r\n model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,\r\n sample_weight_mode=None)\r\n\r\n input_a_np = np.random.random((10, 3))\r\n input_b_np = np.random.random((10, 3))\r\n\r\n output_a_np = np.random.random((10, 4))\r\n output_b_np = np.random.random((10, 3))\r\n\r\n # test train_on_batch\r\n out = model.train_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},\r\n [output_a_np, output_b_np])\r\n out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},\r\n {'dense_1': output_a_np, 'dropout': output_b_np})\r\n\r\n # test fit\r\n out = model.fit([input_a_np, input_b_np],\r\n [output_a_np, output_b_np], nb_epoch=1, batch_size=4)\r\n out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},\r\n [output_a_np, output_b_np], nb_epoch=1, batch_size=4)\r\n out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},\r\n {'dense_1': output_a_np, 'dropout': output_b_np},\r\n nb_epoch=1, batch_size=4)\r\n\r\n # test validation_split\r\n out = model.fit([input_a_np, input_b_np],\r\n [output_a_np, output_b_np],\r\n nb_epoch=1, batch_size=4, validation_split=0.5)\r\n out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},\r\n [output_a_np, output_b_np],\r\n nb_epoch=1, batch_size=4, validation_split=0.5)\r\n out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},\r\n {'dense_1': output_a_np, 'dropout': output_b_np},\r\n nb_epoch=1, batch_size=4, validation_split=0.5)\r\n\r\n # test validation data\r\n out = model.fit([input_a_np, input_b_np],\r\n [output_a_np, output_b_np],\r\n nb_epoch=1, batch_size=4,\r\n validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))\r\n out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},\r\n [output_a_np, output_b_np],\r\n nb_epoch=1, batch_size=4, validation_split=0.5,\r\n validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))\r\n out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},\r\n {'dense_1': output_a_np, 'dropout': output_b_np},\r\n nb_epoch=1, batch_size=4, validation_split=0.5,\r\n validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))\r\n\r\n # test_on_batch\r\n out = model.test_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},\r\n [output_a_np, output_b_np])\r\n out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},\r\n {'dense_1': output_a_np, 'dropout': output_b_np})\r\n\r\n # predict_on_batch\r\n out = model.predict_on_batch([input_a_np, input_b_np])\r\n out = 
model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})\r\n\r\n # predict, evaluate\r\n input_a_np = np.random.random((10, 3))\r\n input_b_np = np.random.random((10, 3))\r\n\r\n output_a_np = np.random.random((10, 4))\r\n output_b_np = np.random.random((10, 3))\r\n\r\n out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)\r\n out = model.predict([input_a_np, input_b_np], batch_size=4)\r\n\r\n # with sample_weight\r\n input_a_np = np.random.random((10, 3))\r\n input_b_np = np.random.random((10, 3))\r\n\r\n output_a_np = np.random.random((10, 4))\r\n output_b_np = np.random.random((10, 3))\r\n\r\n sample_weight = [None, np.random.random((10,))]\r\n out = model.train_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np],\r\n sample_weight=sample_weight)\r\n\r\n out = model.test_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np],\r\n sample_weight=sample_weight)\r\n\r\n # test accuracy metric\r\n model.compile(optimizer, loss, metrics=['acc'],\r\n sample_weight_mode=None)\r\n\r\n out = model.train_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == 5\r\n out = model.test_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == 5\r\n\r\n # this should also work\r\n model.compile(optimizer, loss, metrics={'dense_1': 'acc'},\r\n sample_weight_mode=None)\r\n\r\n out = model.train_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == 4\r\n out = model.test_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == 4\r\n\r\n # and this as well\r\n model.compile(optimizer, loss, metrics={'dense_1': ['acc']},\r\n sample_weight_mode=None)\r\n\r\n out = model.train_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == 4\r\n out = model.test_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == 4\r\n\r\n # test starting from non-zero initial epoch\r\n trained_epochs = []\r\n\r\n def on_epoch_begin(epoch, logs):\r\n trained_epochs.append(epoch)\r\n tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)\r\n out = model.fit([input_a_np, input_b_np],\r\n [output_a_np, output_b_np], nb_epoch=5, batch_size=4,\r\n initial_epoch=2, callbacks=[tracker_cb])\r\n assert trained_epochs == [2, 3, 4]\r\n\r\n # test starting from non-zero initial epoch for generator too\r\n trained_epochs = []\r\n\r\n def gen_data(batch_sz):\r\n while True:\r\n yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],\r\n [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])\r\n out = model.fit_generator(gen_data(4), samples_per_epoch=10, nb_epoch=5,\r\n initial_epoch=2, callbacks=[tracker_cb])\r\n assert trained_epochs == [2, 3, 4]\r\n\r\n # test with a custom metric function\r\n mse = lambda y_true, y_pred: K.mean(K.pow(y_true - y_pred, 2))\r\n\r\n def mse_powers(y_true, y_pred):\r\n m = mse(y_true, y_pred)\r\n return {\r\n 'mse_squared': K.pow(m, 2),\r\n 'mse_cubed': K.pow(m, 3)\r\n }\r\n\r\n model.compile(optimizer, loss, metrics=[mse, mse_powers],\r\n sample_weight_mode=None)\r\n\r\n out = model.train_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n out_len = 1 + 2 * 4 # total loss, per layer: loss + 3 metrics\r\n assert len(out) == out_len\r\n out = model.test_on_batch([input_a_np, input_b_np],\r\n [output_a_np, output_b_np])\r\n assert len(out) == out_len\r\n\r\n input_a_np = 
np.random.random((10, 3))\r\n input_b_np = np.random.random((10, 3))\r\n\r\n output_a_np = np.random.random((10, 4))\r\n output_b_np = np.random.random((10, 3))\r\n\r\n out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, nb_epoch=1)\r\n out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)\r\n out = model.predict([input_a_np, input_b_np], batch_size=4)\r\n\r\n\r\n@keras_test\r\ndef test_trainable_argument():\r\n x = np.random.random((5, 3))\r\n y = np.random.random((5, 2))\r\n\r\n model = Sequential()\r\n model.add(Dense(2, input_dim=3, trainable=False))\r\n model.compile('rmsprop', 'mse')\r\n out = model.predict(x)\r\n model.train_on_batch(x, y)\r\n out_2 = model.predict(x)\r\n assert_allclose(out, out_2)\r\n\r\n # test with nesting\r\n input = Input(shape=(3,))\r\n output = model(input)\r\n model = Model(input, output)\r\n model.compile('rmsprop', 'mse')\r\n out = model.predict(x)\r\n model.train_on_batch(x, y)\r\n out_2 = model.predict(x)\r\n assert_allclose(out, out_2)\r\n\r\n\r\n@keras_test\r\ndef test_check_not_failing():\r\n a = np.random.random((2, 1, 3))\r\n check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])\r\n check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, None, 3)])\r\n\r\n\r\n@keras_test\r\ndef test_check_last_is_one():\r\n a = np.random.random((2, 3, 1))\r\n with pytest.raises(Exception) as exc:\r\n check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])\r\n\r\n assert \"You are passing a target array\" in str(exc)\r\n\r\n\r\n@keras_test\r\ndef test_check_bad_shape():\r\n a = np.random.random((2, 3, 5))\r\n with pytest.raises(Exception) as exc:\r\n check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, 3, 6)])\r\n\r\n assert \"targets to have the same shape\" in str(exc)\r\n\r\n\r\nif __name__ == '__main__':\r\n pytest.main([__file__])\r\n" ]
[ [ "numpy.random.random", "numpy.testing.assert_allclose" ] ]
peter-herman/weather_app
[ "b3fae3450bde19d7fd8b7b065b16ecebc35f9f09" ]
[ "test.py" ]
[ "import pandas as pd\nimport json, requests\nimport matplotlib.pyplot as plt\nurl = 'https://api.weather.gov/points/38.9072,-77.0369'\ndata = json.loads(requests.get(url).text)\nforecast_url = data['properties']['forecastHourly']\n\nforecast_data = json.loads(requests.get(forecast_url).text)\nforecast_df = pd.DataFrame(forecast_data['properties']['periods'])\n\nforecast_df.head()\nforecast_df['date'] = forecast_df['startTime'].str[5:10]\nforecast_df['hour'] = forecast_df['startTime'].str[11:13]\nforecast_df['time'] = forecast_df['date'] + ', ' + forecast_df['hour'] + ':00'\nforecast_df['Wind'] = forecast_df['windSpeed'].str.rstrip(' mph').astype(int)\nforecast_24 = forecast_df.loc[0:24,:]\n\nfig, axis = plt.subplots()\nbar_width = 0.5\nbars1 = axis.bar(forecast_24.index, forecast_24['Wind'],bar_width,\n alpha = 0.5, label='Wind Speed')\nbars2 = axis.bar(forecast_24.index+bar_width, forecast_24['temperature'], bar_width,\n alpha = 0.5,label = 'Temperature')\naxis.legend()\naxis.set_xlabel('Date, Time')\naxis.set_ylabel('Wind Speed (MPH)')\naxis.set_title('Hourly Wind Speed Forecast')\naxis.set_xticks(forecast_24.index + bar_width / 2)\naxis.set_xticklabels(forecast_24['hour'])\nfig.tight_layout()\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "pandas.DataFrame" ] ]
Silvicek/distributional-dqn
[ "41a9095393dd25b7375119b4af7d2c35ee3ec6cc" ]
[ "distdeepq/build_graph.py" ]
[ "\"\"\"Deep Q learning graph\n\nThe functions in this file can are used to create the following functions:\n\n======= act ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon a new value, if negative not update happens\n (default: no update)\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= train =======\n\n Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:\n\n td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))\n loss = huber_loss[td_error]\n\n Parameters\n ----------\n obs_t: object\n a batch of observations\n action: np.array\n actions that were selected upon seeing obs_t.\n dtype must be int32 and shape must be (batch_size,)\n reward: np.array\n immediate reward attained after executing those actions\n dtype must be float32 and shape must be (batch_size,)\n obs_tp1: object\n observations that followed obs_t\n done: np.array\n 1 if obs_t was the last observation in the episode and 0 otherwise\n obs_tp1 gets ignored, but must be of the valid shape.\n dtype must be float32 and shape must be (batch_size,)\n weight: np.array\n imporance weights for every element of the batch (gradient is multiplied\n by the importance weight) dtype must be float32 and shape must be (batch_size,)\n\n Returns\n -------\n td_error: np.array\n a list of differences between Q(s,a) and the target in Bellman's equation.\n dtype is float32 and shape is (batch_size,)\n\n======= update_target ========\n\n copy the parameters from optimized P function to the target P function.\n In distributional RL we actually optimize the following error:\n\n ThTz(P') * log(P)\n\n Where P' is lagging behind P to stablize the learning.\n\n\"\"\"\nimport tensorflow as tf\nimport baselines.common.tf_util as U\nfrom .static import build_z\n\n\ndef default_param_noise_filter(var):\n if var not in tf.trainable_variables():\n # We never perturb non-trainable vars.\n return False\n if \"fully_connected\" in var.name:\n # We perturb fully-connected layers.\n return True\n\n # The remaining layers are likely conv or layer norm layers, which we do not wish to\n # perturb (in the former case because they only extract features, in the latter case because\n # we use them for normalization purposes). 
If you change your network, you will likely want\n # to re-consider which layers to perturb and which to keep untouched.\n return False\n\n\ndef p_to_q(p_values, dist_params):\n z, _ = build_z(**dist_params)\n print(z, p_values)\n return tf.tensordot(p_values, z, [[-1], [-1]])\n\n\ndef pick_action(p_values, dist_params):\n q_values = p_to_q(p_values, dist_params)\n deterministic_actions = tf.argmax(q_values, axis=1)\n return deterministic_actions\n\n\ndef build_act(make_obs_ph, p_dist_func, num_actions, dist_params, scope=\"distdeepq\", reuse=None):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n p_dist_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n observations_ph = U.ensure_tf_input(make_obs_ph(\"observation\"))\n stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n\n eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n\n p_values = p_dist_func(observations_ph.get(), num_actions, dist_params['nb_atoms'], scope=\"q_func\")\n deterministic_actions = pick_action(p_values, dist_params)\n\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True},\n updates=[update_eps_expr])\n return act\n\n\ndef build_train(make_obs_ph, p_dist_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,\n double_q=True, scope=\"distdeepq\", reuse=None, param_noise=False, param_noise_filter_func=None,\n dist_params=None):\n \"\"\"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n p_dist_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n 
number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n \"\"\"\n\n if param_noise:\n raise ValueError('parameter noise not supported')\n else:\n act_f = build_act(make_obs_ph, p_dist_func, num_actions, dist_params, scope=scope, reuse=reuse)\n\n with tf.variable_scope(scope, reuse=reuse):\n # set up placeholders\n obs_t_input = U.ensure_tf_input(make_obs_ph(\"obs_t\"))\n act_t_ph = tf.placeholder(tf.int32, [None], name=\"action\")\n rew_t_ph = tf.placeholder(tf.float32, [None], name=\"reward\")\n obs_tp1_input = U.ensure_tf_input(make_obs_ph(\"obs_tp1\"))\n done_mask_ph = tf.placeholder(tf.float32, [None], name=\"done\")\n importance_weights_ph = tf.placeholder(tf.float32, [None], name=\"weight\")\n\n # =====================================================================================\n # q network evaluation\n p_t = p_dist_func(obs_t_input.get(), num_actions, dist_params['nb_atoms'], scope=\"q_func\", reuse=True) # reuse parameters from act\n q_t = p_to_q(p_t, dist_params) # reuse parameters from act\n q_func_vars = U.scope_vars(U.absolute_scope_name(\"q_func\"))\n\n # target q network evalution\n p_tp1 = p_dist_func(obs_tp1_input.get(), num_actions, dist_params['nb_atoms'], scope=\"target_q_func\")\n q_tp1 = p_to_q(p_tp1, dist_params)\n target_q_func_vars = U.scope_vars(U.absolute_scope_name(\"target_q_func\"))\n\n # TODO: use double\n\n a_next = tf.argmax(q_tp1, 1, output_type=tf.int32)\n batch_dim = tf.shape(rew_t_ph)[0]\n ThTz, debug = build_categorical_alg(p_tp1, rew_t_ph, a_next, gamma, batch_dim, done_mask_ph, dist_params)\n\n # compute the error (potentially clipped)\n cat_idx = tf.transpose(tf.reshape(tf.concat([tf.range(batch_dim), act_t_ph], axis=0), [2, batch_dim]))\n p_t_next = tf.gather_nd(p_t, cat_idx)\n\n cross_entropy = -1 * ThTz * tf.log(p_t_next)\n errors = tf.reduce_sum(cross_entropy, axis=-1)\n\n mean_error = tf.reduce_mean(errors)\n\n # compute optimization op (potentially with gradient clipping)\n if grad_norm_clipping is not None:\n optimize_expr = U.minimize_and_clip(optimizer,\n mean_error,\n var_list=q_func_vars,\n clip_val=grad_norm_clipping)\n 
else:\n optimize_expr = optimizer.minimize(mean_error, var_list=q_func_vars)\n\n # =====================================================================================\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_expr = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_expr.append(var_target.assign(var))\n update_target_expr = tf.group(*update_target_expr)\n\n # Create callable functions\n train = U.function(\n inputs=[\n obs_t_input,\n act_t_ph,\n rew_t_ph,\n obs_tp1_input,\n done_mask_ph,\n importance_weights_ph\n ],\n outputs=mean_error,\n updates=[optimize_expr]\n )\n update_target = U.function([], [], updates=[update_target_expr])\n\n q_values = U.function([obs_t_input], q_t)\n\n return act_f, train, update_target, {'q_values': q_values,\n 'p': p_tp1,\n 'cross_entropy': cross_entropy,\n 'ThTz': ThTz}\n\n\ndef build_categorical_alg(p_ph, r_ph, a_next, gamma, batch_dim, done_mask, dist_params):\n \"\"\"\n Builds the vectorized cathegorical algorithm following equation (7) of \n 'A Distributional Perspective on Reinforcement Learning' - https://arxiv.org/abs/1707.06887\n \"\"\"\n z, dz = build_z(**dist_params)\n Vmin, Vmax, nb_atoms = dist_params['Vmin'], dist_params['Vmax'], dist_params['nb_atoms']\n with tf.variable_scope('cathegorical'):\n\n cat_idx = tf.transpose(tf.reshape(tf.concat([tf.range(batch_dim), a_next], axis=0), [2, batch_dim]))\n p_best = tf.gather_nd(p_ph, cat_idx)\n\n big_z = tf.reshape(tf.tile(z, [batch_dim]), [batch_dim, nb_atoms])\n big_r = tf.transpose(tf.reshape(tf.tile(r_ph, [nb_atoms]), [nb_atoms, batch_dim]))\n\n Tz = tf.clip_by_value(big_r + gamma * tf.einsum('ij,i->ij', big_z, 1.-done_mask), Vmin, Vmax)\n\n big_Tz = tf.reshape(tf.tile(Tz, [1, nb_atoms]), [-1, nb_atoms, nb_atoms])\n big_big_z = tf.reshape(tf.tile(big_z, [1, nb_atoms]), [-1, nb_atoms, nb_atoms])\n\n Tzz = tf.abs(big_Tz - tf.transpose(big_big_z, [0, 2, 1])) / dz\n Thz = tf.clip_by_value(1 - Tzz, 0, 1)\n\n ThTz = tf.einsum('ijk,ik->ij', Thz, p_best)\n\n return ThTz, {'p_best': p_best}\n\n\n" ]
[ [ "tensorflow.cond", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.where", "tensorflow.group", "tensorflow.trainable_variables", "tensorflow.tensordot", "tensorflow.argmax", "tensorflow.tile", "tensorflow.gather_nd", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.clip_by_value", "tensorflow.transpose", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.einsum", "tensorflow.constant_initializer", "tensorflow.log", "tensorflow.variable_scope" ] ]
mtreviso/deepbondd
[ "0f412496eff000ba09de7d4ee5ccfa63f5ad4ab6" ]
[ "deepbond/models/utils.py" ]
[ "import copy\n\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\n\n\ndef unmask(tensor, mask, cut_length=0):\n \"\"\"\n Unmask a tensor and convert it back to a list of lists.\n\n Args:\n tensor (torch.Tensor): tensor with shape (bs, max_len, ...)\n mask (torch.Tensor): tensor with shape (bs, max_len) where 1 (or True)\n indicates a valid position, and 0 (or False) otherwise\n cut_length (int): remove the last `cut_length` elements from the tensor.\n In practice, the lengths calculated from the mask are going to be\n subtracted by `cut_length`. This is useful when you have <bos> and\n <eos> tokens in your words field and the mask was computed with\n words != <pad>. Default is 0, i.e., no cut\n Returns:\n a list of lists with variable length\n \"\"\"\n lengths = mask.int().sum(dim=-1)\n # if the mask was calculated using words, then we subtract cut_length\n # in practice: to remove the size of the <bos> and <eos> tokens\n # which are already removed from the tensor in the forward pass but not\n # from the mask\n if cut_length > 0:\n lengths -= cut_length\n lengths = lengths.tolist()\n return [x[:lengths[i]].tolist() for i, x in enumerate(tensor)]\n\n\ndef unroll(list_of_lists, rec=False):\n \"\"\"\n Unroll a list of lists.\n\n Args:\n list_of_lists (list): a list that contains lists\n rec (bool): unroll recursively\n Returns:\n a single list\n \"\"\"\n if not isinstance(list_of_lists[0], (np.ndarray, list)):\n return list_of_lists\n new_list = [item for l in list_of_lists for item in l]\n if rec and isinstance(new_list[0], (np.ndarray, list)):\n return unroll(new_list, rec=rec)\n return new_list\n\n\ndef clones(module, N):\n \"\"\"Produce N identical layers.\"\"\"\n return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\ndef subsequent_mask(size):\n \"\"\"Mask out subsequent positions.\n\n Args:\n size(int): squared tensor size\n \"\"\"\n return torch.tril(torch.ones(size, size, dtype=torch.uint8))\n\n\ndef sequence_mask(lengths, max_len=None):\n \"\"\"Creates a boolean mask from sequence lengths.\n\n Args:\n lengths (torch.LongTensor): lengths with shape (bs,)\n max_len (int, optional): max sequence length.\n if None it will be setted to lengths.max()\n \"\"\"\n if max_len is None:\n max_len = lengths.max()\n aranges = torch.arange(max_len).repeat(lengths.shape[0], 1)\n aranges = aranges.to(lengths.device)\n return aranges < lengths.unsqueeze(1)\n\n\ndef neighbours_mask(size, window_size):\n \"\"\"Mask for neighbour positions.\n\n Args:\n size(int): squared tensor size\n window_size(int): how many elements to be considered as valid around\n the ith element (including ith).\n \"\"\"\n z = torch.ones(size, size, dtype=torch.uint8)\n mask = (torch.triu(z, diagonal=1 + window_size // 2) +\n torch.tril(z, diagonal=- window_size // 2))\n return z - mask\n\n\ndef unsqueeze_as(tensor, as_tensor, dim=-1):\n \"\"\"Expand new dimensions based on a template tensor along `dim` axis.\"\"\"\n x = tensor\n while x.dim() < as_tensor.dim():\n x = x.unsqueeze(dim)\n return x\n\n\ndef make_mergeable_tensors(t1, t2):\n \"\"\"Expand a new dimension in t1 and t2 and expand them so that both\n tensors will have the same number of timesteps.\n\n Args:\n t1 (torch.Tensor): tensor with shape (bs, ..., m, d1)\n t2 (torch.Tensor): tensor with shape (bs, ..., n, d2)\n\n Returns:\n torch.Tensor: (bs, ..., m, n, d1)\n torch.Tensor: (bs, ..., m, n, d2)\n \"\"\"\n assert t1.dim() == t2.dim()\n 
assert t1.dim() >= 3\n assert t1.shape[:-2] == t2.shape[:-2]\n # new_shape = [-1, ..., m, n, -1]\n new_shape = [-1 for _ in range(t1.dim() + 1)]\n new_shape[-3] = t1.shape[-2] # m\n new_shape[-2] = t2.shape[-2] # n\n # (bs, ..., m, d1) -> (bs, ..., m, 1, d1) -> (bs, ..., m, n, d1)\n new_t1 = t1.unsqueeze(-2).expand(new_shape)\n # (bs, ..., n, d2) -> (bs, ..., 1, n, d2) -> (bs, ..., m, n, d2)\n new_t2 = t2.unsqueeze(-3).expand(new_shape)\n return new_t1, new_t2\n\n\ndef apply_packed_sequence(rnn, padded_sequences, lengths, hidden=None):\n \"\"\"\n Code adapted from Unbabel OpenKiwi.\n Runs a forward pass of embeddings through an rnn using packed sequence.\n\n Args:\n rnn: The RNN that that we want to compute a forward pass with.\n padded_sequences (FloatTensor b x seq x dim): A batch of sequence seqs.\n lengths (LongTensor batch): The length of each sequence in the batch.\n hidden (FloatTensor, optional): hidden state for the rnn.\n Returns:\n output: The output of the RNN `rnn` with input `embedding`\n \"\"\"\n # Sort Batch by sequence length\n total_length = padded_sequences.size(1) # Get the max sequence length\n lengths_sorted, permutation = torch.sort(lengths, descending=True)\n padded_sequences_sorted = padded_sequences[permutation]\n\n # Use Packed Sequence\n embedding_packed = pack(\n padded_sequences_sorted, lengths_sorted, batch_first=True\n )\n outputs_packed, hidden = rnn(embedding_packed, hidden)\n outputs_sorted, _ = unpack(\n outputs_packed, batch_first=True, total_length=total_length\n )\n\n # Restore original order\n _, permutation_rev = torch.sort(permutation, descending=False)\n outputs = outputs_sorted[permutation_rev]\n hidden[0] = hidden[0][permutation_rev]\n hidden[1] = hidden[1][permutation_rev]\n return outputs, hidden\n" ]
[ [ "torch.ones", "torch.tril", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.utils.rnn.pad_packed_sequence", "torch.sort", "torch.arange", "torch.triu" ] ]
juliakreutzer/neuralmonkey
[ "edc4cb9c375b87451430c779f2b09e0fbf183836" ]
[ "neuralmonkey/experiment.py" ]
[ "\"\"\"Provides a high-level API for training and using a model.\"\"\"\n\nfrom argparse import Namespace # pylint: disable=unused-import\nimport os\nimport random\nfrom shutil import copyfile\nimport subprocess\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple\nfrom typing import Set # pylint: disable=unused-import\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nfrom typeguard import check_argument_types\n\nfrom neuralmonkey.checking import (check_dataset_and_coders,\n CheckingException)\nfrom neuralmonkey.logging import Logging, log, debug\nfrom neuralmonkey.config.configuration import Configuration\nfrom neuralmonkey.learning_utils import (training_loop, evaluation,\n run_on_dataset,\n print_final_evaluation)\nfrom neuralmonkey.dataset import Dataset\nfrom neuralmonkey.model.sequence import EmbeddedFactorSequence\nfrom neuralmonkey.runners.base_runner import ExecutionResult\nfrom neuralmonkey.tf_manager import get_default_tf_manager\n\n\n_TRAIN_ARGS = [\n \"val_dataset\", \"trainer\", \"name\", \"train_dataset\", \"epochs\",\n \"test_datasets\", \"initial_variables\", \"validation_period\",\n \"val_preview_input_series\", \"val_preview_output_series\",\n \"val_preview_num_examples\", \"logging_period\", \"visualize_embeddings\",\n \"random_seed\", \"overwrite_output_dir\"\n]\n\n\n_EXPERIMENT_FILES = [\"experiment.log\", \"experiment.ini\", \"original.ini\",\n \"git_commit\", \"git_diff\", \"variables.data.best\"]\n\n\nclass Experiment(object):\n # pylint: disable=no-member\n\n _current_experiment = None\n\n def __init__(self,\n config_path: str,\n train_mode: bool = False,\n overwrite_output_dir: bool = False,\n config_changes: List[str] = None) -> None:\n \"\"\"Initialize a Neural Monkey experiment.\n\n Arguments:\n config_path: The path to the experiment configuration file.\n train_mode: Indicates whether the model should be prepared for\n training.\n overwrite_output_dir: Indicates whether an existing experiment\n should be reused. 
If `True`, this overrides the setting in\n the configuration file.\n config_changes: A list of modifications that will be made to the\n loaded configuration file before parsing.\n \"\"\"\n self.train_mode = train_mode\n self._config_path = config_path\n\n self.graph = tf.Graph()\n self._initializers = {} # type: Dict[str, Callable]\n self._initialized_variables = set() # type: Set[str]\n self.cont_index = -1\n self._model_built = False\n self._vars_loaded = False\n self._model = None # type: Optional[Namespace]\n\n self.config = create_config(train_mode)\n self.config.load_file(config_path, config_changes)\n args = self.config.args\n\n if self.train_mode:\n # We may need to create the experiment directory.\n if (os.path.isdir(args.output)\n and os.path.exists(\n os.path.join(args.output, \"experiment.ini\"))):\n if args.overwrite_output_dir or overwrite_output_dir:\n # we do not want to delete the directory contents\n log(\"Directory with experiment.ini '{}' exists, \"\n \"overwriting enabled, proceeding.\".format(args.output))\n else:\n raise RuntimeError(\n \"Directory with experiment.ini '{}' exists, \"\n \"overwriting disabled.\".format(args.output))\n\n if not os.path.isdir(args.output):\n os.mkdir(args.output)\n\n # Find how many times the experiment has been continued.\n while any(os.path.exists(self.get_path(f, self.cont_index + 1))\n for f in _EXPERIMENT_FILES):\n self.cont_index += 1\n\n @property\n def model(self) -> Namespace:\n if self._model is None:\n raise RuntimeError(\"Experiment argument model not initialized\")\n\n return self._model\n\n def build_model(self) -> None:\n if self._model_built:\n raise RuntimeError(\"build_model() called twice\")\n\n random.seed(self.config.args.random_seed)\n np.random.seed(self.config.args.random_seed)\n\n with self.graph.as_default():\n tf.set_random_seed(self.config.args.random_seed)\n\n # Enable the created model parts to find this experiment.\n type(self)._current_experiment = self # type: ignore\n self.config.build_model(warn_unused=self.train_mode)\n type(self)._current_experiment = None\n\n self._model = self.config.model\n self._model_built = True\n\n if self.model.runners_batch_size is None:\n self.model.runners_batch_size = self.model.batch_size\n\n if self.model.tf_manager is None:\n self.model.tf_manager = get_default_tf_manager()\n\n if self.train_mode:\n check_dataset_and_coders(self.model.train_dataset,\n self.model.runners)\n if isinstance(self.model.val_dataset, Dataset):\n check_dataset_and_coders(self.model.val_dataset,\n self.model.runners)\n else:\n for val_dataset in self.model.val_dataset:\n check_dataset_and_coders(val_dataset,\n self.model.runners)\n\n if self.train_mode and self.model.visualize_embeddings:\n visualize_embeddings(self.model.visualize_embeddings,\n self.model.output)\n\n self._check_unused_initializers()\n\n def train(self) -> None:\n if not self.train_mode:\n raise RuntimeError(\"train() was called, but the experiment was \"\n \"created with train_mode=False\")\n if not self._model_built:\n self.build_model()\n\n self.cont_index += 1\n\n # Initialize the experiment directory.\n self.config.save_file(self.get_path(\"experiment.ini\"))\n copyfile(self._config_path, self.get_path(\"original.ini\"))\n save_git_info(self.get_path(\"git_commit\"), self.get_path(\"git_diff\"))\n Logging.set_log_file(self.get_path(\"experiment.log\"))\n\n Logging.print_header(self.model.name, self.model.output)\n\n with self.graph.as_default():\n self.model.tf_manager.init_saving(self.get_path(\"variables.data\"))\n\n 
training_loop(\n tf_manager=self.model.tf_manager,\n epochs=self.model.epochs,\n trainer=self.model.trainer,\n batch_size=self.model.batch_size,\n log_directory=self.model.output,\n evaluators=self.model.evaluation,\n runners=self.model.runners,\n train_dataset=self.model.train_dataset,\n val_dataset=self.model.val_dataset,\n test_datasets=self.model.test_datasets,\n logging_period=self.model.logging_period,\n validation_period=self.model.validation_period,\n val_preview_input_series=self.model.val_preview_input_series,\n val_preview_output_series=self.model.val_preview_output_series,\n val_preview_num_examples=self.model.val_preview_num_examples,\n postprocess=self.model.postprocess,\n train_start_offset=self.model.train_start_offset,\n runners_batch_size=self.model.runners_batch_size,\n initial_variables=self.model.initial_variables)\n\n self._vars_loaded = True\n\n def load_variables(self, variable_files: List[str] = None) -> None:\n if not self._model_built:\n self.build_model()\n\n if variable_files is None:\n variable_files = [self.get_path(\"variables.data\")]\n log(\"Default variable file '{}' will be used for loading \"\n \"variables.\".format(variable_files[0]))\n\n for vfile in variable_files:\n if not os.path.exists(\"{}.index\".format(vfile)):\n raise RuntimeError(\n \"Index file for var prefix {} does not exist\"\n .format(vfile))\n\n self.model.tf_manager.restore(variable_files)\n self._vars_loaded = True\n\n def run_model(self,\n dataset: Dataset,\n write_out: bool = False,\n batch_size: int = None,\n log_progress: int = 0) -> Tuple[List[ExecutionResult],\n Dict[str, List[Any]]]:\n \"\"\"Run the model on a given dataset.\n\n Args:\n dataset: The dataset on which the model will be executed.\n write_out: Flag whether the outputs should be printed to a file\n defined in the dataset object.\n batch_size: size of the minibatch\n log_progress: log progress every X seconds\n\n Returns:\n A list of `ExecutionResult`s and a dictionary of the output series.\n \"\"\"\n if not self._model_built:\n self.build_model()\n if not self._vars_loaded:\n self.load_variables()\n\n with self.graph.as_default():\n # TODO: check_dataset_and_coders(dataset, self.model.runners)\n return run_on_dataset(\n self.model.tf_manager, self.model.runners, dataset,\n self.model.postprocess,\n write_out=write_out, log_progress=log_progress,\n batch_size=batch_size or self.model.runners_batch_size)\n\n def evaluate(self,\n dataset: Dataset,\n write_out: bool = False,\n batch_size: int = None,\n log_progress: int = 0) -> Dict[str, Any]:\n \"\"\"Run the model on a given dataset and evaluate the outputs.\n\n Args:\n dataset: The dataset on which the model will be executed.\n write_out: Flag whether the outputs should be printed to a file\n defined in the dataset object.\n batch_size: size of the minibatch\n log_progress: log progress every X seconds\n\n Returns:\n Dictionary of evaluation names and their values which includes the\n metrics applied on respective series loss and loss values from the\n run.\n \"\"\"\n execution_results, output_data = self.run_model(\n dataset, write_out, batch_size, log_progress)\n\n evaluators = [(e[0], e[0], e[1]) if len(e) == 2 else e\n for e in self.model.evaluation]\n with self.graph.as_default():\n eval_result = evaluation(\n evaluators, dataset, self.model.runners,\n execution_results, output_data)\n if eval_result:\n print_final_evaluation(dataset.name, eval_result)\n\n return eval_result\n\n def get_path(self, filename: str, cont_index: int = None) -> str:\n \"\"\"Return the 
path to the most recent version of the given file.\"\"\"\n if cont_index is None:\n cont_index = self.cont_index\n cont_suffix = \".cont-{}\".format(cont_index) if cont_index > 0 else \"\"\n\n if filename.startswith(\"variables.data\"):\n new_filename = \"variables.data\" + cont_suffix + filename[14:]\n else:\n new_filename = filename + cont_suffix\n\n return os.path.join(self.config.args.output, new_filename)\n\n def update_initializers(\n self, initializers: Iterable[Tuple[str, Callable]]) -> None:\n \"\"\"Update the dictionary mapping variable names to initializers.\"\"\"\n self._initializers.update(initializers)\n\n def get_initializer(self, var_name: str,\n default: Callable = None) -> Optional[Callable]:\n \"\"\"Return the initializer associated with the given variable name.\n\n Calling the method marks the given initializer as used.\n \"\"\"\n initializer = self._initializers.get(var_name, default)\n if initializer is not default:\n debug(\"Using {} for variable {}\".format(initializer, var_name))\n self._initialized_variables.add(var_name)\n return initializer\n\n def _check_unused_initializers(self) -> None:\n unused_initializers = [name for name in self._initializers\n if name not in self._initialized_variables]\n if unused_initializers:\n raise CheckingException(\n \"Initializers were specified for the following non-existent \"\n \"variables: \" + \", \".join(unused_initializers))\n\n @classmethod\n def get_current(cls) -> \"Experiment\":\n \"\"\"Return the experiment that is currently being built.\"\"\"\n return cls._current_experiment or _DUMMY_EXPERIMENT\n\n\ndef create_config(train_mode: bool = True) -> Configuration:\n config = Configuration()\n config.add_argument(\"tf_manager\", required=False, default=None)\n config.add_argument(\"batch_size\", cond=lambda x: x > 0)\n config.add_argument(\"output\")\n config.add_argument(\"postprocess\", required=False, default=None)\n config.add_argument(\"runners\")\n config.add_argument(\"runners_batch_size\", required=False, default=None)\n\n if train_mode:\n config.add_argument(\"epochs\", cond=lambda x: x >= 0)\n config.add_argument(\"trainer\")\n config.add_argument(\"train_dataset\")\n config.add_argument(\"val_dataset\")\n config.add_argument(\"evaluation\")\n config.add_argument(\"test_datasets\", required=False, default=[])\n config.add_argument(\"logging_period\", required=False, default=20)\n config.add_argument(\"validation_period\", required=False, default=500)\n config.add_argument(\"visualize_embeddings\", required=False,\n default=None)\n config.add_argument(\"val_preview_input_series\",\n required=False, default=None)\n config.add_argument(\"val_preview_output_series\",\n required=False, default=None)\n config.add_argument(\"val_preview_num_examples\",\n required=False, default=15)\n config.add_argument(\"train_start_offset\", required=False, default=0)\n config.add_argument(\"name\", required=False,\n default=\"Neural Monkey Experiment\")\n config.add_argument(\"random_seed\", required=False, default=2574600)\n config.add_argument(\"initial_variables\", required=False, default=None)\n config.add_argument(\"overwrite_output_dir\", required=False,\n default=False)\n else:\n config.add_argument(\"evaluation\", required=False, default=None)\n for argument in _TRAIN_ARGS:\n config.ignore_argument(argument)\n\n return config\n\n\nclass _DummyExperiment(Experiment):\n \"\"\"A dummy Experiment.\n\n An instance of this class takes care of initializers when no other\n experiment is the current experiment. 
This is needed when someone creates\n a model part outside an experiment (e.g. in a unit test).\n \"\"\"\n\n def __init__(self):\n # pylint: disable=super-init-not-called\n self._initializers = {} # type: Dict[str, Callable]\n self._initialized_variables = set() # type: Set[str]\n self._warned = False\n\n def update_initializers(\n self, initializers: Iterable[Tuple[str, Callable]]) -> None:\n self._warn()\n super().update_initializers(initializers)\n\n def get_initializer(self, var_name: str,\n default: Callable = None) -> Optional[Callable]:\n \"\"\"Return the initializer associated with the given variable name.\"\"\"\n self._warn()\n return super().get_initializer(var_name, default)\n\n def _warn(self) -> None:\n if not self._warned:\n log(\"Warning: Creating a model part outside of an experiment.\",\n color=\"red\")\n self._warned = True\n\n\n_DUMMY_EXPERIMENT = _DummyExperiment()\n\n\ndef save_git_info(git_commit_file: str, git_diff_file: str,\n branch: str = \"HEAD\", repo_dir: str = None) -> None:\n if repo_dir is None:\n # This points inside the neuralmonkey/ dir inside the repo, but\n # it does not matter for git.\n repo_dir = os.path.dirname(os.path.realpath(__file__))\n\n with open(git_commit_file, \"wb\") as file:\n subprocess.run([\"git\", \"log\", \"-1\", \"--format=%H\", branch],\n cwd=repo_dir, stdout=file)\n\n with open(git_diff_file, \"wb\") as file:\n subprocess.run([\"git\", \"--no-pager\", \"diff\", \"--color=always\", branch],\n cwd=repo_dir, stdout=file)\n\n\ndef visualize_embeddings(sequences: List[EmbeddedFactorSequence],\n output_dir: str) -> None:\n check_argument_types()\n\n tb_projector = projector.ProjectorConfig()\n\n for sequence in sequences:\n sequence.tb_embedding_visualization(output_dir, tb_projector)\n\n summary_writer = tf.summary.FileWriter(output_dir)\n projector.visualize_embeddings(summary_writer, tb_projector)\n" ]
[ [ "tensorflow.Graph", "tensorflow.summary.FileWriter", "numpy.random.seed", "tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings", "tensorflow.set_random_seed", "tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig" ] ]
mli/tvm
[ "814b46dccb3c340be7611d113a303a546f7b2b2a" ]
[ "topi/tests/python/test_topi_upsampling.py" ]
[ "\"\"\"Test code for upsampling\"\"\"\nimport numpy as np\nimport tvm\nimport topi\nimport math\n\ndef verify_upsampling(batch, in_channel, in_height, in_width, scale):\n A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')\n B = topi.nn.upsampling(A, scale)\n out_shape = (batch, in_channel, in_height*scale, in_width*scale)\n dtype = A.dtype\n\n a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)\n b_np = topi.testing.upsampling_python(a_np, scale)\n\n def check_device(device):\n if not tvm.module.enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_injective(B)\n ctx = tvm.context(device, 0)\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)\n f = tvm.build(s, [A, B], device)\n f(a, b)\n\n np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n\n for device in ['llvm', 'cuda']:\n check_device(device)\n\ndef test_upsampling():\n verify_upsampling(8, 16, 32, 32, 2)\n verify_upsampling(12, 32, 64, 64, 3)\n\nif __name__ == \"__main__\":\n test_upsampling()\n" ]
[ [ "numpy.random.uniform", "numpy.zeros" ] ]
adynmiles/DARTS-FQA
[ "a088a0efeb1160d0cdbf2b2a3e30f132c16eb53f" ]
[ "analyze.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn as nn\nfrom numpy.linalg import eigvals\nfrom torch.autograd import Variable\nfrom copy import deepcopy\nimport logging\n\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\n\nclass Analyzer(object):\n def __init__(self, model, args):\n self.network_momentum = args.momentum\n self.network_weight_decay = args.weight_decay\n self.model = model\n self.weight_decay = args.arch_weight_decay\n self.hessian = None\n self.grads = None\n self.adaptive_stop = args.adaptive_stop\n self.adas = args.adas\n\n def _compute_unrolled_model(self, input, target, lr_vector, layers_todo, network_optimizer):\n loss = self.model._loss(input, target)\n theta = _concat(self.model.parameters()).data\n try:\n moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(\n self.network_momentum)\n except:\n moment = torch.zeros_like(theta)\n # dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay * theta\n model_params = list(filter(lambda p: p.requires_grad, self.model.parameters()))\n dtheta = _concat([grad_i + self.network_weight_decay * theta_i\n for grad_i, theta_i in\n zip(torch.autograd.grad(loss, model_params), model_params)])\n\n ################################################################################\n # AdaS\n # adaptive stopping: frozen parameters don't have gradients,\n # so don't update them\n if self.adas:\n iteration_p = 0\n offset_p = 0\n offset_dp = 0\n for p in self.model.parameters():\n p_length = np.prod(p.size())\n\n if ~layers_todo[iteration_p]:\n # not updating the frozen conv layers\n iteration_p += 1\n offset_p += p_length\n continue\n lr = lr_vector[iteration_p]\n d_p = moment[offset_p: offset_p + p_length] + \\\n dtheta[offset_dp: offset_dp + p_length]\n theta[offset_p: offset_p + p_length].sub_(d_p, alpha=lr)\n offset_p += p_length\n offset_dp += p_length\n iteration_p += 1\n unrolled_model = self._construct_model_from_theta(theta, layers_todo)\n ################################################################################\n # original darts\n else:\n unrolled_model = self._construct_model_from_theta(theta.sub(lr_vector, moment + dtheta), None)\n\n return unrolled_model\n\n def _backward_step(self, input_valid, target_valid, create_graph):\n loss = self.model._loss(input_valid, target_valid)\n loss.backward(create_graph=create_graph)\n\n def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid,\n lr, layers, network_optimizer, create_graph):\n # eqn(6):dαLval(w',α) ,where w' = w − ξ*dwLtrain(w, α)\n # w'\n unrolled_model = self._compute_unrolled_model(input_train, target_train, lr, layers,\n network_optimizer) # unrolled_model: w -> w'\n # Lval: validation loss\n unrolled_loss = unrolled_model._loss(input_valid, target_valid)\n\n unrolled_loss.backward(create_graph=create_graph)\n # dαLval(w',α)\n dalpha = [v.grad for v in unrolled_model.arch_parameters()] # grad wrt alpha\n\n # dw'Lval(w',α)\n vector = [v.grad.data if v.requires_grad else None for v in\n unrolled_model.parameters()] # unrolled_model.parameters(): w‘\n ################################################################################\n # AdaS: use different etas for different w's\n # with adaptive stopping\n if self.adas:\n iteration_p = 0\n for p in vector:\n if ~layers[iteration_p]:\n assert p is None\n iteration_p += 1\n continue\n p.mul_(lr[iteration_p])\n iteration_p += 1\n 
################################################################################\n\n # eqn(8): (dαLtrain(w+,α)-dαLtrain(w-,α))/(2*epsilon)\n # where w+=w+dw'Lval(w',α)*epsilon w- = w-dw'Lval(w',α)*epsilon\n implicit_grads = self._hessian_vector_product(vector, input_train, target_train)\n\n # eqn(6)-eqn(8): dαLval(w',α)-(dαLtrain(w+,α)-dαLtrain(w-,α))/(2*epsilon)\n for g, ig in zip(dalpha, implicit_grads):\n # g.data.sub_(eta, ig.data)\n g.data.sub_(ig.data)\n # update α\n for v, g in zip(self.model.arch_parameters(), dalpha):\n if v.grad is None:\n v.grad = Variable(g.data)\n else:\n v.grad.data.copy_(g.data)\n\n def _construct_model_from_theta(self, theta, layers_todo):\n model_new = self.model.new()\n model_dict = self.model.state_dict()\n\n params, offset = {}, 0\n for k, v in self.model.named_parameters():\n v_length = np.prod(v.size())\n params[k] = theta[offset: offset + v_length].view(v.size())\n offset += v_length\n\n assert offset == len(theta)\n model_dict.update(params)\n model_new.load_state_dict(model_dict)\n\n ################################################################################\n # adaptive stopping\n if self.adaptive_stop:\n iteration_p = 0\n for p in model_new.parameters():\n if ~layers_todo[iteration_p]:\n p.requires_grad = False\n p.grad = None\n iteration_p += 1\n ################################################################################\n\n return model_new.cuda()\n\n def _hessian_vector_product(self, vector, input, target, r=1e-2):\n R = r / _concat(vector).norm()\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(R, v)\n loss = self.model._loss(input, target)\n grads_p = torch.autograd.grad(loss, self.model.arch_parameters())\n\n for p, v in zip(self.model.parameters(), vector):\n p.data.sub_(2 * R, v)\n loss = self.model._loss(input, target)\n grads_n = torch.autograd.grad(loss, self.model.arch_parameters())\n\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(R, v)\n\n return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]\n\n def compute_dw(self, input_train, target_train, input_valid, target_valid,\n lr, layers, network_optimizer, unrolled):\n self.zero_grads(self.model.parameters())\n self.zero_grads(self.model.arch_parameters())\n if unrolled:\n self._backward_step_unrolled(input_train, target_train, input_valid, target_valid,\n lr, layers, network_optimizer, False)\n else:\n self._backward_step(input_valid, target_valid, False)\n\n self.grads = [v.grad + self.weight_decay * v for v in self.model.arch_parameters()]\n return self.grads\n\n def compute_Hw(self, input_train, target_train, input_valid, target_valid,\n lr, layers, network_optimizer, unrolled):\n # logging.info('zero grad model param')\n self.zero_grads(self.model.parameters())\n # logging.info('zero grad arch param')\n self.zero_grads(self.model.arch_parameters())\n # if unrolled:\n # self._backward_step_unrolled(input_train, target_train, input_valid, target_valid,\n # lr, layers, network_optimizer, True)\n # else:\n # self._backward_step(input_valid, target_valid, True)\n\n # self.grads = [v.grad + self.weight_decay*v for v in self.model.arch_parameters()]\n # logging.info('compute loss')\n loss = self.model._loss(input_valid, target_valid)\n # logging.info('compute hessian')\n self.hessian = self._hessian(loss, self.model.arch_parameters())\n return self.hessian\n\n def compute_eigenvalues(self):\n # hessian = self.compute_Hw(input, target)\n if self.hessian is None:\n raise ValueError\n return eigvals(self.hessian.cpu().data.numpy())\n\n 
def zero_grads(self, parameters):\n for p in parameters:\n if p.grad is not None:\n p.grad.detach_()\n p.grad.zero_()\n # if p.grad.volatile:\n # p.grad.data.zero_()\n # else:\n # data = p.grad.data\n # p.grad = Variable(data.new().resize_as_(data).zero_())\n\n def gradient(self, _outputs, _inputs, grad_outputs=None, retain_graph=None,\n create_graph=False):\n if torch.is_tensor(_inputs):\n _inputs = [_inputs]\n else:\n _inputs = list(_inputs)\n grads = torch.autograd.grad(_outputs, _inputs, grad_outputs,\n allow_unused=True,\n retain_graph=retain_graph,\n create_graph=create_graph)\n grads = [x if x is not None else torch.zeros_like(y) for x, y in zip(grads,\n _inputs)]\n return torch.cat([x.contiguous().view(-1) for x in grads])\n\n def _hessian(self, outputs, inputs, out=None, allow_unused=False,\n create_graph=False):\n # assert outputs.data.ndimension() == 1\n\n if torch.is_tensor(inputs):\n inputs = [inputs]\n else:\n inputs = list(inputs)\n\n n = sum(p.numel() for p in inputs)\n if out is None:\n out = Variable(torch.zeros(n, n)).type_as(outputs)\n\n ai = 0\n for i, inp in enumerate(inputs):\n # logging.info('input {}'.format(i))\n # logging.info('grad')\n [grad] = torch.autograd.grad(outputs, inp, create_graph=True,\n allow_unused=allow_unused)\n grad = grad.contiguous().view(-1) + self.weight_decay * inp.view(-1)\n # grad = outputs[i].contiguous().view(-1)\n\n for j in range(inp.numel()):\n # logging.info('input {}\\'s{}'.format(i,j))\n # print('(i, j): ', i, j)\n if grad[j].requires_grad:\n # logging.info('grad grad')\n row = self.gradient(grad[j], inputs[i:], retain_graph=True)[j:]\n else:\n n = sum(x.numel() for x in inputs[i:]) - j\n row = Variable(torch.zeros(n)).type_as(grad[j])\n # row = grad[j].new_zeros(sum(x.numel() for x in inputs[i:]) - j)\n\n out.data[ai, ai:].add_(row.clone().type_as(out).data) # ai's row\n if ai + 1 < n:\n out.data[ai + 1:, ai].add_(row.clone().type_as(out).data[1:]) # ai's column\n del row\n ai += 1\n del grad\n return out\n" ]
[ [ "torch.zeros", "torch.zeros_like", "torch.is_tensor", "torch.autograd.grad", "torch.autograd.Variable" ] ]
nivibilla/EfficientHTR
[ "29b7df216cf4d051c533a48715d696b351377d09", "29b7df216cf4d051c533a48715d696b351377d09" ]
[ "wordDetectorNN/src/infer - Copy.py", "wordDetectorNN/src/eval - Copy.py" ]
[ "import argparse\n\nimport torch\nfrom path import Path\nfrom torch._C import device\n\nfrom dataloader import DataLoaderImgFile\nfrom eval import evaluate\nfrom net import WordDetectorNet\nfrom visualization import visualize_and_plot\n\n\ndef infer(device='cuda'):\n\n net = WordDetectorNet()\n net.load_state_dict(torch.load('../model/weights', map_location=device))\n net.eval()\n net.to(device)\n\n loader = DataLoaderImgFile(Path('../data/test'), net.input_size, device)\n res = evaluate(net, loader, max_aabbs=1000)\n\n for i, (img, aabbs) in enumerate(zip(res.batch_imgs, res.batch_aabbs)):\n f = loader.get_scale_factor(i)\n aabbs = [aabb.scale(1 / f, 1 / f) for aabb in aabbs]\n img = loader.get_original_img(i)\n visualize_and_plot(img, aabbs)", "import argparse\nfrom collections import namedtuple\n\nimport numpy as np\nimport torch\nfrom path import Path\n\nfrom aabb import AABB\nfrom aabb_clustering import cluster_aabbs\nfrom coding import decode, fg_by_cc\nfrom dataloader import DataLoaderIAM\nfrom dataset import DatasetIAM, DatasetIAMSplit\nfrom iou import compute_dist_mat_2\nfrom loss import compute_loss\nfrom net import WordDetectorNet\nfrom utils import compute_scale_down\nfrom visualization import visualize_and_plot\n\nEvaluateRes = namedtuple('EvaluateRes', 'batch_imgs,batch_aabbs,loss,metrics')\n\n\nclass BinaryClassificationMetrics:\n def __init__(self, tp, fp, fn):\n self.tp = tp\n self.fp = fp\n self.fn = fn\n\n def accumulate(self, other):\n tp = self.tp + other.tp\n fp = self.fp + other.fp\n fn = self.fn + other.fn\n return BinaryClassificationMetrics(tp, fp, fn)\n\n def recall(self):\n return self.tp / (self.tp + self.fp) if self.tp + self.fp > 0 else 0\n\n def precision(self):\n return self.tp / (self.tp + self.fn) if self.tp + self.fn > 0 else 0\n\n def f1(self):\n re = self.recall()\n pr = self.precision()\n return 2 * pr * re / (pr + re) if pr + re > 0 else 0\n\n\ndef binary_classification_metrics(gt_aabbs, pred_aabbs):\n iou_thres = 0.7\n\n ious = 1 - compute_dist_mat_2(gt_aabbs, pred_aabbs)\n match_counter = (ious > iou_thres).astype(np.int)\n gt_counter = np.sum(match_counter, axis=1)\n pred_counter = np.sum(match_counter, axis=0)\n\n tp = np.count_nonzero(pred_counter == 1)\n fp = np.count_nonzero(pred_counter == 0)\n fn = np.count_nonzero(gt_counter == 0)\n\n return BinaryClassificationMetrics(tp, fp, fn)\n\n\ndef evaluate(net, loader, thres=0.5, max_aabbs=None):\n batch_imgs = []\n batch_aabbs = []\n loss = 0\n\n for i in range(len(loader)):\n # get batch\n loader_item = loader[i]\n with torch.no_grad():\n # loader_item.batch_imgs = torch.from_numpy(processedImg).to(self.device)\n y = net(loader_item.batch_imgs, apply_softmax=True)\n y_np = y.to('cpu').numpy()\n if loader_item.batch_gt_maps is not None:\n loss += compute_loss(y, loader_item.batch_gt_maps).to('cpu').numpy()\n\n scale_up = 1 / compute_scale_down(WordDetectorNet.input_size, WordDetectorNet.output_size)\n metrics = BinaryClassificationMetrics(0, 0, 0)\n for i in range(len(y_np)):\n img_np = loader_item.batch_imgs[i, 0].to('cpu').numpy() # Actual processed image\n pred_map = y_np[i] # processed image mapped to net\n\n aabbs = decode(pred_map, comp_fg=fg_by_cc(thres, max_aabbs), f=scale_up)\n h, w = img_np.shape\n aabbs = [aabb.clip(AABB(0, w - 1, 0, h - 1)) for aabb in aabbs] # bounding box must be inside img\n clustered_aabbs = cluster_aabbs(aabbs)\n\n if loader_item.batch_aabbs is not None:\n curr_metrics = binary_classification_metrics(loader_item.batch_aabbs[i], clustered_aabbs)\n metrics = 
metrics.accumulate(curr_metrics)\n\n batch_imgs.append(img_np)\n batch_aabbs.append(clustered_aabbs)\n\n return EvaluateRes(batch_imgs, batch_aabbs, loss / len(loader), metrics)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, default=10)\n parser.add_argument('--data_dir', type=Path, required=True)\n args = parser.parse_args()\n\n net = WordDetectorNet()\n net.load_state_dict(torch.load('../model/weights'))\n net.eval()\n net.to('cuda')\n\n dataset = DatasetIAM(args.data_dir, net.input_size, net.output_size, caching=False)\n dataset_eval = DatasetIAMSplit(dataset, 0, 10)\n loader = DataLoaderIAM(dataset_eval, args.batch_size, net.input_size, net.output_size)\n\n res = evaluate(net, loader, max_aabbs=1000)\n print(f'Loss: {res.loss}')\n print(f'Recall: {res.metrics.recall()}')\n print(f'Precision: {res.metrics.precision()}')\n print(f'F1 score: {res.metrics.f1()}')\n\n for img, aabbs in zip(res.batch_imgs, res.batch_aabbs):\n visualize_and_plot(img, aabbs)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load" ], [ "torch.no_grad", "numpy.sum", "numpy.count_nonzero", "torch.load" ] ]
khose/mseg-api
[ "ae649c40918a5fc8a14a88a12b01b8032cc53492" ]
[ "mseg/utils/txt_utils.py" ]
[ "#!/usr/bin/python3\n\nimport numpy as np\nfrom pathlib import Path\nfrom typing import List, Tuple\n\n_ROOT = Path(__file__).resolve().parent.parent\n\ndef read_txt_file(txt_fpath: str, strip_newlines: bool=False) -> List[str]:\n\t\"\"\" \n\t\tArgs:\n\t\t-\ttxt_fpath: string representing path to txt file\n\n\t\tReturns:\n\t\t-\ttxt_lines: list of strings, one per line of file\n\t\"\"\"\n\twith open(txt_fpath, 'r') as f:\n\t\ttxt_lines = f.readlines()\n\n\tif strip_newlines:\n\t\ttxt_lines = [line.strip() for line in txt_lines]\n\treturn txt_lines\n\n\ndef get_last_n_path_elements_as_str(fpath: str, n: int) -> str:\n\t\"\"\"\n\t\tArgs:\n\t\t-\tfpath: string representing file path\n\t\t-\tn: integer representing last number of filepath elements to keep\n\n\t\tReturns:\n\t\t-\t\n\t\"\"\"\n\telements = fpath.split('/')[-n:]\n\treturn '/'.join(elements)\n\n\ndef write_txt_lines(save_fpath: str, txt_lines: List[str]) -> None:\n\t\"\"\" Note that this function will add a carriage return, so please be \n\t\tmindful of this. Freshly loaded lines from a file will have carriage\n\t\treturns, by default.\n\n\t\tArgs:\n\t\t-\tsave_fpath: string representing file path, where file should be saved\n\t\t-\ttxt_lines: \n\n\t\tReturns:\n\t\t-\tNone\n\t\"\"\"\n\twith open(save_fpath, 'w') as f:\n\t\tfor txt_line in txt_lines:\n\t\t\tf.write(f'{txt_line}\\n')\n\ndef read_rgb_and_label_tuple_file(fpath: str) -> Tuple[List[str],List[str]]:\n\t\"\"\"\n\t\tArgs:\n\t\t-\tfpath\n\n\t\tReturns:\n\t\t-\trgb_img_fpaths\n\t\t-\tlabel_fpaths\n\t\"\"\"\n\ttuples = list(np.genfromtxt(fpath, delimiter='\\n', dtype=str))\n\ttuples = [ line.split(' ') for line in tuples ]\n\trgb_img_fpaths = [ elements[0] for elements in tuples ]\n\tlabel_fpaths = [ elements[1] for elements in tuples ]\n\n\treturn rgb_img_fpaths, label_fpaths\n\n\n\ndef subsample_txt_lines(txt_fpath: str, save_fpath: str, subsample_nth: str) -> None:\n\t\"\"\"\n\t\tArgs:\n\t\t-\ttxt_fpath:\n\t\t-\tsave_fpath:\n\t\t-\tsubsample_nth:\n\n\t\tReturns:\n\t\t-\tNone\n\t\"\"\"\n\ttxt_lines = read_txt_file(txt_fpath)\n\ttxt_lines = [line.strip() for line in txt_lines]\n\twrite_txt_lines(save_fpath, txt_lines[::subsample_nth])\n\n\n\ndef generate_all_img_label_pair_fpaths(data_root: str, split_txt_fpath: str):\n\t\"\"\" \n\t\tArgs:\n\t\t-\t\n\n\t\tReturns:\n\t\t-\t\n\t\"\"\"\n\tpairs = []\n\trgb_img_fpaths, label_fpaths = read_rgb_and_label_tuple_file(split_txt_fpath)\n\tfor rel_rgb_fpath, rel_label_fpath in zip(rgb_img_fpaths, label_fpaths):\n\t\timg_fpath = f'{data_root}/{rel_rgb_fpath}'\n\t\tlabel_fpath = f'{data_root}/{rel_label_fpath}'\n\t\tpairs += [(img_fpath,label_fpath)]\n\treturn pairs\n\n\ndef generate_all_img_label_pair_relative_fpaths(dname: str, split: str):\n\t\"\"\" \n\t\tArgs:\n\t\t-\tdname:\n\t\t-\tsplit: e.g. 'train', 'val', 'trainval', etc.\n\n\t\tReturns:\n\t\t-\t\n\t\"\"\"\n\tsplit_txt_fpath = _ROOT / f'dataset_lists/{dname}/list/{split}.txt'\n\tpairs = []\n\trgb_img_fpaths, label_fpaths = read_rgb_and_label_tuple_file(split_txt_fpath)\n\treturn list(zip(rgb_img_fpaths, label_fpaths))\n\n" ]
[ [ "numpy.genfromtxt" ] ]
SRooke/COG_adm2
[ "ebdc2e5523bddf93ad4b102f49cf88bcb0d86653" ]
[ "sjoin.py" ]
[ "\"\"\"\"Spatial joins for GeoPandas\n\nThe general algorithm for these spatial join functions is:\n 1. Use a spatial index (rtree in the current implementation) to roughly compute\n the results. Because rtree operates on bounding boxes, this result is not final\n and must be refined, but refines the search to a much smaller subset of geometries\n in most cases.\n 2. Use basic geometric operations on each of the matches from the spatial index\n query to get an exact result. This operation can be slow since it is iterative.\n In the future, this may be sped up by vectorizing operations, see PR#1154.\n\nIn order to avoid duplication, functionality required by all (current) spatial\njoin implementations has been moved to helper methods, resulting in the following\noverall flow:\n 1. Input checks: delegated to _basic_checks()\n 2. Rename left_df and right_df indexes for compatibility with rtree (which only\n works with numeric indexes): delegated to _rename_indexes()\n 3. Compute raw result of spatial join (i.e. which indexes match) using the\n algorithm mentioned above (and handle the \"op\" parameter in the case of\n sjoin): this happens within the public user-facing sjoin_... functions.\n 4. Take the raw result from (3) and join left_df and right_df according to the\n matches and the \"how\" parameter: delegated to _join_results.\n\nCurrently, 2 types of spatial join are implemented:\n * sjoin: spatial join with basic binary predicates (intersection, contains and\n within)\n * sjoin_nearest: matches the nearest geometries.\n\"\"\"\n\nfrom warnings import warn\n\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame, base\n\n\nRTREE_VERSION = \"\" # string to match expected type from rtree.__version__\nif base.HAS_SINDEX:\n import rtree\n\n RTREE_VERSION = rtree.__version__\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames based on binary predicates.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n # ------------------------------ CHECK INPUTS ------------------------------\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n _basic_checks(left_df, right_df)\n\n # ----------------- RENAME INDEXES FOR RTREE COMPATIBILITY -----------------\n (\n left_df,\n right_df,\n left_index_name,\n right_index_name,\n index_left,\n index_right,\n ) = _rename_indexes(left_df, right_df, lsuffix, rsuffix)\n\n # 
------------------------ COMPUTE SPATIAL JOIN ----------------------------\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe. If we are joining to an empty dataframe,\n # don't bother generating the index\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex if len(left_df) > 0 else None\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex if len(right_df) > 0 else None\n tree_idx_right = False\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index. If tree_idx does not exist, it is due to either a\n # failure to generate the index (e.g., if the column is empty), or the\n # other dataframe is empty so it wasn't necessary to generate it.\n if tree_idx_right and tree_idx:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n elif not tree_idx_right and tree_idx:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations (no added speed, just convenience)\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n # ------------------- HANDLE HOW PARAM, CREATE FINAL DF --------------------\n return _join_results(\n result,\n left_df,\n right_df,\n how,\n left_index_name,\n right_index_name,\n index_left,\n index_right,\n lsuffix,\n rsuffix,\n )\n\n\ndef sjoin_nearest(\n left_df,\n right_df,\n how=\"inner\",\n lsuffix=\"left\",\n rsuffix=\"right\",\n search_radius=None,\n max_search_neighbors=50,\n nearest_distances=False,\n):\n \"\"\"Spatial join of two GeoDataFrames, matching by nearest neighbor.\n Results can be restricted to a radius using the search_radius parameter.\n If the 
search is not bounded by a radius, execution can be sped up by\n using the max_search_neighbors parameter.\n Mixing both options usually does not improve performance over using only\n search_radius and is not recommended.\n See below for details on choosing a value for max_search_neighbors.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n max_search_neighbors : int, default 50\n Number of nearest neighbors to check for proximity.\n Useful if you do not want to use search_radius.\n note:: using a very small number (~< 10) can cause unexpected results\n due to the implementation of the underlaying spatial index.\n Geometries are converted to bounding boxes, thus results for the\n nearest geometry may unexpected. Using a larger number mitigates\n this, but using too large of a number will provide little performance\n improvement.\n If it is not too slow, you may want to run at least once with\n max_search_neighbors=None (or at least a larger number) to validate\n results for your data.\n search_radius : int or float, default None\n Restricts search to a certain radius.\n This can significantly speed up execution.\n If using a tight search radius, it is recommended that you do not use the\n max_search_neighbors option as it will not improve performance.\n If you are not using search_radius, consider using max_search_neighbors\n to speed up execution.\n nearest_distances: bool, default False\n If True, report the distance for each match in new column\n named \"nearest_distances\".\n \"\"\"\n # ------------------------------ CHECK INPUTS ------------------------------\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n _basic_checks(left_df, right_df)\n\n # ----------------- RENAME INDEXES FOR RTREE COMPATIBILITY -----------------\n (\n left_df,\n right_df,\n left_index_name,\n right_index_name,\n index_left,\n index_right,\n ) = _rename_indexes(left_df, right_df, lsuffix, rsuffix)\n\n # ------------------------ COMPUTE SPATIAL JOIN ----------------------------\n\n # get spatial index\n tree_idx = right_df.sindex if len(right_df) > 0 else None\n\n # validate max_search_neighbors and search_radius params\n if max_search_neighbors is not None:\n if not isinstance(max_search_neighbors, int) or max_search_neighbors < 1:\n # warn about using rtree < 0.9.4 and max_search_neighbors option\n # see https://github.com/Toblerity/rtree/pull/141\n raise ValueError(\"max_search_neighbors must be an integer and >= 1\")\n\n if str(RTREE_VERSION) < LooseVersion(\"0.9.4\"):\n warn(\n \"Using an rtree version < 0.9.4 may cause inconsistent \"\n \"results when using max_search_neighbors. 
Consider using a \"\n \"large number or max_search_neighbors=None.\"\n )\n\n if search_radius is not None and search_radius < 0:\n raise ValueError(\"search_radius must be >= 0\")\n\n def _query_index(geo_in_l, buff_geo_in_l):\n \"\"\"\n Queries the spatial index to filter results.\n \"\"\"\n # restric by radius\n if search_radius is not None:\n in_radius = list(tree_idx.intersection(buff_geo_in_l))\n check_radius = True\n else:\n in_radius = right_df.index\n check_radius = False\n # find neighbors\n if max_search_neighbors is not None:\n neighbors = list(\n tree_idx.nearest(geo_in_l.bounds, num_results=max_search_neighbors)\n )\n check_neighbors = True\n else:\n neighbors = right_df.index\n check_neighbors = False\n\n if check_radius and check_neighbors:\n return np.array(\n [int(idx) for idx in neighbors if idx in in_radius], dtype=int\n )\n elif check_radius:\n return np.array(in_radius, dtype=int)\n elif check_neighbors:\n return np.array(neighbors, dtype=int)\n else:\n return right_df.index # check all indexes in right_df\n\n if tree_idx is not None:\n # the accuracy of the spatial index is limited, so we need to manually\n # check each set of matches for actual distance\n l_idx = []\n r_idx = []\n distances = []\n\n # pre-buffer the bounds of right_df geometries by search radius\n # these will be used by the spatial index\n if search_radius is not None:\n bbox_delta = np.array(\n [-search_radius, -search_radius, search_radius, search_radius]\n ) # minx, miny, maxx, maxy\n buff_geos_in_l = (left_df.geometry.bounds + bbox_delta).values\n else:\n buff_geos_in_l = left_df.geometry.bounds.values\n\n for ind_in_left, (buff_geo_in_l, geo_in_l) in enumerate(\n zip(buff_geos_in_l, left_df.geometry)\n ):\n if not geo_in_l or geo_in_l.is_empty:\n # see https://github.com/Toblerity/Shapely/issues/799\n continue\n possible_nearest = _query_index(geo_in_l, buff_geo_in_l)\n min_dist = np.inf # initialize\n min_ind = [] # initialize\n for ind_in_right, geo_in_r in zip(\n possible_nearest, right_df.geometry.iloc[possible_nearest],\n ):\n if not geo_in_r or geo_in_r.is_empty:\n # see https://github.com/Toblerity/Shapely/issues/799\n continue\n dist = geo_in_l.distance(geo_in_r)\n if dist < min_dist and (search_radius is None or dist <= search_radius):\n # new closest\n min_dist = dist # reset\n min_ind = [ind_in_right] # re-initialize\n elif dist == min_dist:\n # matching closest, extend list of closest\n min_ind.append(ind_in_right)\n # extend final results\n if min_ind: # possible_nearest may have been empty\n r_idx.extend(min_ind)\n l_idx.extend([ind_in_left] * len(min_ind))\n if nearest_distances: # avoid memory use if unwarrented\n distances.extend([min_dist] * len(min_ind))\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # assemble resultant df\n result = pd.DataFrame(np.column_stack([l_idx, r_idx]))\n result.columns = [\"_key_left\", \"_key_right\"]\n\n if nearest_distances:\n result[\"nearest_distances\"] = distances\n\n if tree_idx is None or len(l_idx) == 0 or len(r_idx) == 0:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n # ------------------- HANDLE HOW PARAM, CREATE FINAL DF --------------------\n return _join_results(\n result,\n left_df,\n right_df,\n how,\n left_index_name,\n right_index_name,\n index_left,\n index_right,\n lsuffix,\n rsuffix,\n )\n\n\ndef _basic_checks(left_df, right_df):\n \"\"\"\n Helper method for other sjoin methods.\n Runs type checks and crs checks.\n\n Parameters\n 
----------\n left_df, right_df : GeoDataFrame\n The geodataframe's being joined.\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n # check that rtree is installed\n if not RTREE_VERSION:\n raise RuntimeError(\"Spatial joins require `rtree`.\")\n\n\ndef _rename_indexes(left_df, right_df, lsuffix, rsuffix):\n \"\"\"\n Helper method for other sjoin methods.\n Renames indexes to numeric for compatibility with rtree.\n Returns renamed DataFrames as well as old indexes and their names.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrame\n The geodataframe's being joined.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n Returns\n -------\n left_df, right_df : GeoDataFrame\n The geodataframe's being joined, with indexes renamed.\n index_left : string\n Original index for left_df, to be restored by _join_results.\n index_right : string\n Original index for right_df, to be restored by _join_results\n left_index_name : string\n Original index name for left_df, to be restored by _join_results\n right_index_name : string\n Original index name for right_df, to be restored by _join_results\n \"\"\"\n\n # store index names\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n try:\n left_index_name = left_df.index.name\n left_df.index = left_df.index.rename(index_left)\n except TypeError:\n index_left = [\n \"index_%s\" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)\n ]\n left_index_name = left_df.index.names\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n\n right_df = right_df.copy(deep=True)\n try:\n right_index_name = right_df.index.name\n right_df.index = right_df.index.rename(index_right)\n except TypeError:\n index_right = [\n \"index_%s\" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)\n ]\n right_index_name = right_df.index.names\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n return left_df, right_df, left_index_name, right_index_name, index_left, index_right\n\n\ndef _join_results(\n result,\n left_df,\n right_df, # data\n how, # how to join the dataframes\n left_index_name,\n right_index_name, # original names for the ind\n index_left,\n index_right, # original indexes\n lsuffix,\n rsuffix, # suffixes for merged column names\n):\n \"\"\"\n Helper method for other sjoin methods.\n Takes result DataFrame and handles the application of the \"how\" parameter.\n\n Parameters\n ----------\n result: GeoDataFrame\n The result of applying the spatial join operation, containing columns\n \"_key_left\" and \"_key_right\" inidicating which entries to match.\n left_df, right_df : GeoDataFrame\n The geodataframe's being joined, with indexes renamed.\n how : string\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n left_index_name : string\n Original index name for left_df to be restored.\n right_index_name : string\n Original index name for right_df to be restored.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n Returns\n -------\n joined: DataFram\n The final result of a spatial join.\n \"\"\"\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True)\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n if isinstance(index_left, list):\n joined.index.names = left_index_name\n else:\n joined.index.name = left_index_name\n\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True, how=\"left\")\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n if isinstance(index_left, list):\n joined.index.names = left_index_name\n else:\n joined.index.name = 
left_index_name\n\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n if isinstance(index_right, list):\n joined.index.names = right_index_name\n else:\n joined.index.name = right_index_name\n\n return joined\n" ]
[ [ "pandas.DataFrame", "numpy.concatenate", "numpy.vectorize", "numpy.column_stack", "numpy.array", "numpy.empty" ] ]
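The sjoin.py record above describes a two-phase join: a coarse rtree bounding-box query followed by an exact shapely predicate check. Below is a minimal sketch of that idea, assuming the rtree and shapely packages are installed; the toy Point/Polygon inputs are invented for illustration and this is not the geopandas implementation itself.

from rtree import index
from shapely import prepared
from shapely.geometry import Point, Polygon

# "left" and "right" stand in for the geometry columns of two GeoDataFrames.
left = [Point(0.5, 0.5), Point(3.0, 3.0)]
right = [Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
         Polygon([(2, 2), (4, 2), (4, 4), (2, 4)])]

# Phase 1: build a spatial index over the right geometries (bounding boxes only).
tree = index.Index()
for j, geom in enumerate(right):
    tree.insert(j, geom.bounds)

# Phase 2: refine every bounding-box candidate with an exact predicate.
matches = []
for i, geom in enumerate(left):
    prep = prepared.prep(geom)  # prepared geometry speeds up repeated tests
    for j in tree.intersection(geom.bounds):
        if prep.intersects(right[j]):
            matches.append((i, j))

print(matches)  # [(0, 0), (1, 1)]

The coarse pass keeps the expensive exact checks limited to a small candidate set, which is the point the module docstring makes about refining the rtree result.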
Ericcsr/ClothFromDepth
[ "c21bbcceac4b36223f99a3f1177637296553dce0" ]
[ "clothsim.py" ]
[ "from numpy.core.arrayprint import dtype_is_implied\nfrom numpy.lib.utils import _set_function_name\nimport taichi as ti\nimport open3d as o3d\nimport numpy as np\n\n# TODO: Taichi Visualization need the field size to be actual particle number which limit flexibility\nMAX_VERTICES = 509\nMAX_FACES = 924\n# If a vertex connect more than 10 verticies then, the mesh construction has some problem\nMAX_NEIGHBOR = 10\nMAX_BODIES = 1\nELASTICITY = 0.4\n\n# Define Ball Types\nBALL_STICK = 1\nBALL_SLIDE = 2\n\[email protected]_oriented\nclass ClothSim:\n def __init__(\n self,\n init_mesh = None,\n gravity=0.2,\n stiffness = 1000,\n damping = 2.0,\n unit_length = 1.0,\n dt = 5e-4,\n n_substeps = 50):\n self.gravity = gravity\n self.stiffness = stiffness\n self.damping = damping\n self.dt = dt\n # Initialize with conservative estimation, since mesh may be updated \n # afterward\n self.N = 0\n self.num_triangles = 0\n self.unit_length = unit_length\n self.elasticity = ELASTICITY\n self.num_substeps = n_substeps\n\n self.x = ti.Vector.field(3, float, MAX_VERTICES)\n self.x_rest = ti.Vector.field(3, float, MAX_VERTICES)\n self.v = ti.Vector.field(3, float, MAX_VERTICES)\n self.adj_list = ti.field(ti.int32, shape=(MAX_VERTICES, MAX_NEIGHBOR))\n self.adj_num = ti.field(ti.int32, MAX_VERTICES)\n self.triangles = ti.Vector.field(3, ti.int32, MAX_FACES)\n self.indices = ti.field(int, shape = MAX_FACES*3)\n\n # Here we assume all the rigid body are balls\n self.num_objects = 0\n self.balls_x = ti.Vector.field(3, float, MAX_BODIES) # Should be ti.Vector center of balls centers\n self.balls_x_rest = ti.Vector.field(3, float, MAX_BODIES)\n self.balls_x_inc = ti.Vector.field(3, float, MAX_BODIES)\n self.balls_v = ti.Vector.field(3, float, MAX_BODIES) # Should be ti.Vector velocity of balls centers\n self.balls_v_inc = ti.Vector.field(3, float, MAX_BODIES)\n self.balls_r = ti.field(float, MAX_BODIES) # Should be float radius of ball\n self.balls_type = ti.field(ti.int32, MAX_BODIES)\n self.render_initialized = False\n if init_mesh != None:\n self.load_mesh(init_mesh)\n\n # This function cannot be called within gradient tape\n def load_mesh(self, mesh):\n '''Load mesh from open3d\n Parameters:\n mesh : open3d triangular mesh\n Return:\n success flag : bool \n '''\n self.cloth_mesh = mesh\n self.cloth_mesh.remove_duplicated_vertices()\n self.cloth_mesh.compute_adjacency_list()\n links = [np.array(list(v)) for v in self.cloth_mesh.adjacency_list]\n faces = np.asarray(self.cloth_mesh.triangles)\n self.num_triangles = len(faces)\n assert(self.num_triangles <= MAX_FACES)\n print(self.num_triangles)\n # Extrapolate\n faces = np.vstack([faces, np.zeros((MAX_FACES-self.num_triangles, 3))])\n self.triangles.from_numpy(faces)\n self.indices.from_numpy(faces.flatten())\n\n vertices = np.asarray(self.cloth_mesh.vertices)\n vertices[:,0] += 0.4\n self.N = len(vertices)\n assert(self.N <= MAX_VERTICES)\n print(self.N)\n # Extrapolate\n vertices = np.vstack([vertices, np.zeros((MAX_VERTICES - self.N, 3))])\n self.x.from_numpy(vertices)\n self.x_rest.from_numpy(vertices)\n self.v.from_numpy(np.zeros_like(vertices))\n \n links_np = np.zeros((MAX_VERTICES, MAX_NEIGHBOR))\n links_len = np.array([len(link) for link in links])\n assert(np.max(links_len) <= MAX_NEIGHBOR)\n links_len = np.hstack([links_len, np.zeros(MAX_VERTICES-self.N)])\n for i, link in enumerate(links):\n links_np[i, :len(link)] = link\n self.adj_list.from_numpy(links_np)\n self.adj_num.from_numpy(links_len)\n return True\n\n def initialize_renderer(self):\n 
self.window = ti.ui.Window(\"Cloth\", (800, 800), vsync=True)\n self.canvas = self.window.get_canvas()\n self.scene = ti.ui.Scene()\n self.camera = ti.ui.make_camera()\n self.render_initialized = True\n\n def load_objects(self,centers, radius, collision_type):\n '''Load rigid bodies that can interact with clothes\n Parameters:\n centers : np.ndarray(n_objects, 3)\n radius : np.ndarray(n_objects)\n collision_type : np.ndarray(n_objects)\n Return:\n success_flag : bool\n '''\n assert(len(centers) == len(radius))\n self.num_objects = len(centers)\n n_objects = len(centers)\n centers = np.vstack([centers, np.zeros((MAX_BODIES - n_objects, 3))])\n self.balls_x.from_numpy(centers)\n self.balls_x_rest.from_numpy(centers)\n self.balls_v.from_numpy(np.zeros_like(centers))\n self.balls_x_inc.from_numpy(np.zeros_like(centers))\n self.balls_v_inc.from_numpy(np.zeros_like(centers))\n radius = np.hstack([radius, np.zeros(MAX_BODIES - n_objects)])\n self.balls_r.from_numpy(radius)\n collision_type = np.hstack([collision_type, np.zeros(MAX_BODIES - n_objects)])\n self.balls_type.from_numpy(collision_type)\n return True\n \n @ti.func\n def collide_pair(self,i, j):\n imp = ti.Vector([0.0, 0.0, 0.0])\n x_inc_contrib = ti.Vector([0.0, 0.0, 0.0])\n if i!=j:\n dist = (self.balls_x[i] + self.dt * self.balls_v[i])\\\n - (self.balls_x[j] + self.dt * self.balls_v[j])\n dist_norm = dist.norm()\n mini_gap = self.balls_r[i] + self.balls_r[j]\n if dist_norm < mini_gap:\n dir = dist.normalized()\n rela_v = self.balls_v[i] - self.balls_v[j]\n projected_v = dir.dot(rela_v)\n if projected_v < 0:\n imp = -(1 + self.elasticity) * 0.5 * projected_v * dir\n toi = (dist_norm - mini_gap) / min(-1e-3, projected_v)\n x_inc_contrib = min(toi - self.dt, 0) * imp\n\n self.balls_x_inc[i] += x_inc_contrib\n self.balls_v_inc[i] += imp\n\n @ti.func\n def collide(self):\n for i in range(self.num_objects):\n self.balls_v_inc[i] = ti.Vector([0.0, 0.0, 0.0])\n self.balls_x_inc[i] = ti.Vector([0.0, 0.0, 0.0])\n for i in range(self.num_objects):\n for j in range(i):\n self.collide_pair(i, j)\n for i in range(self.num_objects):\n for j in range(i+1, self.num_objects):\n self.collide_pair(i, j)\n \n @ti.kernel\n def substep(self):\n '''\n Substep for physical object update\n \n '''\n for i in range(self.N):\n self.v[i].y -= self.gravity * self.dt\n \n for i in range(self.N):\n for neighbor in range(self.adj_num[i]):\n relative_pos = self.x[self.adj_list[i, neighbor]] - self.x[i]\n current_length = relative_pos.norm()\n rest_pos = self.x_rest[self.adj_list[i, neighbor]] - self.x_rest[i]\n rest_length = rest_pos.norm()\n self.v[i] += (self.stiffness * self.unit_length / rest_length)\\\n * relative_pos.normalized() * (current_length - rest_length) * self.dt\n\n for i in range(self.N):\n self.v[i] *= ti.exp(-self.damping * self.dt)\n for j in ti.static(range(self.num_objects)):\n r = self.x[i] - self.balls_x[j]\n if r.norm() <= self.balls_r[j]:\n if self.balls_type[j] == BALL_STICK:\n self.v[i] = ti.Vector([0.0, 0.0, 0.0])\n else:\n proj_v = self.v[i].dot(r.normalized())\n norm_v = proj_v * r.normalized()\n tang_v = self.v[i] - norm_v\n if proj_v <=0:\n norm_ball_v = self.balls_v[j].dot(r.normalized()) * r.normalized()\n self.v[i] = tang_v + norm_ball_v\n self.x[i] += self.dt * self.v[i]\n \n \n # Update the position of balls need to handle collision\n # Can follow the idea of billard in difftaichi\n self.collide()\n for i in range(self.num_objects):\n self.balls_v[i] = self.balls_v[i] + self.balls_v_inc[i]\n self.balls_x[i] = self.balls_x[i] + 
self.dt * self.balls_v[i] + self.balls_x_inc[i]\n\n def step(self,action=None):\n \"\"\"step by action\n action : np.ndarray (3 * n_objects)\n \"\"\"\n if action == None:\n action = np.zeros(self.num_objects * 3)\n assert(len(action)//3 == self.num_objects)\n for i in range(self.num_objects):\n self.balls_v[i] = action[3*i:3*(i+1)]\n\n for _ in range(self.num_substeps):\n self.substep()\n\n def render(self):\n if not self.render_initialized:\n print(\"Render Not initialized\")\n exit(-1)\n self.camera.position(0.5, -0.5, 2)\n self.camera.lookat(0.5, -0.5, 0)\n self.scene.set_camera(self.camera)\n self.scene.point_light(pos=(0.5, 1, 2), color=(1, 1, 1))\n self.scene.mesh(self.x,\n indices = self.indices,\n color=(0.5, 0.5, 0.5),\n two_sided=True)\n self.scene.particles(self.balls_x, radius = 0.1, color=(0.5, 0, 0))\n self.canvas.scene(self.scene)\n self.window.show()\n\n @ti.kernel\n def reset(self):\n for i in range(self.N):\n self.x[i] = self.x_rest[i]\n self.v[i] = ti.Vector([0.0, 0.0, 0.0])\n\n for i in range(self.num_objects):\n self.balls_x[i] = self.balls_x_rest[i]\n self.balls_v[i] = ti.Vector([0.0, 0.0, 0.0])\n \n def change_all_type(self, collision_type):\n collision_type = np.hstack([collision_type, np.zeros(MAX_BODIES - self.num_objects)])\n self.balls_type.from_numpy(collision_type)\n\n def change_type_index(self,index, collision_type):\n assert(index < self.num_objects and (collision_type==1 or collision_type==2))\n self.balls_type[index] = collision_type\n \nif __name__ == \"__main__\":\n ti.init(arch = ti.cpu)\n clothmesh = o3d.io.read_triangle_mesh(\"./test2.obj\")\n sim = ClothSim(init_mesh=clothmesh)\n sim.initialize_renderer()\n centers = np.array([[0.5, -0.5, 0.0]])\n radius = np.array([0.1])\n type = np.array([BALL_SLIDE])\n sim.load_objects(centers, radius, type)\n for s in range(1000):\n sim.step()\n sim.render()\n\n" ]
[ [ "numpy.asarray", "numpy.max", "numpy.zeros_like", "numpy.array", "numpy.zeros" ] ]
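The clothsim.py record above integrates a mass-spring model per substep: gravity, a per-edge spring force proportional to the stretch from rest length, exponential velocity damping, then an explicit position update. The snippet below is a simplified plain-NumPy sketch of that substep (no Taichi, no ball collisions); the three-vertex mesh and the constants are invented for the example.

import numpy as np

# Rest positions of a tiny triangle "cloth" and a slightly stretched start state.
x_rest = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
x = x_rest + np.array([[0.0, 0.0, 0.0], [0.2, 0.0, 0.0], [0.0, 0.1, 0.0]])
v = np.zeros_like(x)
edges = [(0, 1), (1, 2), (2, 0)]
stiffness, damping, gravity, dt = 1000.0, 2.0, 0.2, 5e-4

def substep(x, v):
    v = v.copy()
    v[:, 1] -= gravity * dt                    # gravity pulls along -y
    for i, j in edges:                         # spring force on each edge
        rel = x[j] - x[i]
        cur_len = np.linalg.norm(rel)
        rest_len = np.linalg.norm(x_rest[j] - x_rest[i])
        f = stiffness * (cur_len - rest_len) * rel / cur_len
        v[i] += f * dt                         # equal and opposite impulses
        v[j] -= f * dt
    v = v * np.exp(-damping * dt)              # velocity damping
    return x + dt * v, v

for _ in range(50):                            # one frame worth of substeps
    x, v = substep(x, v)
print(x.round(4))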
lxcode/twarc-csv
[ "60606a4e62c41d46324907dbfd2b55cbaec5a13f" ]
[ "twarc_csv.py" ]
[ "import os\nimport json\nimport click\nimport logging\nimport itertools\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import OrderedDict\nfrom more_itertools import ichunked\nfrom twarc.expansions import flatten\n\nlog = logging.getLogger(\"twarc\")\n\nDEFAULT_TWEET_COLUMNS = \"\"\"__twarc.retrieved_at\n__twarc.url\n__twarc.version\nattachments.media\nattachments.media_keys\nattachments.poll.duration_minutes\nattachments.poll.end_datetime\nattachments.poll.id\nattachments.poll.options\nattachments.poll.voting_status\nattachments.poll_ids\nauthor.created_at\nauthor.description\nauthor.entities.description.cashtags\nauthor.entities.description.hashtags\nauthor.entities.description.mentions\nauthor.entities.description.urls\nauthor.entities.url.urls\nauthor.id\nauthor.location\nauthor.name\nauthor.pinned_tweet_id\nauthor.profile_image_url\nauthor.protected\nauthor.public_metrics.followers_count\nauthor.public_metrics.following_count\nauthor.public_metrics.listed_count\nauthor.public_metrics.tweet_count\nauthor.url\nauthor.username\nauthor.verified\nauthor.withheld.country_codes\nauthor_id\ncontext_annotations\nconversation_id\ncreated_at\nentities.annotations\nentities.cashtags\nentities.hashtags\nentities.mentions\nentities.urls\ngeo.coordinates.coordinates\ngeo.coordinates.type\ngeo.country\ngeo.country_code\ngeo.full_name\ngeo.geo.bbox\ngeo.geo.type\ngeo.id\ngeo.name\ngeo.place_id\ngeo.place_type\nid\nin_reply_to_user.created_at\nin_reply_to_user.description\nin_reply_to_user.entities.description.cashtags\nin_reply_to_user.entities.description.hashtags\nin_reply_to_user.entities.description.mentions\nin_reply_to_user.entities.description.urls\nin_reply_to_user.entities.url.urls\nin_reply_to_user.id\nin_reply_to_user.location\nin_reply_to_user.name\nin_reply_to_user.pinned_tweet_id\nin_reply_to_user.profile_image_url\nin_reply_to_user.protected\nin_reply_to_user.public_metrics.followers_count\nin_reply_to_user.public_metrics.following_count\nin_reply_to_user.public_metrics.listed_count\nin_reply_to_user.public_metrics.tweet_count\nin_reply_to_user.url\nin_reply_to_user.username\nin_reply_to_user.verified\nin_reply_to_user.withheld.country_codes\nin_reply_to_user_id\nlang\npossibly_sensitive\npublic_metrics.like_count\npublic_metrics.quote_count\npublic_metrics.reply_count\npublic_metrics.retweet_count\nreferenced_tweets\nreply_settings\nsource\ntext\ntype\nwithheld.copyright\nwithheld.country_codes\"\"\".split(\n \"\\n\"\n)\n\nDEFAULT_USERS_COLUMNS = \"\"\"__twarc.retrieved_at\n__twarc.url\n__twarc.version\ncreated_at\ndescription\nentities.description.cashtags\nentities.description.hashtags\nentities.description.mentions\nentities.description.urls\nentities.url.urls\nid\nlocation\nname\npinned_tweet_id\npinned_tweet\nprofile_image_url\nprotected\npublic_metrics.followers_count\npublic_metrics.following_count\npublic_metrics.listed_count\npublic_metrics.tweet_count\nurl\nusername\nverified\nwithheld.country_codes\"\"\".split(\n \"\\n\"\n)\n\n\nclass CSVConverter:\n def __init__(\n self,\n infile,\n outfile,\n json_encode_all=False,\n json_encode_lists=True,\n json_encode_text=False,\n inline_referenced_tweets=True,\n inline_pinned_tweets=False,\n allow_duplicates=False,\n input_tweet_columns=True,\n input_users_columns=False,\n input_columns=\"\",\n output_columns=\"\",\n batch_size=5000,\n ):\n self.infile = infile\n self.outfile = outfile\n self.json_encode_all = json_encode_all\n self.json_encode_lists = json_encode_lists\n self.json_encode_text = json_encode_text\n 
self.inline_referenced_tweets = inline_referenced_tweets\n self.inline_pinned_tweets = inline_pinned_tweets\n self.allow_duplicates = allow_duplicates\n self.batch_size = batch_size\n self.dataset_ids = set()\n self.std = infile.name == \"<stdin>\" or outfile.name == \"<stdout>\"\n self.progress = tqdm(\n unit=\"B\",\n unit_scale=True,\n unit_divisor=1024,\n total=os.stat(infile.name).st_size if not self.std else 1,\n disable=self.std,\n )\n self.columns = list()\n if input_tweet_columns:\n self.columns.extend(DEFAULT_TWEET_COLUMNS)\n if input_users_columns:\n self.columns.extend(DEFAULT_USERS_COLUMNS)\n if input_columns:\n self.columns.extend(manual_columns.split(\",\"))\n\n self.output_columns = (\n output_columns.split(\",\") if output_columns else self.columns\n )\n\n self.counts = {\n \"lines\": 0,\n \"tweets\": 0,\n \"referenced_tweets\": 0,\n \"parse_errors\": 0,\n \"duplicates\": 0,\n \"rows\": 0,\n \"input_columns\": len(self.columns),\n \"output_columns\": len(self.output_columns),\n }\n\n def _read_lines(self):\n \"\"\"\n Generator for reading files line byline from a file. Progress bar is based on file size.\n \"\"\"\n line = self.infile.readline()\n while line:\n self.counts[\"lines\"] = self.counts[\"lines\"] + 1\n if line.strip() != \"\":\n try:\n o = json.loads(line)\n yield o\n except Exception as ex:\n self.counts[\"parse_errors\"] = self.counts[\"parse_errors\"] + 1\n log.error(f\"Error when trying to parse json: '{line}' {ex}\")\n if not self.std:\n self.progress.update(self.infile.tell() - self.progress.n)\n line = self.infile.readline()\n\n def _handle_formats(self, batch):\n \"\"\"\n Handle different types of json formats, generating 1 tweet at a time\n\n a batch is a number of lines from a json,\n these can be full pages of requests or individual tweets.\n \"\"\"\n for item in batch:\n # if it has a \"data\" key ensure data it is flattened\n if \"data\" in item:\n # flatten a list of tweets\n if isinstance(item[\"data\"], list):\n for i in flatten(item)[\"data\"]:\n yield i\n # flatten a single tweet, eg, from stream\n else:\n yield flatten(item)[\"data\"]\n else:\n # this assumes the data is flattened\n yield item\n\n def _inline_referenced_tweets(self, tweet):\n \"\"\"\n Insert referenced tweets into the main CSV\n \"\"\"\n if \"referenced_tweets\" in tweet and self.inline_referenced_tweets:\n for referenced_tweet in tweet[\"referenced_tweets\"]:\n # extract the referenced tweet as a new row\n self.counts[\"referenced_tweets\"] = self.counts[\"referenced_tweets\"] + 1\n yield referenced_tweet\n # leave behind the reference, but not the full tweet\n tweet[\"referenced_tweets\"] = [\n {\"type\": r[\"type\"], \"id\": r[\"id\"]} for r in tweet[\"referenced_tweets\"]\n ]\n\n # Deal with pinned tweets for user datasets:\n # Todo: This is not fully implemented!\n if self.inline_pinned_tweets:\n if \"pinned_tweet\" in tweet:\n # extract the referenced tweet as a new row\n tweet[\"pinned_tweet\"][\"type\"] = \"pinned_tweet\"\n self.counts[\"referenced_tweets\"] = self.counts[\"referenced_tweets\"] + 1\n yield referenced_tweet\n # pinned_tweet_id remains:\n tweet.pop(\"pinned_tweet\")\n\n yield tweet\n\n def _process_tweets(self, tweets):\n \"\"\"\n Process a single tweet before adding it to the dataframe.\n ToDo: Drop columns and dedupe etc here.\n \"\"\"\n for tweet in tweets:\n # Order the fields in the json, because JSON key order isn't guaranteed.\n # Needed so that different batches won't produce different ordered columns\n json_keys = sorted(tweet.keys())\n 
selected_field_order = list()\n\n # Opinion: always put in id,created_at,text first, and then the rest\n if \"id\" in json_keys:\n selected_field_order.append(json_keys.pop(json_keys.index(\"id\")))\n if \"created_at\" in json_keys:\n selected_field_order.append(\n json_keys.pop(json_keys.index(\"created_at\"))\n )\n if \"text\" in json_keys:\n selected_field_order.append(json_keys.pop(json_keys.index(\"text\")))\n selected_field_order.extend(json_keys)\n\n tweet = OrderedDict((k, tweet[k]) for k in selected_field_order)\n\n self.counts[\"tweets\"] = self.counts[\"tweets\"] + 1\n if tweet[\"id\"] in self.dataset_ids:\n self.counts[\"duplicates\"] = self.counts[\"duplicates\"] + 1\n\n if self.allow_duplicates:\n yield tweet\n else:\n if tweet[\"id\"] not in self.dataset_ids:\n yield tweet\n\n self.dataset_ids.add(tweet[\"id\"])\n\n def _process_dataframe(self, _df):\n # (Optional) json encode all\n if self.json_encode_all:\n _df = _df.applymap(json.dumps, na_action=\"ignore\")\n else:\n # (Optional) text escape for any text fields\n if self.json_encode_text:\n _df = _df.applymap(\n lambda x: json.dumps(x) if type(x) is str else x,\n na_action=\"ignore\",\n )\n else:\n # Mandatory newline escape to prevent breaking csv format:\n _df = _df.applymap(\n lambda x: x.replace(\"\\r\", \"\").replace(\"\\n\", r\"\\n\")\n if type(x) is str\n else x,\n na_action=\"ignore\",\n )\n # (Optional) json for lists\n if self.json_encode_lists:\n _df = _df.applymap(\n lambda x: json.dumps(x) if pd.api.types.is_list_like(x) else x,\n na_action=\"ignore\",\n )\n return _df\n\n def _process_batch(self, batch):\n\n # (Optional) append referenced tweets as new rows\n tweet_batch = itertools.chain.from_iterable(\n self._process_tweets(self._inline_referenced_tweets(tweet))\n for tweet in self._handle_formats(batch)\n )\n\n _df = pd.json_normalize([tweet for tweet in tweet_batch], errors=\"ignore\")\n\n # Check for mismatched columns\n if len(_df.columns) > len(self.columns):\n diff = set(_df.columns) - set(self.columns)\n click.echo(\n click.style(\n f\"💔 ERROR: Unexpected Data: \\n\\\"{','.join(diff)}\\\"\\n to fix, add these with --input-columns. Skipping entire batch of {len(_df)} tweets!\",\n fg=\"red\",\n ),\n err=True,\n )\n return pd.DataFrame(columns=self.columns)\n\n _df = _df.reindex(columns=self.columns)\n _df = self._process_dataframe(_df)\n\n return _df\n\n def _write_output(self, _df, first_batch):\n \"\"\"\n Write out the dataframe chunk by chunk\n\n todo: take parameters from commandline for optional output formats.\n \"\"\"\n\n if first_batch:\n mode = \"w\"\n header = True\n else:\n mode = \"a+\"\n header = False\n\n self.counts[\"rows\"] = self.counts[\"rows\"] + len(_df)\n _df.to_csv(\n self.outfile,\n mode=mode,\n columns=self.output_columns,\n index=False,\n header=header,\n ) # todo: (Optional) arguments for to_csv\n\n def process(self):\n \"\"\"\n Process a file containing JSON into a CSV\n \"\"\"\n\n # Flag for writing header & appending to CSV file\n first_batch = True\n for batch in ichunked(self._read_lines(), self.batch_size):\n self._write_output(self._process_batch(batch), first_batch)\n first_batch = False\n\n self.progress.close()\n\n\[email protected]()\[email protected](\"infile\", type=click.File(\"r\"), default=\"-\")\[email protected](\"outfile\", type=click.File(\"w\"), default=\"-\")\[email protected](\n \"--json-encode-all/--no-json-encode-all\",\n default=False,\n help=\"JSON encode / escape all fields. 
Default: no\",\n)\[email protected](\n \"--json-encode-lists/--no-json-encode-lists\",\n default=True,\n help=\"JSON encode / escape lists. Default: yes\",\n)\[email protected](\n \"--json-encode-text/--no-json-encode-text\",\n default=False,\n help=\"JSON encode / escape text fields. Default: no\",\n)\[email protected](\n \"--inline-referenced-tweets/--no-inline-referenced-tweets\",\n default=True,\n help=\"Output referenced tweets inline as separate rows. Default: yes\",\n)\[email protected](\n \"--inline-pinned-tweets/--no-pinned-tweets\",\n default=False,\n help=\"If converting a user dataset, output pinned tweets inline as separate rows. Default: no\",\n)\[email protected](\n \"--allow-duplicates/--no-allow-duplicates\",\n default=False,\n help=\"Remove duplicate tweets by ID. Default: yes\",\n)\[email protected](\n \"--input-tweet-columns/--no-input-tweet-columns\",\n default=True,\n help=\"Use a default list of tweet column names in the input. Only modify this if you have processed the json yourself. Default: yes\",\n)\[email protected](\n \"--input-users-columns/--no-input-users-columns\",\n default=False,\n help=\"Use a default list of user column names in the input. Only modify this if you have a dataset of users as opposed to tweets. Default: no\",\n)\[email protected](\n \"--input-columns\",\n default=\"\",\n help=\"Manually specify input columns. Comma separated string. Default is blank, no extra input columns\",\n)\[email protected](\n \"--output-columns\",\n default=\"\",\n help=\"Specify what columns to output in the CSV. Default is all input columns.\",\n)\[email protected](\n \"--batch-size\",\n type=int,\n default=5000,\n help=\"How many lines to process per chunk. Default is 5000\",\n)\[email protected](\n \"--show-stats/--no-show-stats\",\n default=True,\n help=\"Show stats about the dataset on completion. Default is show. Always hidden if you're using stdin / stdout pipes.\",\n)\ndef csv(\n infile,\n outfile,\n json_encode_all,\n json_encode_lists,\n json_encode_text,\n inline_referenced_tweets,\n inline_pinned_tweets,\n allow_duplicates,\n input_tweet_columns,\n input_users_columns,\n input_columns,\n output_columns,\n batch_size,\n show_stats,\n):\n \"\"\"\n Convert tweets to CSV.\n \"\"\"\n\n if infile.name == outfile.name:\n click.echo(\n click.style(\n f\"💔 Cannot convert files in-place, specify a different output file!\",\n fg=\"red\",\n ),\n err=True,\n )\n return\n\n converter = CSVConverter(\n infile,\n outfile,\n json_encode_all,\n json_encode_lists,\n json_encode_text,\n inline_referenced_tweets,\n inline_pinned_tweets,\n allow_duplicates,\n input_tweet_columns,\n input_users_columns,\n input_columns,\n output_columns,\n batch_size,\n )\n converter.process()\n\n errors = (\n click.style(\n f\"{converter.counts['parse_errors']} failed to parse. See twarc.log for details.\\n\",\n fg=\"red\",\n )\n if converter.counts[\"parse_errors\"] > 0\n else \"\"\n )\n\n if show_stats and outfile.name != \"<stdout>\":\n click.echo(\n f\"\\nℹ️\\n\"\n + f\"Read {converter.counts['tweets']} tweets from {converter.counts['lines']} lines. \\n\"\n + f\"{converter.counts['referenced_tweets']} were referenced tweets, {converter.counts['duplicates']} were duplicates.\\n\"\n + errors\n + f\"Wrote {converter.counts['rows']} rows and wrote {converter.counts['output_columns']} of {converter.counts['input_columns']} input columns in the CSV.\\n\",\n err=True,\n )\n" ]
[ [ "pandas.api.types.is_list_like", "pandas.json_normalize", "pandas.DataFrame" ] ]
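The twarc_csv.py record above converts batches of JSON tweets to CSV by flattening each chunk with pandas.json_normalize, JSON-encoding list-valued cells, and appending to the output file with a header only for the first batch. A small sketch of that pattern, with invented sample records and an invented output file name:

import json
import pandas as pd

batches = [
    [{"id": "1", "text": "hello", "entities": {"hashtags": ["a", "b"]}}],
    [{"id": "2", "text": "world", "entities": {"hashtags": []}}],
]

first = True
for batch in batches:
    df = pd.json_normalize(batch)          # nested keys become dotted columns
    # CSV cells cannot hold lists losslessly, so JSON-encode them first.
    df = df.applymap(lambda x: json.dumps(x) if pd.api.types.is_list_like(x) else x)
    df.to_csv("tweets.csv", mode="w" if first else "a",
              header=first, index=False)
    first = False

The real converter pins a fixed column list so every appended batch lines up; this sketch assumes the batches happen to share the same columns.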
igg-bioinfo/CKG
[ "7984ae239c5010545b95c9be3201a899682294de" ]
[ "ckg/report_manager/apps/dataUpload.py" ]
[ "import os\nimport sys\nimport re\nimport pandas as pd\nimport numpy as np\nfrom ckg import ckg_utils\nfrom ckg.graphdb_connector import connector\nfrom ckg.graphdb_builder import builder_utils\nfrom ckg.graphdb_connector import query_utils\nfrom ckg.analytics_core.viz import viz\n\nckg_config = ckg_utils.read_ckg_config()\nlog_config = ckg_config['graphdb_builder_log']\nlogger = builder_utils.setup_logging(log_config, key=\"data_upload\")\n\n\ndef get_data_upload_queries():\n \"\"\"\n Reads the YAML file containing the queries relevant to parsing of clinical data and \\\n returns a Python object (dict[dict]).\n\n :return: Nested dictionary.\n \"\"\"\n try:\n queries_path = \"../queries/data_upload_cypher.yml\"\n directory = os.path.dirname(os.path.abspath(__file__))\n data_upload_cypher = ckg_utils.get_queries(os.path.join(directory, queries_path))\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Reading queries from file {}: {}, file: {},line: {}\".format(err, queries_path, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n return data_upload_cypher\n\n\ndef get_new_subject_identifier(driver):\n \"\"\"\n Queries the database for the last subject identifier and returns a new sequential identifier.\n\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n :type driver: neo4j driver\n :param str projectId: external project identifier (from the graph database).\n :return: Subject identifier.\n :rtype: str\n \"\"\"\n query_name = 'increment_subject_id'\n query = ''\n try:\n cypher = get_data_upload_queries()\n query = cypher[query_name]['query']\n subject_identifier = connector.getCursorData(driver, query).values[0][0]\n except Exception as err:\n subject_identifier = None\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Getting new subject identifiers: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n return subject_identifier\n\n\ndef get_new_biosample_identifier(driver):\n \"\"\"\n Queries the database for the last biological sample internal identifier and returns a new sequential identifier.\n\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n\n :return: Biological sample identifier.\n \"\"\"\n query_name = 'increment_biosample_id'\n query = ''\n try:\n cypher = get_data_upload_queries()\n query = cypher[query_name]['query']\n identifier = connector.getCursorData(driver, query).values[0][0]\n except Exception as err:\n identifier = None\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. 
Getting new biological sample identifiers: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n return identifier\n\n\ndef get_new_analytical_sample_identifier(driver):\n \"\"\"\n Queries the database for the last analytical sample internal identifier and returns a new sequential identifier.\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n\n :return: Analytical sample identifier.\n \"\"\"\n query_name = 'increment_analytical_sample_id'\n query = ''\n try:\n cypher = get_data_upload_queries()\n query = cypher[query_name]['query']\n identifier = connector.getCursorData(driver, query).values[0][0]\n except Exception as err:\n identifier = None\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Getting new analytical sample identifiers: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n return identifier\n\n\ndef get_subjects_enrolled_in_project(driver, projectId):\n \"\"\"\n Extracts the number of subjects included in a given project.\n\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n :type driver: neo4j driver\n :param str projectId: external project identifier (from the graph database).\n :return: Number of subjects.\n :rtype: Numpy ndarray\n \"\"\"\n query_name = 'extract_enrolled_subjects'\n query = ''\n try:\n data_upload_cypher = get_data_upload_queries()\n query = data_upload_cypher[query_name]['query']\n for q in query.split(';')[0:-1]:\n if '$' in q:\n result = connector.getCursorData(driver, q+';', parameters={'external_id': str(projectId)})\n else:\n result = connector.getCursorData(driver, q+';')\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Getting new subjects enrolled in project: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n return result.values\n\n\ndef check_samples_in_project(driver, projectId):\n \"\"\"\n \"\"\"\n query_name = 'extract_samples_numbers'\n query = ''\n result = pd.DataFrame()\n try:\n data_upload_cypher = get_data_upload_queries()\n query = data_upload_cypher[query_name]['query']\n result = connector.getCursorData(driver, query, parameters={'external_id': str(projectId)})\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Checking whether samples exist in project: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n return result\n\n\ndef check_external_ids_in_db(driver, projectId):\n \"\"\"\n \"\"\"\n query_name = 'check_external_ids'\n query = ''\n result = pd.DataFrame()\n try:\n data_upload_cypher = get_data_upload_queries()\n query = data_upload_cypher[query_name]['query']\n result = connector.getCursorData(driver, query, parameters={'external_id': str(projectId)})\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. 
Checking if external identifiers exist in the database: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n return result\n\n\ndef remove_samples_nodes_db(driver, projectId):\n \"\"\"\n \"\"\"\n result = None\n query_name = 'remove_project'\n query = ''\n try:\n queries_path = \"../queries/project_cypher.yml\"\n directory = os.path.dirname(os.path.abspath(__file__))\n project_cypher = ckg_utils.get_queries(os.path.join(directory, queries_path))\n query = project_cypher[query_name]['query'].replace('PROJECTID', projectId).split(';')[:-2]\n for q in query:\n result = connector.commitQuery(driver, q+';')\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Removing nodes associated to project: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n return result\n\n\ndef create_new_subjects(driver, data, projectId):\n \"\"\"\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n :param data: pandas Dataframe with clinical data as columns and samples as rows.\n :param string projectId: project identifier.\n :return: Pandas DataFrame where new biological sample internal identifiers have been added.\n \"\"\"\n external_ids = data['subject external_id'].unique()\n subject_id = get_new_subject_identifier(driver)\n if subject_id is None:\n subject_id = '1'\n subject_ids = ['S'+str(i) for i in np.arange(int(subject_id), int(subject_id) + len(external_ids))]\n subject_dict = dict(zip(external_ids, subject_ids))\n query_name = 'create_project_subject'\n for external_id, subject_id in subject_dict.items():\n parameters = {'external_id': str(external_id), 'project_id': projectId, 'subject_id': subject_id}\n try:\n query = ''\n data_upload_cypher = get_data_upload_queries()\n queries = data_upload_cypher[query_name]['query'].split(';')[:-1]\n for query in queries:\n res = connector.commitQuery(driver, query+';', parameters=parameters)\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. 
Creating new subjects: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n data['subject id'] = data['subject external_id'].map(subject_dict)\n\n return data\n\n\ndef create_new_biosamples(driver, data):\n \"\"\"\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n :param data: pandas Dataframe with clinical data as columns and samples as rows.\n\n :return: Pandas DataFrame where new biological sample internal identifiers have been added.\n \"\"\"\n external_ids = data['biological_sample external_id'].unique()\n subject_ids = data['subject id']\n biosample_id = get_new_biosample_identifier(driver)\n if biosample_id is None:\n biosample_id = '1'\n\n biosample_ids = ['BS'+str(i) for i in np.arange(int(biosample_id), int(biosample_id) + len(external_ids))]\n biosample_dict = dict(zip(external_ids, biosample_ids))\n biosample_subject_dict = dict(zip(external_ids, subject_ids))\n query_name = 'create_subject_biosamples'\n for external_id, biosample_id in biosample_dict.items():\n subject_id = biosample_subject_dict[external_id]\n parameters = {'external_id': str(external_id), 'biosample_id':biosample_id, 'subject_id': subject_id}\n try:\n query = ''\n data_upload_cypher = get_data_upload_queries()\n queries = data_upload_cypher[query_name]['query'].split(';')[:-1]\n for query in queries:\n res = connector.commitQuery(driver, query+';', parameters=parameters)\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. Creating biological samples: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n data['biological_sample id'] = data['biological_sample external_id'].map(biosample_dict)\n\n return data\n\n\ndef create_new_ansamples(driver, data):\n \"\"\"\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n :param data: pandas Dataframe with clinical data as columns and samples as rows.\n\n :return: Pandas DataFrame where new analytical sample internal identifiers have been added.\n \"\"\"\n data = data.rename(columns={'analytical_sample external_id': 'external_id', 'biological_sample id': 'biosample_id'})\n data['external_id'] = data['external_id'].astype(str)\n num_samples = data['external_id'].shape[0]\n if 'grouping2' not in data:\n data['grouping2'] = None\n if 'batch' not in data:\n data['batch'] = None\n ansample_id = get_new_analytical_sample_identifier(driver)\n if ansample_id is None:\n ansample_id = '1'\n\n ansample_ids = ['AS' + str(i) for i in np.arange(int(ansample_id), int(ansample_id) + num_samples)]\n data['asample_id'] = ansample_ids\n query_name = 'create_asamples_biosamples'\n for parameters in data.to_dict('records'):\n print(parameters)\n try:\n query = ''\n data_upload_cypher = get_data_upload_queries()\n queries = data_upload_cypher[query_name]['query'].split(';')[:-1]\n for query in queries:\n res = connector.commitQuery(driver, query+';', parameters=parameters)\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. 
Creating analytical samples: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n data = data.rename(columns={'asample_id': 'analytical_sample id', 'external_id': 'analytical_sample external_id', 'biosample_id': 'biological_sample id'})\n\n return data\n\n\ndef create_experiment_internal_identifiers(driver, projectId, data, directory, filename):\n done = 0\n df = create_new_subjects(driver, data, projectId)\n df1 = create_new_biosamples(driver, df)\n df2 = create_new_ansamples(driver, df1)\n builder_utils.export_contents(df2, directory, filename)\n done += 1\n\n return done\n\n\ndef create_mapping_cols_clinical(driver, data, directory, filename, separator='|'):\n \"\"\"\n :param driver: neo4j driver, which provides the connection to the neo4j graph database.\n :type driver: neo4j driver\n :param data: pandas Dataframe with clinical data as columns and samples as rows.\n :param str separator: character used to separate multiple entries in an attribute.\n\n :return: Pandas Dataframe with all clinical data and graph database internal identifiers.\n \"\"\"\n tissue_dict = {}\n disease_dict = {}\n intervention_dict = {}\n if 'disease' in data:\n for disease in data['disease'].dropna().unique():\n if len(disease.split(separator)) > 1:\n ids = []\n for i in disease.split(separator):\n disease_id = query_utils.map_node_name_to_id(driver, 'Disease', str(i.strip()))\n if disease_id is not None:\n ids.append(disease_id)\n disease_dict[disease] = '|'.join(ids)\n else:\n disease_id = query_utils.map_node_name_to_id(driver, 'Disease', str(disease.strip()))\n disease_dict[disease] = disease_id\n data['disease id'] = data['disease'].map(disease_dict)\n\n if 'tissue' in data:\n for tissue in data['tissue'].dropna().unique():\n tissue_id = query_utils.map_node_name_to_id(driver, 'Tissue', str(tissue.strip()))\n tissue_dict[tissue] = tissue_id\n\n data['tissue id'] = data['tissue'].map(tissue_dict)\n\n if 'studies_intervention' in data:\n for interventions in data['studies_intervention'].dropna().unique():\n for intervention in str(interventions).split('|'):\n if len(intervention.split()) > 1:\n intervention_dict[intervention] = re.search(r'\\(([^)]+)', intervention.split()[-1]).group(1)\n else:\n intervention_dict[intervention] = intervention\n\n data['intervention id'] = data['studies_intervention'].map(intervention_dict)\n\n builder_utils.export_contents(data, directory, filename)\n\n\ndef get_project_information(driver, project_id):\n query_name = 'project_graph'\n queries = []\n data = []\n res = []\n try:\n query = ''\n data_upload_cypher = get_data_upload_queries()\n for section in data_upload_cypher[query_name]:\n code = section['query']\n queries.extend(code.replace(\"PROJECTID\", project_id).split(';')[0:-1])\n for query in queries:\n result = connector.sendQuery(driver, query+\";\")[0]\n data.append(result)\n except Exception as err:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"Error: {}. 
Creating analytical samples: Query name ({}) - Query ({}), error info: {}, file: {},line: {}\".format(err, query_name, query, sys.exc_info(), fname, exc_tb.tb_lineno))\n\n if data:\n for i, j in enumerate(data):\n df = pd.DataFrame([data[i]], columns=data[i].keys())\n header = '_'.join(df.columns[0].split('_', 1)[1:]).capitalize()\n df.rename(columns={df.columns[0]: 'project'}, inplace=True)\n res.append(viz.get_table(df, identifier='new_project_{}'.format(header), args={'title':'{} data uploaded for project {}'.format(header, project_id)}))\n else:\n res = None\n logger.error(\"Error: No data was uploaded for project: {}. Review your experimental design and data files and the logs for errors.\".format(project_id))\n\n return res\n" ]
[ [ "pandas.DataFrame" ] ]
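Several helpers in the dataUpload.py record above follow the same pattern: take the unique external identifiers from a clinical table, hand out sequential internal identifiers continuing from the last one known to the database, and map them back onto the dataframe. A minimal pandas sketch of that pattern; the sample table and the starting counter are invented, whereas the real code obtains the counter from Neo4j with a Cypher query.

import numpy as np
import pandas as pd

data = pd.DataFrame({"subject external_id": ["pA", "pB", "pA", "pC"]})

last_used = 41                                   # pretend answer from the database
external_ids = data["subject external_id"].unique()
new_ids = ["S" + str(i)
           for i in np.arange(last_used, last_used + len(external_ids))]
subject_dict = dict(zip(external_ids, new_ids))  # external id -> internal id

data["subject id"] = data["subject external_id"].map(subject_dict)
print(data)
#   subject external_id subject id
# 0                  pA        S41
# 1                  pB        S42
# 2                  pA        S41
# 3                  pC        S43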
mayu-ot/rethinking-evs
[ "022e76005ca87bd2d01cd6d05e4ca3356ada179d" ]
[ "src/summe_eval.py" ]
[ "from tools.summarizer import summarize\nfrom tools.io import load_summe_mat\nimport pandas as pd\nfrom sklearn.metrics import f1_score\nimport numpy as np\nimport json\nfrom joblib import Parallel, delayed\nimport os\n\ndef get_summe_gssummary():\n summe_data = load_summe_mat('data/raw/summe/GT/')\n \n gold_standard = []\n for item in summe_data:\n user_anno = item['user_anno']\n user_anno = user_anno.T\n user_anno = user_anno.astype(np.bool)\n \n gold_standard.append(\n {\n 'gs_summary': user_anno,\n 'video': item['video']\n }\n )\n \n return gold_standard\n\ndef get_random_summary(N, segment, budget):\n rand_score = np.random.random((N,))\n rand_summary = summarize(rand_score, segment, int(N * budget))\n return rand_summary\n\ndef evaluate_baseline(in_file, verbose=True):\n results = json.load(open(in_file))\n gt_summary = get_summe_gssummary()\n \n b_score = []\n \n for item in gt_summary:\n gs_summary = item['gs_summary']\n N = gs_summary.shape[1]\n segment = results[item['video']]['segment']\n \n rand_summary = get_random_summary(N, segment, budget=0.15)\n \n f1_scores = [f1_score(x, rand_summary) for x in gs_summary]\n f1_min = min(f1_scores)\n f1_mean = sum(f1_scores) / len(f1_scores)\n f1_max = max(f1_scores)\n \n b_score.append((f1_min, f1_mean, f1_max))\n \n if verbose:\n print('%25s | %6.2f | %6.2f | %6.2f |' % (item['video'], f1_min * 100, f1_mean * 100, f1_max * 100))\n \n b_score = np.array(b_score)\n score_summary = b_score.mean(axis=0)\n \n if verbose:\n print('%25s | %6.2f | %6.2f | %6.2f |' % ('Avg.', score_summary[0] * 100, score_summary[1] * 100, score_summary[2] * 100))\n \n return {'method': 'Random',\n 'min': score_summary[0],\n 'avg': score_summary[1],\n 'max': score_summary[2]}\n \ndef evaluate(in_file, name=None, verbose=True):\n results = json.load(open(in_file))\n gt_summary = get_summe_gssummary()\n \n score = []\n baseline_score = []\n \n for item in gt_summary:\n gs_summary = item['gs_summary']\n N = gs_summary.shape[1]\n \n summary = results[item['video']]['summary']\n f1_scores = [f1_score(x, summary) for x in gs_summary]\n \n f1_min = min(f1_scores)\n f1_mean = sum(f1_scores) / len(f1_scores)\n f1_max = max(f1_scores)\n \n score.append((f1_min, f1_mean, f1_max))\n \n if verbose:\n print('%25s | %6.2f | %6.2f | %6.2f |' % (item['video'], f1_min * 100, f1_mean * 100, f1_max * 100))\n\n score = np.array(score)\n score_summary = score.mean(axis=0)\n \n if verbose:\n print('%25s | %6.2f | %6.2f | %6.2f |' % ('Avg.', score_summary[0] * 100, score_summary[1] * 100, score_summary[2] * 100))\n \n if name is None:\n name = in_file \n \n return {'method': name,\n 'min': score_summary[0],\n 'avg': score_summary[1],\n 'max': score_summary[2]}\n\ndef run(in_file):\n score_summary = evaluate(in_file, verbose=True)\n \n print('evaluating baseline scores')\n N = 100\n res = Parallel(n_jobs=-1)( [delayed(evaluate_baseline)(in_file, verbose=False) for _ in range(N)] )\n res.append(score_summary)\n df = pd.DataFrame(res)\n print(df[df.method=='Random'][['min', 'avg', 'max']].describe())\n \n out_file = 'data/processed/'+os.path.basename(in_file)+'.eval.csv'\n print(f'writing the results in {out_file}')\n df.to_csv(out_file,\n index=False)\n \nif __name__=='__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('in_file', type=str)\n args = parser.parse_args()\n run(in_file=args.in_file)" ]
[ [ "sklearn.metrics.f1_score", "numpy.array", "numpy.random.random", "pandas.DataFrame" ] ]
Inglezos/covid19-sir
[ "53306ae37d229ffcf4eaacec82dd944612fdc672", "53306ae37d229ffcf4eaacec82dd944612fdc672" ]
[ "covsirphy/cleaning/vaccine_data.py", "tests/test_cleaning/test_pcr.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport datetime\nfrom covsirphy.util.error import SubsetNotFoundError\nfrom covsirphy.cleaning.cbase import CleaningBase\n\n\nclass VaccineData(CleaningBase):\n \"\"\"\n Dataset regarding vaccination retrieved from \"Our World In Data\".\n https://github.com/owid/covid-19-data/tree/master/public/data\n https://ourworldindata.org/coronavirus\n\n Args:\n filename (str or pathlib.path): CSV filename to save the raw dataset\n force (bool): if True, always download the dataset from the server\n verbose (int): level of verbosity\n\n Note:\n Columns of VaccineData.cleaned():\n - Date (pandas.TimeStamp): observation dates\n - Country (pandas.Category): country (or province) names\n - ISO3 (pandas.Category): ISO3 codes\n - Product (pandas.Category): product names\n - Vaccinations (int): cumulative number of vaccinations\n - Vaccinated_once (int): cumulative number of people who received at least one vaccine dose\n - Vaccinated_full (int): cumulative number of people who received all doses prescrived by the protocol\n \"\"\"\n # URL\n URL = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/\"\n URL_REC = f\"{URL}vaccinations.csv\"\n URL_LOC = f\"{URL}locations.csv\"\n # Columns\n VAC_COLS = [\n CleaningBase.DATE, CleaningBase.COUNTRY, CleaningBase.ISO3, CleaningBase.PRODUCT,\n CleaningBase.VAC, CleaningBase.V_ONCE, CleaningBase.V_FULL]\n VAC_SUBSET_COLS = [CleaningBase.DATE, CleaningBase.VAC, CleaningBase.V_ONCE, CleaningBase.V_FULL]\n\n def __init__(self, filename, force=False, verbose=1):\n Path(filename).parent.mkdir(exist_ok=True, parents=True)\n if Path(filename).exists() and not force:\n try:\n self._raw = self.load(filename)\n except KeyError:\n # Error when the local dataset does not have necessary columns\n # Raised when new CovsirPhy version requires additional columns\n self._raw = self._retrieve(filename=filename, verbose=verbose)\n else:\n self._raw = self._retrieve(filename=filename, verbose=verbose)\n self._cleaned_df = self._cleaning()\n self._citation = \"Hasell, J., Mathieu, E., Beltekian, D. et al.\" \\\n \" A cross-country database of COVID-19 testing. 
Sci Data 7, 345 (2020).\" \\\n \" https://doi.org/10.1038/s41597-020-00688-8\"\n # Directory that save the file\n self._dirpath = Path(filename or \"input\").resolve().parent\n\n def _retrieve(self, filename, verbose=1):\n \"\"\"\n Retrieve the dataset from server.\n Args:\n filename (str or pathlib.path): CSV filename to save the raw dataset\n verbose (int): level of verbosity\n Returns:\n pd.DataFrame:\n Index reset index\n Columns Date, Country, Product, Vaccinations\n \"\"\"\n # Show URL\n if verbose:\n print(\"Retrieving COVID-19 vaccination dataset from https://github.com/owid/covid-19-data/\")\n # Download datasets and merge them\n rename_dict = {\n \"date\": self.DATE, \"location\": self.COUNTRY, \"iso_code\": self.ISO3,\n \"vaccines\": self.PRODUCT, \"total_vaccinations\": self.VAC,\n \"people_vaccinated\": self.V_ONCE,\n \"people_fully_vaccinated\": self.V_FULL,\n }\n rec_df = self.load(self.URL_REC, columns=list(set(rename_dict) - set([\"vaccines\"])))\n loc_df = self.load(self.URL_LOC, columns=[\"location\", \"vaccines\"])\n df = rec_df.merge(loc_df, how=\"left\", on=\"location\")\n df = df.rename(rename_dict, axis=1)\n # Save the dataframe as CSV file\n df.to_csv(filename, index=False)\n return df\n\n def _cleaning(self):\n \"\"\"\n Perform data cleaning of the raw data.\n\n Returns:\n pandas.DataFrame:\n Index\n reset index\n Columns\n - Date (pandas.TimeStamp): observation dates\n - Country (pandas.Category): country (or province) names\n - ISO3 (pandas.Category): ISO3 codes\n - Product (pandas.Category): product names\n - Vaccinations (int): cumulative number of vaccinations\n - Vaccinated_once (int): cumulative number of people who received at least one vaccine dose\n - Vaccinated_full (int): cumulative number of people who received all doses prescrived by the protocol\n \"\"\"\n df = self._raw.copy()\n # Date\n df[self.DATE] = pd.to_datetime(df[self.DATE])\n for col in [self.COUNTRY, self.ISO3, self.PRODUCT]:\n df[col] = df[col].astype(\"category\")\n # Fill in NA values\n for col in [self.VAC, self.V_ONCE, self.V_FULL]:\n df[col] = pd.to_numeric(df[col], errors=\"coerce\")\n df[col] = df.groupby(self.ISO3)[col].fillna(method=\"ffill\").fillna(0).astype(np.int64)\n today_date = datetime.datetime.today().replace(hour=00, minute=00, second=00, microsecond=00)\n for country in df.Country.unique():\n subset_df = df.loc[df[self.COUNTRY] == country]\n # Add any missing dates up until today\n if subset_df[self.DATE].max() < today_date:\n new_dates = pd.date_range(\n subset_df[self.DATE].max() + datetime.timedelta(days=1), today_date)\n subset_df = subset_df.reset_index(drop=True)\n keep_index = subset_df[self.VAC].idxmax() + 1\n new_df = pd.DataFrame(index=new_dates, columns=subset_df.columns)\n new_df.index.name = self.DATE\n new_df = new_df.drop(self.DATE, axis=1).reset_index()\n subset_df = pd.concat([subset_df, new_df], axis=0, ignore_index=True).ffill()\n subset_df = subset_df.loc[keep_index:]\n df = pd.concat([df, subset_df], axis=0, ignore_index=True)\n df.sort_values(by=[self.COUNTRY, self.DATE], ignore_index=True, inplace=True)\n return df.loc[:, self.VAC_COLS]\n\n def subset(self, country, product=None, start_date=None, end_date=None):\n \"\"\"\n Return subset of the country/province and start/end date.\n\n Args:\n country (str or None): country name or ISO3 code\n product (str or None): product name\n start_date (str or None): start date, like 22Jan2020\n end_date (str or None): end date, like 01Feb2020\n\n Returns:\n pandas.DataFrame\n Index\n reset index\n Columns\n 
- Date (pandas.TimeStamp): observation date\n - Vaccinations (int): the number of vaccinations\n - Vaccinated_once (int): cumulative number of people who received at least one vaccine dose\n - Vaccinated_full (int): cumulative number of people who received all doses prescrived by the protocol\n \"\"\"\n df = self._cleaned_df.copy()\n # Subset by country\n country_alias = self.ensure_country_name(country)\n df = df.loc[df[self.COUNTRY] == country_alias]\n # Subset by product name\n if product is not None:\n df = df.loc[df[self.PRODUCT] == product]\n # Subset with start date\n if start_date is not None:\n df = df.loc[df[self.DATE] >= self._ensure_date(start_date)]\n # Subset with end date\n if end_date is not None:\n df = df.loc[df[self.DATE] <= self._ensure_date(end_date)]\n # Check records were found\n if df.empty:\n raise SubsetNotFoundError(\n country=country, country_alias=country_alias, province=product,\n start_date=start_date, end_date=end_date)\n return df.loc[:, self.VAC_SUBSET_COLS].reset_index(drop=True)\n\n def records(self, country, product=None, start_date=None, end_date=None):\n \"\"\"\n Return subset of the country/province and start/end date.\n\n Args:\n country (str or None): country name or ISO3 code\n product (str or None): product name\n start_date (str or None): start date, like 22Jan2020\n end_date (str or None): end date, like 01Feb2020\n\n Returns:\n pandas.DataFrame\n Index\n reset index\n Columns\n - Date (pandas.TimeStamp): observation date\n - Vaccinations (int): the number of vaccinations\n - Vaccinated_once (int): cumulative number of people who received at least one vaccine dose\n - Vaccinated_full (int): cumulative number of people who received all doses prescrived by the protocol\n \"\"\"\n return self.subset(\n country=country, product=product, start_date=start_date, end_date=end_date)\n\n def total(self):\n \"\"\"\n Calculate total values of the cleaned dataset.\n\n Returns:\n pandas.DataFrame:\n Index\n reset index\n Columns\n - Date (pandas.TimeStamp): observation date\n - Vaccinations (int): the number of vaccinations\n - Vaccinated_once (int): cumulative number of people who received at least one vaccine dose\n - Vaccinated_full (int): cumulative number of people who received all doses prescrived by the protocol\n \"\"\"\n df = self._cleaned_df.copy()\n # Select 'World' data\n df = df.loc[df[self.COUNTRY] == \"World\"]\n # Resampling\n df = df.set_index(self.DATE).resample(\"D\").sum()\n return df.reset_index()\n\n def map(self, country=None, variable=\"Vaccinations\", date=None, **kwargs):\n \"\"\"\n Create colored map with the number of vaccinations.\n\n Args:\n country (None): always None\n variable (str): variable to show\n date (str or None): date of the records or None (the last value)\n kwargs: arguments of ColoredMap() and ColoredMap.plot()\n\n Raises:\n NotImplementedError: @country was specified\n \"\"\"\n if country is not None:\n raise NotImplementedError(\"@country cannot be specified, always None.\")\n # Date\n date_str = date or self.cleaned()[self.DATE].max().strftime(self.DATE_FORMAT)\n country_str = \"Global\"\n title = f\"{country_str}: the number of {variable.lower()} on {date_str}\"\n # Global map\n return self._colored_map_global(variable=variable, title=title, date=date, **kwargs)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport warnings\nimport pytest\nimport pandas as pd\nfrom covsirphy import SubsetNotFoundError, PCRIncorrectPreconditionError\nfrom covsirphy import PCRData\n\n\nclass TestPCRData(object):\n def 
test_cleaning(self, pcr_data):\n df = pcr_data.cleaned()\n assert set(df.columns) == set(PCRData.PCR_COLUMNS)\n\n def test_from_dataframe(self, pcr_data):\n df = pcr_data.cleaned()\n assert isinstance(PCRData.from_dataframe(df), PCRData)\n\n def test_use_ourworldindata(self, pcr_data):\n pcr_data.use_ourworldindata(\n filename=\"input/ourworldindata_pcr.csv\")\n\n @pytest.mark.parametrize(\"country\", [\"Japan\"])\n def test_subset(self, pcr_data, country):\n with pytest.raises(SubsetNotFoundError):\n pcr_data.subset(country, end_date=\"01Jan2000\")\n df = pcr_data.subset(country)\n df = pcr_data.subset(country, end_date=\"01Jan2021\")\n assert set(df.columns) == set([*PCRData.PCR_NLOC_COLUMNS, PCRData.T_DIFF])\n\n @pytest.mark.parametrize(\"country\", [\"Greece\"])\n def test_subset_complement(self, pcr_data, country):\n with pytest.raises(NotImplementedError):\n pcr_data.subset_complement(country)\n\n @pytest.mark.parametrize(\"country\", [\"Greece\"])\n def test_records(self, pcr_data, country):\n with pytest.raises(SubsetNotFoundError):\n pcr_data.records(country, end_date=\"01Jan2000\")\n df, _ = pcr_data.records(country)\n assert set(df.columns) == set([*PCRData.PCR_NLOC_COLUMNS, PCRData.T_DIFF])\n\n @pytest.mark.parametrize(\"country\", [\"Greece\", \"Italy\", \"Sweden\"])\n @pytest.mark.parametrize(\"last_date\", [\"21Apr2021\", None])\n def test_positive_rate(self, pcr_data, country, last_date):\n warnings.simplefilter(\"ignore\", category=UserWarning)\n pcr_data.positive_rate(country, last_date=last_date, show_figure=True)\n df = pcr_data.positive_rate(country, last_date=last_date, show_figure=False)\n assert set([PCRData.T_DIFF, PCRData.C_DIFF, PCRData.PCR_RATE]).issubset(df.columns)\n if last_date is not None:\n assert df[pcr_data.DATE].max() <= pd.to_datetime(last_date)\n\n @pytest.mark.parametrize(\"country\", [\"China\"])\n def test_positive_rate_error(self, pcr_data, country):\n with pytest.raises(PCRIncorrectPreconditionError):\n pcr_data.positive_rate(country, show_figure=False)\n\n def test_map(self, pcr_data):\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n pcr_data.map(country=None)\n pcr_data.map(country=\"Japan\")\n with pytest.raises(NotImplementedError):\n pcr_data.map(variable=\"Feeling\")\n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.to_numeric", "pandas.DataFrame" ], [ "pandas.to_datetime" ] ]
neozhangthe1/coverage_model
[ "8717e0a1fdd67af16a279e6b9aecf562f31bcdce", "8717e0a1fdd67af16a279e6b9aecf562f31bcdce" ]
[ "groundhog/datasets/TM_dataset.py", "experiments/nmt/force_decoding.py" ]
[ "\"\"\"\nData iterator for text datasets that are used for translation model.\n\"\"\"\n__docformat__ = 'restructedtext en'\n__authors__ = (\"Razvan Pascanu \"\n \"Caglar Gulcehre \"\n \"KyungHyun Cho \")\n__contact__ = \"Razvan Pascanu <r.pascanu@gmail>\"\n\nimport numpy as np\n\nimport os, gc\n\nimport tables\nimport copy\nimport logging\n\nimport threading\nimport queue\n\nimport collections\n\nlogger = logging.getLogger(__name__)\n\nclass TMIterator(object):\n\n def __init__(self,\n batch_size,\n target_lfiles=None,\n source_lfiles=None,\n order = 0,\n dtype=\"int64\",\n use_infinite_loop=True,\n stop=-1,\n output_format = None,\n can_fit = False,\n shuffle = False):\n\n assert type(source_lfiles) == list, \"Target language file should be a list.\"\n\n if target_lfiles is not None:\n assert type(target_lfiles) == list, \"Target language file should be a list.\"\n assert len(target_lfiles) == len(source_lfiles)\n\n self.batch_size = batch_size\n self.target_lfiles = target_lfiles\n self.source_lfiles = source_lfiles\n self.use_infinite_loop=use_infinite_loop\n self.target_langs = []\n self.source_langs = []\n self.order = order\n self.offset = 0\n self.data_len = 0\n self.stop = stop\n self.can_fit = can_fit\n self.dtype = dtype\n self.output_format = output_format\n self.shuffle = shuffle\n self.load_files()\n\n def load_files(self):\n mmap_mode = None\n if self.can_fit == False:\n mmap_mode = \"r\"\n if self.target_lfiles is not None:\n for target_lfile in self.target_lfiles:\n if target_lfile[-3:] == '.gz':\n target_lang = np.loadtxt(target_lfile)\n else:\n target_lang = np.load(target_lfile, mmap_mode=mmap_mode)\n self.target_langs.append(target_lang)\n\n for source_lfile in self.source_lfiles:\n if source_lfile[-3:] == '.gz':\n source_lang = np.loadtxt(source_lfile)\n else:\n source_lang = np.load(source_lfile, mmap_mode=mmap_mode)\n self.source_langs.append(source_lang)\n if isinstance(source_lang, list):\n self.data_len = len(source_lang)\n else:\n self.data_len = source_lang.shape[0]\n\n if self.shuffle and self.can_fit:\n shuffled_indx = np.arange(self.data_len)\n np.random.shuffle(shuffled_indx)\n if self.target_lfiles is not None:\n if isinstance(self.target_langs[0], list):\n shuffled_target=[np.array([tt[si] for si in shuffled_indx]) for tt in self.target_langs]\n else:\n shuffled_target = [tt[shuffled_indx] for tt in self.target_langs]\n self.target_langs = shuffled_target\n if isinstance(self.source_langs[0], list):\n shuffled_source=[np.array([tt[si] for si in shuffled_indx]) for tt in self.source_langs]\n else:\n shuffled_source = [tt[shuffled_indx] for tt in self.source_langs]\n self.source_langs = shuffled_source\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.offset = 0\n\n def __next__(self):\n if self.stop != -1 and self.offset >= self.stop:\n self.offset = 0\n raise StopIteration\n else:\n while True:\n source_data = []\n target_data = []\n\n for source_lang in self.source_langs:\n inc_offset = self.offset+self.batch_size\n npos = 0\n while not npos and inc_offset <= self.data_len:\n npos = len([x for x in\n source_lang[self.offset:inc_offset].tolist()\n if len(x) > 0 ])\n nzeros = self.batch_size - npos\n inc_offset += nzeros\n\n sents = np.asarray([np.cast[self.dtype](si) for si in\n source_lang[self.offset:inc_offset].tolist()\n if len(si)>0])\n if self.order:\n sents = sents.T\n source_data.append(sents)\n\n for target_lang in self.target_langs:\n inc_offset = self.offset+self.batch_size\n npos = 0\n while not npos and inc_offset <= 
self.data_len:\n npos = len([x for x in\n target_lang[self.offset:inc_offset].tolist()\n if len(x) > 0 ])\n nzeros = self.batch_size - npos\n inc_offset += nzeros\n\n sents = np.asarray([np.cast[self.dtype](si) for si in target_lang[self.offset:inc_offset].tolist() if len(si) > 0])\n if self.order:\n sents = sents.T\n target_data.append(sents)\n if inc_offset > self.data_len and self.use_infinite_loop:\n print(\"Restarting the dataset iterator.\")\n inc_offset = 0 #self.offset + self.batch_size\n elif inc_offset > self.data_len:\n self.offset = 0\n raise StopIteration\n if len(source_data[0]) < 1 or len(target_data[0]) < 1:\n self.offset = inc_offset\n inc_offset = self.offset+self.batch_size\n continue\n break\n self.offset = inc_offset\n if not self.output_format:\n return source_data, target_data\n else:\n return self.output_format(source_data, target_data)\n\nclass PytablesBitextFetcher(threading.Thread):\n def __init__(self, parent, start_offset):\n threading.Thread.__init__(self)\n self.parent = parent\n self.start_offset = start_offset\n \n # added by Zhaopeng Tu, 2016-01-08\n # ugly one, only for obtaining the data_len\n diter = self.parent\n driver = None\n if diter.can_fit:\n driver = \"H5FD_CORE\"\n source_table = tables.open_file(diter.source_file, 'r', driver=driver)\n source_data, source_index = (source_table.get_node(diter.table_name),\n source_table.get_node(diter.index_name))\n self.data_len = source_index.shape[0]\n\n def run(self):\n diter = self.parent\n\n driver = None\n if diter.can_fit:\n driver = \"H5FD_CORE\"\n\n target_table = tables.open_file(diter.target_file, 'r', driver=driver)\n target_data, target_index = (target_table.get_node(diter.table_name),\n target_table.get_node(diter.index_name))\n\n source_table = tables.open_file(diter.source_file, 'r', driver=driver)\n source_data, source_index = (source_table.get_node(diter.table_name),\n source_table.get_node(diter.index_name))\n\n assert source_index.shape[0] == target_index.shape[0]\n # self.data_len = source_index.shape[0]\n\n offset = self.start_offset\n if offset == -1:\n offset = 0\n if diter.shuffle:\n offset = np.random.randint(self.data_len)\n logger.debug(\"{} entries\".format(self.data_len))\n logger.debug(\"Starting from the entry {}\".format(offset))\n\n while not diter.exit_flag:\n last_batch = False\n source_sents = []\n target_sents = []\n while len(source_sents) < diter.batch_size:\n if offset == self.data_len:\n if diter.use_infinite_loop:\n offset = 0\n else:\n last_batch = True\n break\n\n slen, spos = source_index[offset]['length'], source_index[offset]['pos']\n tlen, tpos = target_index[offset]['length'], target_index[offset]['pos']\n offset += 1\n\n if slen > diter.max_len or tlen > diter.max_len:\n continue\n source_sents.append(source_data[spos:spos + slen].astype(diter.dtype))\n target_sents.append(target_data[tpos:tpos + tlen].astype(diter.dtype))\n\n if len(source_sents):\n diter.queue.put([int(offset), source_sents, target_sents])\n if last_batch:\n diter.queue.put([None])\n return\n\nclass PytablesBitextIterator(object):\n\n def __init__(self,\n batch_size,\n target_file=None,\n source_file=None,\n dtype=\"int64\",\n table_name='/phrases',\n index_name='/indices',\n can_fit=False,\n queue_size=1000,\n cache_size=1000,\n shuffle=True,\n use_infinite_loop=True,\n max_len=1000):\n\n args = locals()\n args.pop(\"self\")\n self.__dict__.update(args)\n\n self.exit_flag = False\n\n def start(self, start_offset):\n self.queue = queue.Queue(maxsize=self.queue_size)\n self.gather = 
PytablesBitextFetcher(self, start_offset)\n self.gather.daemon = True\n self.gather.start()\n # added by Zhaopeng Tu, 2015-01-08\n # for halving the learning rate\n self.data_len = self.gather.data_len\n\n def __del__(self):\n if hasattr(self, 'gather'):\n self.gather.exitFlag = True\n self.gather.join()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n batch = self.queue.get()\n if not batch:\n return None\n self.next_offset = batch[0]\n return batch[1], batch[2]\n\nclass NNJMContextIterator(object):\n\n def __init__(self,\n batch_size,\n order = 0,\n path = None,\n dtype = \"int64\",\n use_infinite_loop = True,\n stop = -1,\n output_format = None,\n can_fit = False):\n\n assert path is not None, \"Path should not be empty!.\"\n\n self.source_ctxt = None\n self.target_ctxt = None\n self.targets = None\n\n self.batch_size = batch_size\n self.path = path\n self.use_infinite_loop = use_infinite_loop\n self.order = order\n self.offset = 0\n self.data_len = 0\n self.stop = stop\n self.can_fit = can_fit\n self.dtype = dtype\n self.output_format = output_format\n self.load_files()\n\n def load_files(self):\n mmap_mode = None\n if self.can_fit == False:\n mmap_mode = \"r\"\n\n data_file = np.load(self.path, mmap_mode=mmap_mode)\n\n self.source_ctxt = data_file[\"src_ctxt\"]\n self.target_ctxt = data_file[\"tgt_ctxt\"]\n self.targets = data_file[\"tgts\"]\n self.targets = self.targets.reshape(self.targets.shape[0], 1)\n\n self.data_len = self.source_ctxt.shape[0]\n\n def __iter__(self):\n return self\n\n def reset(self):\n self.offset = 0\n\n def __next__(self):\n if self.stop != -1 and self.offset >= self.stop:\n self.offset = 0\n raise StopIteration\n else:\n while True:\n inc_offset = self.offset + self.batch_size\n if inc_offset > self.data_len and self.use_infinite_loop:\n print(\"Restarting the dataset iterator.\")\n inc_offset = 0\n elif inc_offset > self.data_len:\n self.offset = 0\n raise StopIteration\n\n sents_s = np.asarray([np.cast[self.dtype](si) for si in\n self.source_ctxt[self.offset:inc_offset].tolist()\n if len(si)>0])\n\n if self.order:\n sents_s = sents_s.T\n\n source_ctxt = sents_s\n sents_t = np.asarray([np.cast[self.dtype](si) for si in\n self.target_ctxt[self.offset:inc_offset].tolist()\n if len(si)>0])\n\n if self.order:\n sents_t = sents_t.T\n\n target_ctxt = sents_t\n targets = np.asarray([np.cast[self.dtype](si) for si in\n self.targets[self.offset:inc_offset].tolist()\n if len(si)>0])\n\n if len(source_ctxt) < 1 or len(target_ctxt) < 1 or len(targets) < 1:\n self.offset = inc_offset\n inc_offset = self.offset + self.batch_size\n continue\n break\n self.offset = inc_offset\n\n if not self.output_format:\n return source_ctxt, target_ctxt, targets\n else:\n return self.output_format(source_ctxt, target_ctxt, targets)\n\n\n", "#!/usr/bin/env python\n\nimport argparse\nimport pickle\nimport traceback\nimport logging\nimport time\nimport sys\n\nimport numpy\n\nimport experiments.nmt\nfrom experiments.nmt import\\\n RNNEncoderDecoder,\\\n prototype_state,\\\n prototype_search_with_coverage_state,\\\n parse_input, parse_target\n\nfrom experiments.nmt.numpy_compat import argpartition\n\nnumpy.set_printoptions(threshold=numpy.nan)\n\nlogger = logging.getLogger(__name__)\n\nclass Timer(object):\n\n def __init__(self):\n self.total = 0\n\n def start(self):\n self.start_time = time.time()\n\n def finish(self):\n self.total += time.time() - self.start_time\n\nclass BeamSearch(object):\n\n def __init__(self, enc_dec):\n self.enc_dec = enc_dec\n state = 
self.enc_dec.state\n self.eos_id = state['null_sym_target']\n self.unk_id = state['unk_sym_target']\n\n def compile(self):\n self.comp_repr = self.enc_dec.create_representation_computer()\n # added by Zhaopeng Tu, 2015-12-17, for fertility\n if self.enc_dec.state['maintain_coverage'] and self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['use_fertility_model']:\n self.comp_fert = self.enc_dec.create_fertility_computer()\n self.comp_init_states = self.enc_dec.create_initializers()\n self.comp_next_probs = self.enc_dec.create_next_probs_computer()\n self.comp_next_states = self.enc_dec.create_next_states_computer()\n\n def search(self, seq, out, ignore_unk=False, minlen=1):\n c = self.comp_repr(seq)[0]\n states = [x[None, :] for x in self.comp_init_states(c)]\n dim = states[0].shape[1]\n # added by Zhaopeng Tu, 2015-11-02\n if self.enc_dec.state['maintain_coverage']:\n coverage_dim = self.enc_dec.state['coverage_dim']\n if self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['coverage_accumulated_operation'] == 'subtractive':\n coverages = numpy.ones((c.shape[0], 1, coverage_dim), dtype='float32')\n else:\n coverages = numpy.zeros((c.shape[0], 1, coverage_dim), dtype='float32')\n else:\n coverages = None\n \n if self.enc_dec.state['maintain_coverage'] and self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['use_fertility_model']:\n fertility = self.comp_fert(c)\n else:\n fertility = None\n\n num_levels = len(states)\n\n aligns = []\n costs = [0.0]\n\n for k in range(len(out)):\n # Compute probabilities of the next words for\n # all the elements of the beam.\n last_words = (numpy.array([out[k-1]])\n if k > 0\n else numpy.zeros(1, dtype=\"int64\"))\n\n results = self.comp_next_probs(c, k, last_words, *states, coverage_before=coverages, fertility=fertility)\n log_probs = numpy.log(results[0])\n alignment = results[1]\n # alignment shape: (source_len, target_num) where target_num = 1\n aligns.append(alignment[:,0])\n\n # Adjust log probs according to search restrictions\n if ignore_unk:\n log_probs[:,self.unk_id] = -numpy.inf\n # TODO: report me in the paper!!!\n if k < minlen:\n log_probs[:,self.eos_id] = -numpy.inf\n\n # costs = numpy.array(costs)[:, None] - log_probs\n\n inputs = numpy.array([out[k]])\n states = self.comp_next_states(c, k, inputs, *states, coverage_before=coverages, fertility=fertility)\n if self.enc_dec.state['maintain_coverage']:\n coverages = states[-1]\n states = states[:-1]\n\n if self.enc_dec.state['maintain_coverage']:\n coverage = coverages[:,0,0]\n # aligns shape: (target_len, source_len)\n # we reverse it to the shape (source_len, target_len) to show the matrix\n aligns = numpy.array(aligns).transpose()\n\n if self.enc_dec.state['maintain_coverage']:\n if self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['use_fertility_model']:\n return aligns, costs, coverage, fertility\n else:\n return aligns, costs, coverage\n else:\n return aligns, costs\n\ndef indices_to_words(i2w, seq):\n sen = []\n for k in range(len(seq)):\n if i2w[seq[k]] == '<eol>':\n break\n sen.append(i2w[seq[k]])\n return sen\n\ndef force_decoding(lm_model, seq, out,\n sampler=None, beam_search=None,\n ignore_unk=False, normalize=False,\n alpha=1, verbose=False):\n if lm_model.maintain_coverage:\n if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:\n aligns, costs, coverage, fertility = beam_search.search(seq, out,\n ignore_unk=ignore_unk)\n else:\n aligns, costs, coverage = beam_search.search(seq, out,\n 
ignore_unk=ignore_unk)\n else:\n aligns, costs = beam_search.search(seq, out,\n ignore_unk=ignore_unk)\n if normalize:\n costs = [co / len(out) for co in costs]\n\n if lm_model.maintain_coverage:\n if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:\n return aligns, costs, coverage, fertility\n else:\n return aligns, costs, coverage\n else:\n return aligns, costs\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n \"Force decoding a sentence pair to output the alignments\")\n parser.add_argument(\"--state\",\n required=True, help=\"State to use\")\n parser.add_argument(\"--ignore-unk\",\n default=False, action=\"store_true\",\n help=\"Ignore unknown words\")\n parser.add_argument(\"--source\",\n help=\"File of source sentences\")\n parser.add_argument(\"--target\",\n help=\"File of target sentences\")\n parser.add_argument(\"--aligns\",\n help=\"File to save alignments in\")\n parser.add_argument(\"--normalize\",\n action=\"store_true\", default=False,\n help=\"Normalize log-prob with the word count\")\n parser.add_argument(\"--verbose\",\n action=\"store_true\", default=False,\n help=\"Be verbose\")\n parser.add_argument(\"model_path\",\n help=\"Path to the model\")\n parser.add_argument(\"changes\",\n nargs=\"?\", default=\"\",\n help=\"Changes to state\")\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n\n state = prototype_search_with_coverage_state()\n with open(args.state) as src:\n state.update(pickle.load(src))\n state.update(eval(\"dict({})\".format(args.changes)))\n\n logging.basicConfig(level=getattr(logging, state['level']), format=\"%(asctime)s: %(name)s: %(levelname)s: %(message)s\")\n\n rng = numpy.random.RandomState(state['seed'])\n enc_dec = RNNEncoderDecoder(state, rng, skip_init=True, compute_alignment=True)\n enc_dec.build()\n lm_model = enc_dec.create_lm_model()\n lm_model.load(args.model_path)\n indx_word = pickle.load(open(state['word_indx'],'rb'))\n t_indx_word = pickle.load(open(state['word_indx_trgt'], 'rb'))\n\n sampler = None\n beam_search = BeamSearch(enc_dec)\n beam_search.compile()\n\n idict_src = pickle.load(open(state['indx_word'],'rb'))\n t_idict_src = pickle.load(open(state['indx_word_target'],'rb'))\n\n\n fsrc = open(args.source, 'r')\n ftrg = open(args.target, 'r')\n\n start_time = time.time()\n\n total_cost = 0.0\n # for i, line in enumerate(fsrc):\n i = 0\n while 1:\n try:\n seqin = fsrc.next().strip()\n seqout = ftrg.next().strip()\n except StopIteration:\n break\n\n seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)\n out, parsed_out = parse_target(state, t_indx_word, seqout, idx2word=t_idict_src)\n\n if lm_model.maintain_coverage:\n if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:\n aligns, costs, coverage, fertility = force_decoding(lm_model, seq, out, sampler=sampler,\n beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)\n else:\n aligns, costs, coverage = force_decoding(lm_model, seq, out, sampler=sampler,\n beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)\n else:\n aligns, costs = force_decoding(lm_model, seq, out, sampler=sampler,\n beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)\n \n print(\"Parsed Input:\", parsed_in)\n print(\"Parsed Target:\", parsed_out)\n print('Aligns:')\n print(aligns.tolist())\n\n\n if lm_model.maintain_coverage:\n # since we filtered <eos> from trans[best], thus the index adds 1\n print(\"Coverage:\", end=' ') \n words = parsed_in.split()\n 
for k in range(len(words)):\n print('%s/%.2f'%(words[k], coverage[k]), end=' ')\n print('')\n\n if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:\n print('Fertility: ', end=' ')\n for k in range(len(words)):\n print('%s/%.2f'%(words[k], fertility[k]), end=' ')\n print('')\n print() \n\n total_cost += costs[0]\n if (i + 1) % 100 == 0:\n logger.debug(\"Current speed is {} per sentence\".\n format((time.time() - start_time) / (i + 1)))\n print(\"Total cost of the translations: {}\".format(total_cost))\n\n fsrc.close()\n ftrg.close()\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.arange", "numpy.random.shuffle", "numpy.load", "numpy.array", "numpy.loadtxt", "numpy.random.randint" ], [ "numpy.log", "numpy.set_printoptions", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.random.RandomState" ] ]
pouyalj/DeepLearningCoursera
[ "4c0d79a53bbdd24fbb77503fed35e73d24949be2" ]
[ "1 - Neural Networks and Deep Learning/Deep Neural Network - Application.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Deep Neural Network for Image Classification: Application\n# \n# By the time you complete this notebook, you will have finished the last programming assignment of Week 4, and also the last programming assignment of Course 1! Go you! \n# \n# To build your cat/not-a-cat classifier, you'll use the functions from the previous assignment to build a deep network. Hopefully, you'll see an improvement in accuracy over your previous logistic regression implementation. \n# \n# **After this assignment you will be able to:**\n# \n# - Build and train a deep L-layer neural network, and apply it to supervised learning\n# \n# Let's get started!\n\n# ## Table of Contents\n# - [1 - Packages](#1)\n# - [2 - Load and Process the Dataset](#2)\n# - [3 - Model Architecture](#3)\n# - [3.1 - 2-layer Neural Network](#3-1)\n# - [3.2 - L-layer Deep Neural Network](#3-2)\n# - [3.3 - General Methodology](#3-3)\n# - [4 - Two-layer Neural Network](#4)\n# - [Exercise 1 - two_layer_model](#ex-1)\n# - [4.1 - Train the model](#4-1)\n# - [5 - L-layer Neural Network](#5)\n# - [Exercise 2 - L_layer_model](#ex-2)\n# - [5.1 - Train the model](#5-1)\n# - [6 - Results Analysis](#6)\n# - [7 - Test with your own image (optional/ungraded exercise)](#7)\n\n# <a name='1'></a>\n# ## 1 - Packages\n\n# Begin by importing all the packages you'll need during this assignment. \n# \n# - [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.\n# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n# - [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.\n# - [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.\n# - `dnn_app_utils` provides the functions implemented in the \"Building your Deep Neural Network: Step by Step\" assignment to this notebook.\n# - `np.random.seed(1)` is used to keep all the random function calls consistent. It helps grade your work - so please don't change it! \n\n# In[1]:\n\n\nimport time\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom dnn_app_utils_v3 import *\nfrom public_tests import *\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('autoreload', '2')\n\nnp.random.seed(1)\n\n\n# <a name='2'></a>\n# ## 2 - Load and Process the Dataset\n# \n# You'll be using the same \"Cat vs non-Cat\" dataset as in \"Logistic Regression as a Neural Network\" (Assignment 2). The model you built back then had 70% test accuracy on classifying cat vs non-cat images. Hopefully, your new model will perform even better!\n# \n# **Problem Statement**: You are given a dataset (\"data.h5\") containing:\n# - a training set of `m_train` images labelled as cat (1) or non-cat (0)\n# - a test set of `m_test` images labelled as cat and non-cat\n# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).\n# \n# Let's get more familiar with the dataset. 
Load the data by running the cell below.\n\n# In[2]:\n\n\ntrain_x_orig, train_y, test_x_orig, test_y, classes = load_data()\n\n\n# The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to check out other images. \n\n# In[3]:\n\n\n# Example of a picture\nindex = 10\nplt.imshow(train_x_orig[index])\nprint (\"y = \" + str(train_y[0,index]) + \". It's a \" + classes[train_y[0,index]].decode(\"utf-8\") + \" picture.\")\n\n\n# In[4]:\n\n\n# Explore your dataset \nm_train = train_x_orig.shape[0]\nnum_px = train_x_orig.shape[1]\nm_test = test_x_orig.shape[0]\n\nprint (\"Number of training examples: \" + str(m_train))\nprint (\"Number of testing examples: \" + str(m_test))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_x_orig shape: \" + str(train_x_orig.shape))\nprint (\"train_y shape: \" + str(train_y.shape))\nprint (\"test_x_orig shape: \" + str(test_x_orig.shape))\nprint (\"test_y shape: \" + str(test_y.shape))\n\n\n# As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.\n# \n# <img src=\"images/imvectorkiank.png\" style=\"width:450px;height:300px;\">\n# <caption><center><font color='purple'><b>Figure 1</b>: Image to vector conversion.</font></center></caption>\n\n# In[5]:\n\n\n# Reshape the training and test examples \ntrain_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The \"-1\" makes reshape flatten the remaining dimensions\ntest_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T\n\n# Standardize data to have feature values between 0 and 1.\ntrain_x = train_x_flatten/255.\ntest_x = test_x_flatten/255.\n\nprint (\"train_x's shape: \" + str(train_x.shape))\nprint (\"test_x's shape: \" + str(test_x.shape))\n\n\n# **Note**:\n# $12,288$ equals $64 \\times 64 \\times 3$, which is the size of one reshaped image vector.\n\n# <a name='3'></a>\n# ## 3 - Model Architecture\n\n# <a name='3-1'></a>\n# ### 3.1 - 2-layer Neural Network\n# \n# Now that you're familiar with the dataset, it's time to build a deep neural network to distinguish cat images from non-cat images!\n# \n# You're going to build two different models:\n# \n# - A 2-layer neural network\n# - An L-layer deep neural network\n# \n# Then, you'll compare the performance of these models, and try out some different values for $L$. \n# \n# Let's look at the two architectures:\n# \n# <img src=\"images/2layerNN_kiank.png\" style=\"width:650px;height:400px;\">\n# <caption><center><font color='purple'><b>Figure 2</b>: 2-layer neural network. <br> The model can be summarized as: INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT.</font></center></caption>\n# \n# <u><b>Detailed Architecture of Figure 2</b></u>:\n# - The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. \n# - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.\n# - Then, add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.\n# - Repeat the same process.\n# - Multiply the resulting vector by $W^{[2]}$ and add the intercept (bias). \n# - Finally, take the sigmoid of the result. If it's greater than 0.5, classify it as a cat.\n# \n# <a name='3-2'></a>\n# ### 3.2 - L-layer Deep Neural Network\n# \n# It's pretty difficult to represent an L-layer deep neural network using the above representation. 
However, here is a simplified network representation:\n# \n# <img src=\"images/LlayerNN_kiank.png\" style=\"width:650px;height:400px;\">\n# <caption><center><font color='purple'><b>Figure 3</b>: L-layer neural network. <br> The model can be summarized as: [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID</font></center></caption>\n# \n# <u><b>Detailed Architecture of Figure 3</b></u>:\n# - The input is a (64,64,3) image which is flattened to a vector of size (12288,1).\n# - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.\n# - Next, take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.\n# - Finally, take the sigmoid of the final linear unit. If it is greater than 0.5, classify it as a cat.\n# \n# <a name='3-3'></a>\n# ### 3.3 - General Methodology\n# \n# As usual, you'll follow the Deep Learning methodology to build the model:\n# \n# 1. Initialize parameters / Define hyperparameters\n# 2. Loop for num_iterations:\n# a. Forward propagation\n# b. Compute cost function\n# c. Backward propagation\n# d. Update parameters (using parameters, and grads from backprop) \n# 3. Use trained parameters to predict labels\n# \n# Now go ahead and implement those two models!\n\n# <a name='4'></a>\n# ## 4 - Two-layer Neural Network\n# \n# <a name='ex-1'></a>\n# ### Exercise 1 - two_layer_model \n# \n# Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions and their inputs are:\n# ```python\n# def initialize_parameters(n_x, n_h, n_y):\n# ...\n# return parameters \n# def linear_activation_forward(A_prev, W, b, activation):\n# ...\n# return A, cache\n# def compute_cost(AL, Y):\n# ...\n# return cost\n# def linear_activation_backward(dA, cache, activation):\n# ...\n# return dA_prev, dW, db\n# def update_parameters(parameters, grads, learning_rate):\n# ...\n# return parameters\n# ```\n\n# In[6]:\n\n\n### CONSTANTS DEFINING THE MODEL ####\nn_x = 12288 # num_px * num_px * 3\nn_h = 7\nn_y = 1\nlayers_dims = (n_x, n_h, n_y)\nlearning_rate = 0.0075\n\n\n# In[7]:\n\n\n# GRADED FUNCTION: two_layer_model\n\ndef two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):\n \"\"\"\n Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (n_x, number of examples)\n Y -- true \"label\" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)\n layers_dims -- dimensions of the layers (n_x, n_h, n_y)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- If set to True, this will print the cost every 100 iterations \n \n Returns:\n parameters -- a dictionary containing W1, W2, b1, and b2\n \"\"\"\n \n np.random.seed(1)\n grads = {}\n costs = [] # to keep track of the cost\n m = X.shape[1] # number of examples\n (n_x, n_h, n_y) = layers_dims\n \n # Initialize parameters dictionary, by calling one of the functions you'd previously implemented\n #(≈ 1 line of code)\n # parameters = ...\n # YOUR CODE STARTS HERE\n parameters = initialize_parameters(n_x, n_h, n_y)\n \n # YOUR CODE ENDS HERE\n \n # Get W1, b1, W2 and b2 from the dictionary parameters.\n W1 = 
parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n \n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: \"X, W1, b1, W2, b2\". Output: \"A1, cache1, A2, cache2\".\n #(≈ 2 lines of code)\n # A1, cache1 = ...\n # A2, cache2 = ...\n # YOUR CODE STARTS HERE\n A1, cache1 = linear_activation_forward(X, W1, b1, activation = \"relu\")\n A2, cache2 = linear_activation_forward(A1, W2, b2, activation = \"sigmoid\")\n \n # YOUR CODE ENDS HERE\n \n # Compute cost\n #(≈ 1 line of code)\n # cost = ...\n # YOUR CODE STARTS HERE\n cost = compute_cost(A2, Y)\n \n # YOUR CODE ENDS HERE\n \n # Initializing backward propagation\n dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))\n \n # Backward propagation. Inputs: \"dA2, cache2, cache1\". Outputs: \"dA1, dW2, db2; also dA0 (not used), dW1, db1\".\n #(≈ 2 lines of code)\n # dA1, dW2, db2 = ...\n # dA0, dW1, db1 = ...\n # YOUR CODE STARTS HERE\n dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation = \"sigmoid\")\n dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation = \"relu\")\n \n # YOUR CODE ENDS HERE\n \n # Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2\n grads['dW1'] = dW1\n grads['db1'] = db1\n grads['dW2'] = dW2\n grads['db2'] = db2\n \n # Update parameters.\n #(approx. 1 line of code)\n # parameters = ...\n # YOUR CODE STARTS HERE\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # YOUR CODE ENDS HERE\n\n # Retrieve W1, b1, W2, b2 from parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n \n # Print the cost every 100 iterations\n if print_cost and i % 100 == 0 or i == num_iterations - 1:\n print(\"Cost after iteration {}: {}\".format(i, np.squeeze(cost)))\n if i % 100 == 0 or i == num_iterations:\n costs.append(cost)\n\n return parameters, costs\n\ndef plot_costs(costs, learning_rate=0.0075):\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per hundreds)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n\n# In[8]:\n\n\nparameters, costs = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2, print_cost=False)\n\nprint(\"Cost after first iteration: \" + str(costs[0]))\n\ntwo_layer_model_test(two_layer_model)\n\n\n# **Expected output:**\n# \n# ```\n# cost after iteration 1 must be around 0.69\n# ```\n\n# <a name='4-1'></a>\n# ### 4.1 - Train the model \n# \n# If your code passed the previous cell, run the cell below to train your parameters. \n# \n# - The cost should decrease on every iteration. \n# \n# - It may take up to 5 minutes to run 2500 iterations. \n\n# In[9]:\n\n\nparameters, costs = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)\nplot_costs(costs, learning_rate)\n\n\n# **Expected Output**:\n# <table> \n# <tr>\n# <td> <b>Cost after iteration 0</b></td>\n# <td> 0.6930497356599888 </td>\n# </tr>\n# <tr>\n# <td> <b>Cost after iteration 100</b></td>\n# <td> 0.6464320953428849 </td>\n# </tr>\n# <tr>\n# <td> <b>...</b></td>\n# <td> ... </td>\n# </tr>\n# <tr>\n# <td> <b>Cost after iteration 2499</b></td>\n# <td> 0.04421498215868956 </td>\n# </tr>\n# </table>\n\n# **Nice!** You successfully trained the model. Good thing you built a vectorized implementation! 
Otherwise it might have taken 10 times longer to train this.\n# \n# Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.\n\n# In[10]:\n\n\npredictions_train = predict(train_x, train_y, parameters)\n\n\n# **Expected Output**:\n# <table> \n# <tr>\n# <td> <b>Accuracy</b></td>\n# <td> 0.9999999999999998 </td>\n# </tr>\n# </table>\n\n# In[11]:\n\n\npredictions_test = predict(test_x, test_y, parameters)\n\n\n# **Expected Output**:\n# \n# <table> \n# <tr>\n# <td> <b>Accuracy</b></td>\n# <td> 0.72 </td>\n# </tr>\n# </table>\n\n# ### Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.\n# \n# **Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called \"early stopping\" and you'll hear more about it in the next course. Early stopping is a way to prevent overfitting. \n\n# <a name='5'></a>\n# ## 5 - L-layer Neural Network\n# \n# <a name='ex-2'></a>\n# ### Exercise 2 - L_layer_model \n# \n# Use the helper functions you implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\\times$(L-1) -> LINEAR -> SIGMOID*. The functions and their inputs are:\n# ```python\n# def initialize_parameters_deep(layers_dims):\n# ...\n# return parameters \n# def L_model_forward(X, parameters):\n# ...\n# return AL, caches\n# def compute_cost(AL, Y):\n# ...\n# return cost\n# def L_model_backward(AL, Y, caches):\n# ...\n# return grads\n# def update_parameters(parameters, grads, learning_rate):\n# ...\n# return parameters\n# ```\n\n# In[12]:\n\n\n### CONSTANTS ###\nlayers_dims = [12288, 20, 7, 5, 1] # 4-layer model\n\n\n# In[13]:\n\n\n# GRADED FUNCTION: L_layer_model\n\ndef L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):\n \"\"\"\n Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.\n \n Arguments:\n X -- data, numpy array of shape (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)\n layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).\n learning_rate -- learning rate of the gradient descent update rule\n num_iterations -- number of iterations of the optimization loop\n print_cost -- if True, it prints the cost every 100 steps\n \n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n\n np.random.seed(1)\n costs = [] # keep track of cost\n \n # Parameters initialization.\n #(≈ 1 line of code)\n # parameters = ...\n # YOUR CODE STARTS HERE\n parameters = initialize_parameters_deep(layers_dims)\n \n # YOUR CODE ENDS HERE\n \n # Loop (gradient descent)\n for i in range(0, num_iterations):\n\n # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.\n #(≈ 1 line of code)\n # AL, caches = ...\n # YOUR CODE STARTS HERE\n AL, caches = L_model_forward(X, parameters)\n \n # YOUR CODE ENDS HERE\n \n # Compute cost.\n #(≈ 1 line of code)\n # cost = ...\n # YOUR CODE STARTS HERE\n cost = compute_cost(AL, Y)\n \n # YOUR CODE ENDS HERE\n \n # Backward propagation.\n #(≈ 1 line of code)\n # grads = ... 
\n # YOUR CODE STARTS HERE\n grads = L_model_backward(AL, Y, caches)\n \n # YOUR CODE ENDS HERE\n \n # Update parameters.\n #(≈ 1 line of code)\n # parameters = ...\n # YOUR CODE STARTS HERE\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # YOUR CODE ENDS HERE\n \n # Print the cost every 100 iterations\n if print_cost and i % 100 == 0 or i == num_iterations - 1:\n print(\"Cost after iteration {}: {}\".format(i, np.squeeze(cost)))\n if i % 100 == 0 or i == num_iterations:\n costs.append(cost)\n \n return parameters, costs\n\n\n# In[14]:\n\n\nparameters, costs = L_layer_model(train_x, train_y, layers_dims, num_iterations = 1, print_cost = False)\n\nprint(\"Cost after first iteration: \" + str(costs[0]))\n\nL_layer_model_test(L_layer_model)\n\n\n# <a name='5-1'></a>\n# ### 5.1 - Train the model \n# \n# If your code passed the previous cell, run the cell below to train your model as a 4-layer neural network. \n# \n# - The cost should decrease on every iteration. \n# \n# - It may take up to 5 minutes to run 2500 iterations. \n\n# In[15]:\n\n\nparameters, costs = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)\n\n\n# **Expected Output**:\n# <table> \n# <tr>\n# <td> <b>Cost after iteration 0</b></td>\n# <td> 0.771749 </td>\n# </tr>\n# <tr>\n# <td> <b>Cost after iteration 100</b></td>\n# <td> 0.672053 </td>\n# </tr>\n# <tr>\n# <td> <b>...</b></td>\n# <td> ... </td>\n# </tr>\n# <tr>\n# <td> <b>Cost after iteration 2499</b></td>\n# <td> 0.088439 </td>\n# </tr>\n# </table>\n\n# In[16]:\n\n\npred_train = predict(train_x, train_y, parameters)\n\n\n# **Expected Output**:\n# \n# <table>\n# <tr>\n# <td>\n# <b>Train Accuracy</b>\n# </td>\n# <td>\n# 0.985645933014\n# </td>\n# </tr>\n# </table>\n\n# In[17]:\n\n\npred_test = predict(test_x, test_y, parameters)\n\n\n# **Expected Output**:\n# \n# <table> \n# <tr>\n# <td> <b>Test Accuracy</b></td>\n# <td> 0.8 </td>\n# </tr>\n# </table>\n\n# ### Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. \n# \n# This is pretty good performance for this task. Nice job! \n# \n# In the next course on \"Improving deep neural networks,\" you'll be able to obtain even higher accuracy by systematically searching for better hyperparameters: learning_rate, layers_dims, or num_iterations, for example. \n\n# <a name='6'></a>\n# ## 6 - Results Analysis\n# \n# First, take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images. \n\n# In[18]:\n\n\nprint_mislabeled_images(classes, test_x, test_y, pred_test)\n\n\n# **A few types of images the model tends to do poorly on include:** \n# - Cat body in an unusual position\n# - Cat appears against a background of a similar color\n# - Unusual cat color and species\n# - Camera Angle\n# - Brightness of the picture\n# - Scale variation (cat is very large or small in image) \n\n# ### Congratulations on finishing this assignment! \n# \n# You just built and trained a deep L-layer neural network, and applied it in order to distinguish cats from non-cats, a very serious and important task in deep learning. ;) \n# \n# By now, you've also completed all the assignments for Course 1 in the Deep Learning Specialization. Amazing work! If you'd like to test out how closely you resemble a cat yourself, there's an optional ungraded exercise below, where you can test your own image. \n# \n# Great work and hope to see you in the next course! 
\n\n# <a name='7'></a>\n# ## 7 - Test with your own image (optional/ungraded exercise) ##\n# \n# From this point, if you so choose, you can use your own image to test the output of your model. To do that follow these steps:\n# \n# 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n# 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n# 3. Change your image's name in the following code\n# 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!\n\n# In[ ]:\n\n\n## START CODE HERE ##\nmy_image = \"my_image.jpg\" # change this to the name of your image file \nmy_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)\n## END CODE HERE ##\n\nfname = \"images/\" + my_image\nimage = np.array(Image.open(fname).resize((num_px, num_px)))\nplt.imshow(image)\nimage = image / 255.\nimage = image.reshape((1, num_px * num_px * 3)).T\n\nmy_predicted_image = predict(image, my_label_y, parameters)\n\n\nprint (\"y = \" + str(np.squeeze(my_predicted_image)) + \", your L-layer model predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")\n\n\n# **References**:\n# \n# - for auto-reloading external module: http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.random.seed", "numpy.squeeze", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.divide", "matplotlib.pyplot.ylabel" ] ]
MacHu-GWU/s3splitmerge-project
[ "873892158f4a2d0ee20f291e5d3b2a80f0bae1ba" ]
[ "poc/append_parquet_file.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nFind out how to merge big parquet file with low memory usage.\n\"\"\"\n\nimport io\nfrom s3splitmerge.tests import boto_ses\nimport pyarrow\nimport pyarrow.parquet\nimport pandas as pd\nimport awswrangler as wr\n\nvalue = \"Hello World\"\ncolumns = [\"id\", \"value\"]\ndf1 = pd.DataFrame(\n [\n (1, value),\n (2, value),\n ],\n columns=columns\n)\ndf2 = pd.DataFrame(\n [\n (3, value),\n (4, value),\n ],\n columns=columns\n)\n\n\ndef create_many_files():\n df1.to_parquet(\"data-1.parquet\", index=False)\n df2.to_parquet(\"data-2.parquet\", index=False)\n\n\ndef concatenate_files():\n buffer = io.BytesIO()\n with open(\"append-parquet-file/data.parquet\", \"wb\") as f_out:\n for filename in [\"data-1.parquet\", \"data-2.parquet\"]:\n with open(filename, \"rb\") as f_in:\n buffer.write(f_in.read())\n f_out.write(buffer.getvalue())\n\n\ndef concatenate_in_memory():\n buffer = io.BytesIO()\n pqwriter = pyarrow.parquet.ParquetWriter(\n buffer,\n pyarrow.Table.from_pandas(df1).schema,\n )\n\n for df in [df1, df2]:\n t = pyarrow.Table.from_pandas(df)\n pqwriter.write_table(t)\n pqwriter.close()\n\n with open(\"append-parquet-file/data.parquet\", \"wb\") as f:\n f.write(buffer.getvalue())\n\n\ndef concat_s3_parquet_file():\n wr.s3.to_parquet(\n df1,\n path=\"s3://aws-data-lab-sanhe-aws-etl-solutions/s3splitmerge/poc/merge-parquet/data-1.parquet\",\n boto3_session=boto_ses,\n )\n wr.s3.to_parquet(\n df2,\n path=\"s3://aws-data-lab-sanhe-aws-etl-solutions/s3splitmerge/poc/merge-parquet/data-2.parquet\",\n boto3_session=boto_ses,\n )\n\n df1_ = wr.s3.read_parquet(\"s3://aws-data-lab-sanhe-aws-etl-solutions/s3splitmerge/poc/merge-parquet/data-1.parquet\")\n df2_ = wr.s3.read_parquet(\"s3://aws-data-lab-sanhe-aws-etl-solutions/s3splitmerge/poc/merge-parquet/data-2.parquet\")\n\n buffer = io.BytesIO()\n parquet_writer = pyarrow.parquet.ParquetWriter(\n buffer,\n pyarrow.Table.from_pandas(df1).schema,\n )\n\n for df in [df1, df2]:\n t = pyarrow.Table.from_pandas(df)\n parquet_writer.write_table(t)\n parquet_writer.close()\n\n # with open(\"data.parquet\", \"wb\") as f:\n # f.write(buffer.getvalue())\n # with open(\"data.parquet\", \"rb\") as f:\n # s3_client.put_object(Bucket=\"aws-data-lab-sanhe-aws-etl-solutions\", Key=\"s3splitmerge/poc/merge-parquet/data.parquet\", Body=f.read())\n # df = wr.s3.read_parquet(\"s3://aws-data-lab-sanhe-aws-etl-solutions/s3splitmerge/poc/merge-parquet/data.parquet\")\n\n # with open(\"data.parquet\", \"wb\") as f:\n # f.write(buffer.getvalue())\n df = wr.p.read_parquet(\"data.parquet\")\n\n print(df)\n\n\nif __name__ == \"__main__\":\n # create_many_files()\n # concatenate_files()\n # concatenate_in_memory()\n # df = pd.read_parquet(\"data-11.parquet\")\n # print(df)\n\n # concat_s3_parquet_file()\n\n print(df1.append(df2))\n print(df1)\n pass\n" ]
[ [ "pandas.DataFrame" ] ]
jenia90/Python
[ "04f156a8973d6156a4357e0717d9eb0aa264d086" ]
[ "machine_learning/scoring_functions.py" ]
[ "import numpy as np\n\n\"\"\" Here I implemented the scoring functions.\n MAE, MSE, RMSE, RMSLE are included.\n\n Those are used for calculating differences between\n predicted values and actual values.\n\n Metrics are slightly differentiated. Sometimes squared, rooted,\n even log is used.\n\n Using log and roots can be perceived as tools for penalizing big\n errors. However, using appropriate metrics depends on the situations,\n and types of data\n\"\"\"\n\n\n# Mean Absolute Error\ndef mae(predict, actual):\n \"\"\"\n Examples(rounded for precision):\n >>> actual = [1,2,3];predict = [1,4,3]\n >>> np.around(mae(predict,actual),decimals = 2)\n 0.67\n\n >>> actual = [1,1,1];predict = [1,1,1]\n >>> mae(predict,actual)\n 0.0\n \"\"\"\n predict = np.array(predict)\n actual = np.array(actual)\n\n difference = abs(predict - actual)\n score = difference.mean()\n\n return score\n\n\n# Mean Squared Error\ndef mse(predict, actual):\n \"\"\"\n Examples(rounded for precision):\n >>> actual = [1,2,3];predict = [1,4,3]\n >>> np.around(mse(predict,actual),decimals = 2)\n 1.33\n\n >>> actual = [1,1,1];predict = [1,1,1]\n >>> mse(predict,actual)\n 0.0\n \"\"\"\n predict = np.array(predict)\n actual = np.array(actual)\n\n difference = predict - actual\n square_diff = np.square(difference)\n\n score = square_diff.mean()\n return score\n\n\n# Root Mean Squared Error\ndef rmse(predict, actual):\n \"\"\"\n Examples(rounded for precision):\n >>> actual = [1,2,3];predict = [1,4,3]\n >>> np.around(rmse(predict,actual),decimals = 2)\n 1.15\n\n >>> actual = [1,1,1];predict = [1,1,1]\n >>> rmse(predict,actual)\n 0.0\n \"\"\"\n predict = np.array(predict)\n actual = np.array(actual)\n\n difference = predict - actual\n square_diff = np.square(difference)\n mean_square_diff = square_diff.mean()\n score = np.sqrt(mean_square_diff)\n return score\n\n\n# Root Mean Square Logarithmic Error\ndef rmsle(predict, actual):\n \"\"\"\n Examples(rounded for precision):\n >>> actual = [10,10,30];predict = [10,2,30]\n >>> np.around(rmsle(predict,actual),decimals = 2)\n 0.75\n\n >>> actual = [1,1,1];predict = [1,1,1]\n >>> rmsle(predict,actual)\n 0.0\n \"\"\"\n predict = np.array(predict)\n actual = np.array(actual)\n\n log_predict = np.log(predict + 1)\n log_actual = np.log(actual + 1)\n\n difference = log_predict - log_actual\n square_diff = np.square(difference)\n mean_square_diff = square_diff.mean()\n\n score = np.sqrt(mean_square_diff)\n\n return score\n\n\n# Mean Bias Deviation\ndef mbd(predict, actual):\n \"\"\"\n This value is Negative, if the model underpredicts,\n positive, if it overpredicts.\n\n Example(rounded for precision):\n\n Here the model overpredicts\n >>> actual = [1,2,3];predict = [2,3,4]\n >>> np.around(mbd(predict,actual),decimals = 2)\n 50.0\n\n Here the model underpredicts\n >>> actual = [1,2,3];predict = [0,1,1]\n >>> np.around(mbd(predict,actual),decimals = 2)\n -66.67\n \"\"\"\n predict = np.array(predict)\n actual = np.array(actual)\n\n difference = predict - actual\n numerator = np.sum(difference) / len(predict)\n denumerator = np.sum(actual) / len(predict)\n # print(numerator, denumerator)\n score = float(numerator) / denumerator * 100\n\n return score\n\n\ndef manual_accuracy(predict, actual):\n return np.mean(np.array(actual) == np.array(predict))\n" ]
[ [ "numpy.square", "numpy.log", "numpy.sqrt", "numpy.array", "numpy.sum" ] ]
kennethrithvik/smart_knee_brace
[ "be7cacb4487874b6639a62965fb25f0b4b1c196b" ]
[ "python_libraries/dataframe_logger/plotter.py" ]
[ "#!python3\n\n########\n'''\nMain logger to be used\nusage :\npython mqtt_logger <ACTION> <TIME in SECONDS>\n'''\n#######\nimport paho.mqtt.client as mqtt\nimport pandas as pd\nimport sys\nimport time\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# Don't forget to change the variables for the MQTT broker!\nmqtt_username = \"knee_brace\"\nmqtt_password = \"knee_brace\"\nmqtt_topic = \"knee_brace_nodemcu/#\"\nmqtt_broker_ip = \"192.168.1.190\"\nlist = []\ndataset = []\nclient = mqtt.Client()\n# Set the username and password for the MQTT client\nclient.username_pw_set(mqtt_username, mqtt_password)\n\n\ndef on_connect(client, userdata, rc, test):\n # rc is the error code returned when connecting to the broker\n print(\"Connected!\", str(client), str(userdata), str(rc), str(test))\n print(\"Topic: \", mqtt_topic + \": \")\n\n # Once the client has connected to the broker, subscribe to the topic\n client.subscribe(mqtt_topic)\n\ndef on_disconnect(client, userdata, rc):\n pass\n\n\ndef on_message(client, userdata, msg):\n msg=str(msg.payload.decode(\"utf-8\", \"ignore\"))\n list.append(msg)\n #print(msg)\n\nclient.on_connect = on_connect # attach function to callback\nclient.on_message = on_message # attach function to callback\nclient.on_disconnect = on_disconnect\nclient.connect(mqtt_broker_ip, 1883)\n\nprint(\"starting\")\nclient.loop_start()\n\n\n# Create figure for plotting\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nxs = []\nys = []\n\n# This function is called periodically from FuncAnimation\ndef animate(i, xs, ys):\n readings = []\n for data in list:\n if data is None:\n readings = []\n continue\n if data == \"***\":\n readings = []\n continue\n readings.append(data)\n if (len(readings) == 35):\n x=float(readings[1])\n y = float(readings[2])\n z = float(readings[3])\n dataset.append(readings)\n readings = []\n\n # Add x and y to lists\n xs.append(dt.datetime.now().strftime('%M:%S.%f'))\n ys.append([x,y,z])\n # Limit x and y lists to 20 items\n xs = xs[-20:]\n ys = ys[-20:]\n\n # Draw x and y lists\n ax.clear()\n ax.plot(xs, ys)\n\n # Format plot\n plt.xticks(rotation=45, ha='right')\n plt.subplots_adjust(bottom=0.30)\n plt.title('top-accel')\n plt.ylabel('top-accel')\n\n\n# Set up plot to call animate() function periodically\nani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=50)\nplt.show()\n\n#client.disconnect()\nprint(\"ending \")" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
vat99/casehold
[ "4f6703bb70def75abf534f13a5cb1201e7c295d7" ]
[ "multiple_choice/mc_no_trainer.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning a 🤗 Transformers model on multiple choice relying on the accelerate library without using a Trainer.\n\"\"\"\n# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.\n\nimport argparse\nimport logging\nimport math\nimport os\nimport random\nfrom dataclasses import dataclass\nfrom typing import Optional, Union\n\nimport datasets\nimport torch\nfrom datasets import load_dataset, load_metric\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm.auto import tqdm\n\nfrom utils_multiple_choice import MultipleChoiceDataset, Split, processors\n\nimport transformers\nfrom accelerate import Accelerator\nfrom transformers import (\n CONFIG_MAPPING,\n MODEL_MAPPING,\n AdamW,\n AutoConfig,\n AutoModelForMultipleChoice,\n AutoTokenizer,\n PreTrainedTokenizerBase,\n SchedulerType,\n default_data_collator,\n get_scheduler,\n set_seed,\n)\nfrom transformers.file_utils import PaddingStrategy\n\n\nlogger = logging.getLogger(__name__)\n# You should update this to your particular problem to have better documentation of `model_type`\nMODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a text classification task\")\n parser.add_argument(\n \"--max_seq_length\",\n type=int,\n default=128,\n help=(\n \"The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated,\"\n \" sequences shorter will be padded if `--pad_to_max_lengh` is passed.\"\n ),\n )\n parser.add_argument(\n \"--task_name\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n default=\"casehold\"\n )\n parser.add_argument(\n \"--data_dir\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n default=\"data_processed/\"\n )\n parser.add_argument(\n \"--cache_dir\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n default=\"cache_dir/\"\n )\n parser.add_argument(\n \"--model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n required=True,\n )\n parser.add_argument(\n \"--config_name\",\n type=str,\n default=None,\n help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n type=str,\n default=None,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--use_slow_tokenizer\",\n action=\"store_true\",\n help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n )\n parser.add_argument(\n \"--per_device_train_batch_size\",\n type=int,\n default=4,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n parser.add_argument(\n \"--per_device_eval_batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the evaluation dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n parser.add_argument(\"--num_train_epochs\", type=int, default=6, help=\"Total number of training epochs to perform.\")\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. 
If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--lr_scheduler_type\",\n type=SchedulerType,\n default=\"linear\",\n help=\"The scheduler type to use.\",\n choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n )\n parser.add_argument(\n \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=None,\n help=\"Model type to use if training from scratch.\",\n choices=MODEL_TYPES,\n )\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Activate debug mode and run training only with a subset of data.\",\n )\n args = parser.parse_args()\n if args.output_dir is not None:\n os.makedirs(args.output_dir, exist_ok=True)\n\n return args\n\n\n@dataclass\nclass DataCollatorForMultipleChoice:\n \"\"\"\n Data collator that will dynamically pad the inputs for multiple choice received.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_seq_length'`: Pad to a maximum length specified with the argument :obj:`max_seq_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_seq_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_seq_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, features):\n label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n labels = [feature.pop(label_name) for feature in features]\n batch_size = len(features)\n num_choices = len(features[0][\"input_ids\"])\n flattened_features = [\n [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features\n ]\n flattened_features = sum(flattened_features, [])\n\n batch = self.tokenizer.pad(\n flattened_features,\n padding=self.padding,\n max_seq_length=self.max_seq_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"pt\",\n )\n\n # Un-flatten\n batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}\n # Add back labels\n batch[\"labels\"] 
= torch.tensor(labels, dtype=torch.int64)\n return batch\n\n\ndef main():\n args = parse_args()\n\n # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.\n accelerator = Accelerator()\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n logger.info(accelerator.state)\n\n # Setup logging, we only want one process per machine to log things on the screen.\n # accelerator.is_local_main_process is only True for one process per machine.\n logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n \n try:\n processor = processors[args.task_name]()\n label_list = processor.get_labels()\n num_labels = len(label_list)\n except KeyError:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n\n # Load pretrained model and tokenizer\t\n config = AutoConfig.from_pretrained(\n\t\targs.config_name if args.config_name else args.model_name_or_path,\n\t\tnum_labels=num_labels,\n\t\tfinetuning_task=args.task_name,\n\t\tcache_dir=args.cache_dir,\n\t)\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n cache_dir=args.cache_dir,\n # Default fast tokenizer is buggy on CaseHOLD task, switch to legacy tokenizer\n use_fast=False,\n\t)\n model = AutoModelForMultipleChoice.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n\n model.resize_token_embeddings(len(tokenizer))\n\n # # Preprocessing the datasets.\n # # First we tokenize all the texts.\n # padding = \"max_seq_length\" if args.pad_to_max_seq_length else False\n\n # def preprocess_function(examples):\n # first_sentences = [[context] * 4 for context in examples[context_name]]\n # question_headers = examples[question_header_name]\n # second_sentences = [\n # [f\"{header} {examples[end][i]}\" for end in ending_names] for i, header in enumerate(question_headers)\n # ]\n # labels = examples[label_column_name]\n\n # # Flatten out\n # first_sentences = sum(first_sentences, [])\n # second_sentences = sum(second_sentences, [])\n\n # # Tokenize\n # tokenized_examples = tokenizer(\n # first_sentences,\n # second_sentences,\n # max_seq_length=args.max_seq_length,\n # padding=padding,\n # truncation=True,\n # )\n # # Un-flatten\n # tokenized_inputs = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n # tokenized_inputs[\"labels\"] = labels\n # return tokenized_inputs\n\n # processed_datasets = raw_datasets.map(\n # preprocess_function, batched=True, remove_columns=raw_datasets[\"train\"].column_names\n # )\n\n # train_dataset = processed_datasets[\"train\"]\n # eval_dataset = processed_datasets[\"validation\"]\n\n train_dataset = MultipleChoiceDataset(\n\t\t\t\t\tdata_dir=args.data_dir,\n\t\t\t\t\ttokenizer=tokenizer,\n\t\t\t\t\ttask=args.task_name,\n\t\t\t\t\tmax_seq_length=args.max_seq_length,\n\t\t\t\t\tmode=Split.train,\n\t\t\t\t)\n\n eval_dataset = MultipleChoiceDataset(\n 
data_dir=args.data_dir,\n tokenizer=tokenizer,\n task=args.task_name,\n max_seq_length=args.max_seq_length,\n # Pass mode=Split.test to load test split from file named test.csv in data directory\n mode=Split.dev,\n )\n\n test_dataset = MultipleChoiceDataset(\n data_dir=args.data_dir,\n tokenizer=tokenizer,\n task=args.task_name,\n max_seq_length=args.max_seq_length,\n # Pass mode=Split.test to load test split from file named test.csv in data directory\n mode=Split.test,\n )\n \n example_dataset = MultipleChoiceDataset(\n\t\t\t\t\tdata_dir=args.data_dir,\n\t\t\t\t\ttokenizer=tokenizer,\n\t\t\t\t\ttask=args.task_name,\n\t\t\t\t\tmax_seq_length=args.max_seq_length,\n\t\t\t\t\tmode=Split.example,\n\t\t\t\t)\n\n # Log a few random samples from the training set:\n # for index in random.sample(range(len(train_dataset)), 3):\n # logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # # DataLoaders creation:\n # if args.pad_to_max_seq_length:\n # # If padding was already done ot max length, we use the default data collator that will just convert everything\n # # to tensors.\n # data_collator = default_data_collator\n # else:\n # # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of\n # # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple\n # # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n # data_collator = DataCollatorForMultipleChoice(\n # tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)\n # )\n\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size\n )\n eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size)\n test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size)\n #example_dataloader = DataLoader(example_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size)\n \n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Use the device given by the `accelerator` object.\n device = accelerator.device\n model.to(device)\n\n # Prepare everything with our `accelerator`.\n model, optimizer, train_dataloader, eval_dataloader, test_dataloader = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader, test_dataloader\n )\n\n # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be\n # shorter in multiprocess)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n else:\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n lr_scheduler = get_scheduler(\n 
name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n \n # Metrics\n metric = load_metric(\"accuracy\")\n\n # example\n # model.eval()\n # for step, batch in enumerate(example_dataloader):\n # with torch.no_grad():\n # outputs = model(**batch)\n # predictions = outputs.logits.argmax(dim=-1)\n # metric.add_batch(\n # predictions=accelerator.gather(predictions),\n # references=accelerator.gather(batch[\"labels\"]),\n # )\n\n # Train!\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n # Only show the progress bar once on each machine.\n progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n completed_steps = 0\n\n #import pdb; pdb.set_trace()\n eval_metrics = {args.seed: []}\n #test_metrics = []\n\n for epoch in range(args.num_train_epochs):\n model.train()\n for step, batch in enumerate(train_dataloader):\n outputs = model(**batch)\n loss = outputs.loss\n loss = loss / args.gradient_accumulation_steps\n accelerator.backward(loss)\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n completed_steps += 1\n\n if completed_steps >= args.max_train_steps:\n break\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=accelerator.gather(predictions),\n references=accelerator.gather(batch[\"labels\"]),\n )\n\n eval_metric = metric.compute()\n eval_metrics[args.seed].append(eval_metric)\n accelerator.print(f\"\\n ---- validation: seed {args.seed} epoch {epoch}: {eval_metric} ----\")\n\n accelerator.print(f\"\\n <<<< validation metrics: {eval_metrics} >>>>\")\n \n model.eval()\n for step, batch in enumerate(test_dataloader):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=accelerator.gather(predictions),\n references=accelerator.gather(batch[\"labels\"]),\n )\n\n test_metric = metric.compute()\n accelerator.print(f\"\\n **** test: seed {args.seed} {test_metric} ****\")\n\n # if args.output_dir is not None:\n # accelerator.wait_for_everyone()\n # unwrapped_model = accelerator.unwrap_model(model)\n # unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "torch.utils.data.dataloader.DataLoader", "torch.no_grad", "torch.tensor" ] ]
alexf1991/CNBF
[ "1dda6a6beecd3c37e89e37ade39ec8dbdd76eea4" ]
[ "scripts/evaluate.py" ]
[ "import os\nimport numpy as np\nfrom absl import logging\nfrom absl import app\nfrom absl import flags\nfrom utils.utils import *\nfrom utils.trainer import ModelEnvironment\nfrom utils.summary_utils import Summaries\nfrom models.cnbf import CNBF\nfrom models.complex_cnn import CNBF_CNN\nfrom models.eval_functions.nbf_loss import EvalFunctions\nfrom loaders.feature_generator import feature_generator\nimport time\nimport numpy as np\nimport argparse\nimport json\nimport os\nimport sys\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense, Activation, LSTM, Input, Lambda\n# import keras.backend as K\nfrom loaders.feature_generator import feature_generator\nfrom utils.mat_helpers import *\n# from utils.keras_helpers import *\nfrom algorithms.audio_processing import *\nfrom utils.matplotlib_helpers import *\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nsys.path.append(os.path.abspath('../'))\n\n\ndef preprocess(sample):\n \"\"\"Preprocess a single sample.\"\"\"\n return sample\n\n\ndef data_generator(data, batch_size, is_training, is_validation=False, take_n=None, skip_n=None, input_shape=None):\n dataset = tf.data.Dataset.from_generator(data, (tf.float32, tf.float32))\n if is_training:\n shuffle_buffer = 64\n\n if skip_n != None:\n dataset = dataset.skip(skip_n)\n if take_n != None:\n dataset = dataset.take(take_n)\n\n if is_training:\n\n # dataset = dataset.shuffle(shuffle_buffer)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n else:\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset\n\n\ndef learning_rate_fn(epoch):\n if epoch >= 20 and epoch < 30:\n return 0.01\n elif epoch >= 30 and epoch < 40:\n return 0.001\n elif epoch >= 40:\n return 0.001\n else:\n return 1.0\n\n\n# ---------------------------------------------------------\n# ---------------------------------------------------------\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('model_dir', '/tmp', 'save directory name')\nflags.DEFINE_string('mode', 'local', 'Mode for the training local or cluster')\nflags.DEFINE_integer('start_epoch', 0, 'Number of epochs to train')\nflags.DEFINE_integer('batch_size', 5, 'Mini-batch size')\nflags.DEFINE_integer('eval_every_n_th_epoch', 1,\n 'Integer discribing after how many epochs the test and validation set are evaluted')\nflags.DEFINE_string('config_file', '../cnbf.json', 'Name of json configuration file')\n\n\n\ndef main(argv):\n try:\n\n task_id = int(os.environ['SLURM_ARRAY_TASK_ID'])\n\n except KeyError:\n\n task_id = 0\n\n model_save_dir = FLAGS.model_dir\n print(\"Saving model to : \" + str(model_save_dir))\n start_epoch = FLAGS.start_epoch\n load_model = True\n batch_size = FLAGS.batch_size\n\n # load config file\n try:\n print('*** loading config file: %s' % FLAGS.config_file)\n with open(FLAGS.config_file, 'r') as f:\n config = json.load(f)\n config[\"config_file_dir\"] = FLAGS.config_file\n config[\"predictions_path\"] = os.path.join(FLAGS.model_dir,\"predictions\")\n if not(os.path.exists(config[\"predictions_path\"])):\n os.makedirs(config[\"predictions_path\"])\n except:\n print('*** could not load config file: %s' % FLAGS.config_file)\n quit(0)\n\n # If load_model get old configuration\n if load_model:\n try:\n params = csv_to_dict(os.path.join(model_save_dir, \"model_params.csv\"))\n except:\n print(\"Could not find model hyperparameters!\")\n\n\n fgen_test = feature_generator(config, 'test', 
steps=batch_size)\n\n input_shape = (batch_size, fgen_test.nfram, fgen_test.nbin, fgen_test.nmic)\n # ResNet 18\n model = CNBF_CNN(config=config,\n fgen=fgen_test,\n n_ch_base=8,\n batch_size=batch_size,\n name=\"cnbf\",\n kernel_regularizer=tf.keras.regularizers.l2(2e-4),\n kernel_initializer=tf.keras.initializers.he_normal(),\n dropout=0.0)\n\n # Test data generator\n test_ds = data_generator(fgen_test.generate, batch_size, is_training=False, input_shape=input_shape)\n # Create summaries to log\n scalar_summary_names = [\"total_loss\",\n \"bf_loss\",\n \"weight_decay_loss\",\n \"accuracy\"]\n\n summaries = Summaries(scalar_summary_names=scalar_summary_names,\n learning_rate_names=[\"learning_rate\"],\n save_dir=model_save_dir,\n modes=[\"train\", \"test\"],\n summaries_to_print={\"train\": [\"total_loss\", \"accuracy\"],\n \"eval\": [\"total_loss\", \"accuracy\"]})\n\n # Create training setttings for models\n model_settings = [{'model': model,\n 'optimizer_type': tf.keras.optimizers.Adam,\n 'base_learning_rate': 1e-3,\n 'learning_rate_fn': learning_rate_fn,\n 'init_data': [tf.random.normal(input_shape), tf.random.normal(input_shape)],\n 'trainable': True}]\n\n # Build training environment\n env = ModelEnvironment(None,\n None,\n test_ds,\n 0,\n EvalFunctions,\n model_settings=model_settings,\n summaries=summaries,\n eval_every_n_th_epoch=1,\n num_train_batches=1,\n load_model=load_model,\n save_dir=model_save_dir,\n input_keys=[0, 1],\n label_keys=[],\n start_epoch=start_epoch)\n results = []\n for data in test_ds:\n res = env.predict(data,training=False)\n results.append(res[\"predictions\"])\n\n\nif __name__ == '__main__':\n app.run(main)\n\n" ]
[ [ "tensorflow.keras.regularizers.l2", "tensorflow.keras.initializers.he_normal", "tensorflow.random.normal", "tensorflow.data.Dataset.from_generator" ] ]
Draeius/loopwalk_kernel
[ "bbe0dd55275936abd4099e8c89f4f2bdca3d2be5" ]
[ "graph/UnlabeledLabelConverter.py" ]
[ "from collections import Counter\nfrom typing import Dict\n\nfrom grakel import Graph\n\nfrom graph.AbstractConverter import AbstractConverter\nfrom graph.GraphLoader import MatrixBuilder\nimport numpy as np\n\n\nclass UnlabeledLabelConverter(AbstractConverter):\n # A special converter, that applies the label kernel while ignoring labels\n _matrixBuilder = MatrixBuilder()\n\n def __init__(self, k):\n self.k = k\n\n def convert(self, graph: Graph) -> Dict[str, int]:\n adj = self._matrixBuilder.getAdjacencyMatrix(graph)\n counter = {}\n for i in range(1, self._k):\n diagonal = np.diag(adj)\n counter[i] = sum(diagonal)\n adj = np.matmul(adj, adj)\n\n if len(counter) == 0:\n counter['none'] = 1\n return dict(counter)\n" ]
[ [ "numpy.diag", "numpy.matmul" ] ]