query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 distinct values)
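A minimal sketch of loading and inspecting one row with this schema via the Hugging Face datasets library; the repository id used below is a hypothetical placeholder, not the actual dataset path:

from datasets import load_dataset

# Hypothetical repository id -- replace with the actual dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language description of the target code
print(row["document"][:200])    # positive code snippet (up to ~222k characters)
print(len(row["negatives"]))    # 30 hard-negative code snippets
print(row["negative_scores"][:3])
print(row["document_score"], row["document_rank"])

The sample row below shows the query, document, metadata, and negatives fields of one example.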
Resample coarse segmentation tensor to the given bounding box and derive labels for each pixel of the bounding box
def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox):
    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)
    labels = F.interpolate(coarse_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
    return labels
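The document field above is the positive snippet for the query: it interpolates the coarse segmentation logits to the box size and takes a per-pixel argmax. A minimal usage sketch, assuming torch / torch.nn.functional imports and treating IntTupleBox as a plain (x, y, w, h) integer tuple (both are assumptions about the surrounding codebase, which resembles a detectron2/DensePose-style library):

import torch
import torch.nn.functional as F

# IntTupleBox is assumed here to be a plain (x, y, w, h) tuple of ints.
def resample_coarse_segm_tensor_to_bbox(coarse_segm, box_xywh_abs):
    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)
    return F.interpolate(coarse_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)

# One detection, 2 coarse channels (background vs. foreground), 32x32 mask logits.
coarse_segm = torch.randn(1, 2, 32, 32)
labels = resample_coarse_segm_tensor_to_bbox(coarse_segm, (10, 20, 64, 48))
print(labels.shape)  # torch.Size([1, 48, 64]) -- one label per pixel of the 64x48 box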
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resample_fine_and_coarse_segm_tensors_to_bbox(\n fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox\n):\n x, y, w, h = box_xywh_abs\n w = max(int(w), 1)\n h = max(int(h), 1)\n # coarse segmentation\n coarse_segm_bbox = F.interpolate(\n coarse_segm, (h, w), mode=\"bilinear\", align_corners=False\n ).argmax(dim=1)\n # combined coarse and fine segmentation\n labels = (\n F.interpolate(fine_segm, (h, w), mode=\"bilinear\", align_corners=False).argmax(dim=1)\n * (coarse_segm_bbox > 0).long()\n )\n return labels", "def resample_fine_and_coarse_segm_tensors_to_bbox(\n fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox\n):\n x, y, w, h = box_xywh_abs\n w = max(int(w), 1)\n h = max(int(h), 1)\n # coarse segmentation\n coarse_segm_bbox = F.interpolate(\n coarse_segm,\n (h, w),\n mode=\"bilinear\",\n align_corners=False,\n ).argmax(dim=1)\n # combined coarse and fine segmentation\n labels = (\n F.interpolate(fine_segm, (h, w), mode=\"bilinear\", align_corners=False).argmax(dim=1)\n * (coarse_segm_bbox > 0).long()\n )\n return labels", "def __call__(self, src, label, segm):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n segm = [tmask.resize(polys, (w, h), (img.shape[1], img.shape[0])) for polys in segm]\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n segm = [tmask.flip(polys, (w, h), flip_x=flips[0]) for polys in segm]\n\n # gt_masks (n, im_height, im_width) of uint8 -> float32 (cannot take uint8)\n masks = [mx.nd.array(tmask.to_mask(polys, (w, h))) for polys in segm]\n # n * (im_height, im_width) -> (n, im_height, im_width)\n masks = mx.nd.stack(*masks, axis=0)\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype), masks\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), masks, cls_target, box_target, box_mask", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img = img.astype(np.float32)\n\n if self.augmentation:\n img = self.random_color_aug(img)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype)\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n 
anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), cls_target, box_target, box_mask", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n\n # print(gt_boxes)\n # fang[-1] ok\n\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n all_rois[:, 1:5].data,\n gt_boxes[:, :4].data)\n max_overlaps, gt_assignment = overlaps.max(1)\n labels = gt_boxes[gt_assignment, [4]]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = (max_overlaps >= cfg.TRAIN.FG_THRESH).nonzero().view(-1)\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = ((max_overlaps < cfg.TRAIN.BG_THRESH_HI) + (max_overlaps >= cfg.TRAIN.BG_THRESH_LO) == 2).nonzero().view(-1)\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.numel() > 0 and bg_inds.numel() > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.numel())\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(fg_rois_per_image), replace=False)).long().cuda()]\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.numel() < bg_rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(bg_rois_per_image), replace=to_replace)).long().cuda()]\n elif fg_inds.numel() > 0:\n to_replace = fg_inds.numel() < rois_per_image\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = rois_per_image\n elif bg_inds.numel() > 0:\n to_replace = bg_inds.numel() < rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = torch.cat([fg_inds, bg_inds], 0)\n \n # Select sampled values from various arrays:\n labels = labels[keep_inds].contiguous()\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n # print(int(fg_rois_per_image)) -> 16\n\n rois = all_rois[keep_inds].contiguous()\n roi_scores = all_scores[keep_inds].contiguous()\n\n\n\n bbox_target_data, front_2_1_points_targets_data, front_2_2_points_targets_data, front_center_targets_data, \\\n back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data\\\n = _compute_targets(rois[:, 1:5].data, gt_boxes[gt_assignment[keep_inds]][:, :4].data, labels.data,\\\n gt_boxes[gt_assignment[keep_inds]][:, 5:9].data, gt_boxes[gt_assignment[keep_inds]][:, 9:13].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 13:15].data, gt_boxes[gt_assignment[keep_inds]][:, 15:19].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 19:23].data, gt_boxes[gt_assignment[keep_inds]][:, 23:25].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 25:27].data)\n\n bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights \\\n = 
_get_bbox_regression_labels(bbox_target_data, num_classes, front_2_1_points_targets_data, front_2_2_points_targets_data, \\\n front_center_targets_data, back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data)\n \n \n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights", "def distorted_bounding_box_crop(image,\n labels,\n bboxes,\n min_object_covered=0.05,\n aspect_ratio_range=(0.9, 1.1),\n area_range=(0.1, 1.0),\n max_attempts=200,\n scope=None):\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bboxes]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n bboxes = tf.minimum(bboxes, 1.0)\n bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.expand_dims(bboxes, 0),\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n distort_bbox)\n \n #tf_image.tf_summary_image(dst_image, bboxes, 'images_with_bounding_box')\n tf.summary.image('images_with_bounding_box', image_with_box)\n\n distort_bbox = distort_bbox[0, 0]\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n cropped_image.set_shape([None, None, 3])\n # Update bounding boxes: resize and filter out.\n bboxes = tfe.bboxes_resize(distort_bbox, bboxes)\n labels, bboxes, num = tfe.bboxes_filter_overlap(labels, bboxes,\n BBOX_CROP_OVERLAP)\n return cropped_image, labels, bboxes, distort_bbox,num", "def crop_bbox(img_sitk, label_sitk):\n\n # Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1) # only one label per image\n # print(bbox_dims)\n\n # Applying the bounding box to the image with spacing equal to spc\n spc = 0\n org = bbox_dims[0:3] - [spc]*3\n sz = bbox_dims[3:6] + [spc]*3\n training_patch = img_sitk[org[0]-spc:org[0]+sz[0]+spc,\n org[1]-spc:org[1]+sz[1]+spc,\n org[2]-spc:org[2]+sz[2]+spc]\n\n return training_patch", "def preprocess_pair(templar_buffer, search_buffer, templar_bbox, search_bbox, num_channels, is_training=True):\n\n '''\n *********************************** Templar image ****************************************\n * Get tight bbox, randomly shift +-8 pixels\n * Pad image to [2500, 2500] with mean RGB values\n * Crop to 256x256:\n * get tight bbox [w, h]\n * compute context margin p = (w+h)/4\n * extend bbox to [w+2p, h+2p], and get min(w+2p, h+2p)\n * extend bbox to [D, D] by adding the shorter side with max(w+2p, h+2p) - min(w+2p, h+2p)\n * crop [D, D] and rescale to [128, 128], get the rescale factor [s]\n * pad boundaries to [256,256] with mean RGB values\n \n \n *********************************** Search image ****************************************\n * Get tight bbox of the corresponding object in templar image\n * Randomly rescale in range(s*0.8, s*1.2), and update bbox position; [s] is computed during pre-process templar image\n * Pad image to [2500, 2500] with mean RGB values\n * Set bbox as the center 
and crop the image to [256, 256] so that search target is centered in the image\n '''\n\n # decode image buffers\n templar_img = tf.image.decode_jpeg(templar_buffer, channels=num_channels) # uint8\n search_img = tf.image.decode_jpeg(search_buffer, channels=num_channels) # uint8\n templar_bbox = tf.cast(templar_bbox, tf.int32)\n search_bbox = tf.cast(search_bbox, tf.int32)\n\n def return_zero_pad(x): return [0, tf.abs(x)]\n def return_iden_no_pad(x): return [x, 0]\n def return_maxW_pad(x, w_max): return [w_max - 1, x - (w_max - 1)]\n def return_maxH_pad(x, h_max): return [h_max - 1, x - (h_max - 1)]\n def flip_bbox(bbox, img_w):\n '''\n :param bbox: original bbox [xmin, ymin, xmax, ymax]\n :param img_w:\n :return: flipped bbox\n '''\n new_bbox = []\n new_bbox.append(img_w - bbox[2])\n new_bbox.append(bbox[1])\n new_bbox.append(img_w - bbox[0])\n new_bbox.append(bbox[3])\n\n return new_bbox\n\n ######################################## Process Templar #############################################\n # Get tight bbox, always keep the target at the center\n #templar_bbox = distort_bounding_box(input_bbox=templar_bbox, random_shift=8) # new box [xmin, ymin, xmax, ymax]\n # pad border in case distorted bbox out of boundary\n mean_rgb = tf.reduce_mean(tf.cast(templar_img, tf.int64)) # tf.uint8\n mean_rgb = tf.cast(mean_rgb, tf.uint8)\n #templar_img = templar_img - mean_rgb\n #pad_border, pad_border = 10, 10\n #templar_img = tf.pad(tensor=templar_img, paddings=[[pad_border, pad_border], [pad_border, pad_border],[0, 0]],\n # mode='CONSTANT', name=None, constant_values=0)\n #templar_img = templar_img + mean_rgb\n # update tight bbox position, the size stays the same, the 4 corners are updated\n #templar_bbox[0] = templar_bbox[0] + pad_border\n #templar_bbox[1] = templar_bbox[1] + pad_border\n #templar_bbox[2] = templar_bbox[2] + pad_border\n #templar_bbox[3] = templar_bbox[3] + pad_border\n bbox_h = templar_bbox[3] - templar_bbox[1]\n bbox_w = templar_bbox[2] - templar_bbox[0]\n # save the (distorted) tight bbox for display\n tight_bbox = []\n tight_bbox.append(templar_bbox[0])\n tight_bbox.append(templar_bbox[1])\n tight_bbox.append(templar_bbox[2])\n tight_bbox.append(templar_bbox[3])\n p = tf.cast((bbox_h + bbox_w) / 4, tf.int32) # get context margin and compute new bbox\n argmin_dim = tf.math.argmin([bbox_w, bbox_h], axis=0) # 0: shorter in width, 1: shorter in height\n extend_w_cond = tf.equal(argmin_dim, 0) # true if extend in width dim, otherwise extend in height dim\n extend_side_cond = tf.equal(tf.math.abs(bbox_w-bbox_h) % 2, 0) # if true, extend evenly on both side\n extend_val_left = tf.cond(extend_side_cond,\n lambda: tf.cast(tf.math.abs(bbox_w - bbox_h) / 2, tf.int32),\n lambda: tf.cast(tf.math.abs(bbox_w - bbox_h) / 2, tf.int32) + 1)\n extend_val_right = tf.cast(tf.math.abs(bbox_w-bbox_h) / 2, tf.int32)\n # get a rect bbox by extending the shorter side\n templar_bbox_new = tf.cond(extend_w_cond, lambda: extend_bbox_w(templar_bbox, extend_val_left, extend_val_right),\n lambda: extend_bbox_h(templar_bbox, extend_val_left, extend_val_right))\n ## add context margin\n templar_bbox_new = [templar_bbox_new[0]-p, templar_bbox_new[1]-p, templar_bbox_new[2]+p, templar_bbox_new[3]+p]\n tight_bbox[0] = tight_bbox[0] - templar_bbox_new[0] # [xmin, ymin, xmax, ymax]\n tight_bbox[1] = tight_bbox[1] - templar_bbox_new[1]\n tight_bbox[2] = tight_bbox[2] - templar_bbox_new[0]\n tight_bbox[3] = tight_bbox[3] - templar_bbox_new[1]\n # here the rectangular bbox might already out of boundary, must pad 
precise number of pixels on left/up\n img_height = tf.shape(templar_img)[0]\n img_width = tf.shape(templar_img)[1]\n [new_x_min, pad_w_begin] = tf.cond(templar_bbox_new[0] < 0, lambda :return_zero_pad(templar_bbox_new[0]), lambda :return_iden_no_pad(templar_bbox_new[0]))\n [new_x_max, pad_w_end] = tf.cond(templar_bbox_new[2] >= img_width, lambda :return_maxW_pad(templar_bbox_new[2], img_width), lambda :return_iden_no_pad(templar_bbox_new[2]))\n [new_y_min, pad_h_begin] = tf.cond(templar_bbox_new[1] < 0, lambda :return_zero_pad(templar_bbox_new[1]), lambda :return_iden_no_pad(templar_bbox_new[1]))\n [new_y_max, pad_h_end] = tf.cond(templar_bbox_new[3] >= img_height, lambda :return_maxH_pad(templar_bbox_new[3], img_height), lambda :return_iden_no_pad(templar_bbox_new[3]))\n # do paddings, only effective if out of boundary\n templar_img = templar_img - mean_rgb\n templar_img = tf.pad(tensor=templar_img,\n paddings=[[pad_h_begin, pad_h_end + 10], [pad_w_begin, pad_w_end + 10], [0, 0]],\n mode='CONSTANT', name=None, constant_values=0)\n templar_img = templar_img + mean_rgb\n # crop the image\n croped_templar = tf.image.crop_to_bounding_box(image=templar_img, offset_height=new_y_min,\n offset_width=new_x_min,\n target_height=templar_bbox_new[3]-templar_bbox_new[1],\n target_width=templar_bbox_new[2]-templar_bbox_new[0])\n with tf.control_dependencies([tf.debugging.assert_equal(templar_bbox_new[3] - templar_bbox_new[1],\n templar_bbox_new[2] - templar_bbox_new[0])]):\n # rescale to [127, 127], get the scale factor\n scale_s = 127.0 / tf.cast(templar_bbox_new[3] - templar_bbox_new[1], tf.float32)\n # rescale the tight bbox\n tight_temp_bbox = rescale_bbox(tight_bbox, scale_s)\n scale_s = tf.debugging.assert_all_finite(t=scale_s, msg='scale factor not a number!')\n croped_templar = tf.image.resize_bilinear(images=tf.expand_dims(croped_templar, axis=0), size=[127, 127])\n croped_templar = tf.squeeze(croped_templar, axis=0) # [h, w, 3]\n # check size\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(croped_templar)[0], 127),\n tf.debugging.assert_equal(tf.shape(croped_templar)[1], 127),\n tf.debugging.assert_equal(tf.shape(croped_templar)[2], 3)]):\n templar_final = tf.identity(croped_templar)\n\n ######################################## Process Search image #############################################\n # Get rgb mean\n mean_rgb = tf.reduce_mean(tf.cast(search_img, tf.int64)) # tf.uint8\n mean_rgb = tf.cast(mean_rgb, tf.float32)\n # Get random scale factor\n rescale_factor = scale_s * tf.random.uniform(shape=[], minval=0.8, maxval=1.2, dtype=tf.float32)\n rescale_factor = tf.debugging.assert_all_finite(t=rescale_factor, msg='rescale_factor factor not a number!')\n # Get rescaled bbox position, and the image\n search_bbox = rescale_bbox(search_bbox, rescale_factor)\n new_height = tf.cast(tf.cast(tf.shape(search_img)[0], tf.float32) * rescale_factor, tf.int32)\n new_width = tf.cast(tf.cast(tf.shape(search_img)[1], tf.float32) * rescale_factor, tf.int32)\n search_img = tf.image.resize_bilinear(images=tf.expand_dims(search_img, axis=0), size=[new_height, new_width])\n search_img = tf.squeeze(search_img, axis=0) # [h, w, 3]\n ### randomly shift bbox +-64 pixels, get the shift values and new bbox center\n search_bbox, h_shift, w_shift = distort_bounding_box(input_bbox=search_bbox, random_shift=32) # new box [xmin, ymin, xmax, ymax], h_shift, w_shift\n ### crop around the center of the bbox to [255, 255], if out of boundary, pad with mean rgb value\n img_width = tf.shape(search_img)[1]\n 
img_height = tf.shape(search_img)[0]\n x_center = tf.cast((search_bbox[2] - search_bbox[0]) / 2, tf.int32) + search_bbox[0]\n y_center = tf.cast((search_bbox[3] - search_bbox[1]) / 2, tf.int32) + search_bbox[1]\n x_min, x_max = x_center - 127, x_center + 127\n y_min, y_max = y_center - 127, y_center + 127\n [new_x_min, pad_w_begin] = tf.cond(x_min < 0, lambda :return_zero_pad(x_min), lambda :return_iden_no_pad(x_min))\n [new_x_max, pad_w_end] = tf.cond(x_max >= img_width, lambda :return_maxW_pad(x_max, img_width), lambda :return_iden_no_pad(x_max))\n [new_y_min, pad_h_begin] = tf.cond(y_min < 0, lambda :return_zero_pad(y_min), lambda :return_iden_no_pad(y_min))\n [new_y_max, pad_h_end] = tf.cond(y_max >= img_height, lambda :return_maxH_pad(y_max, img_height), lambda :return_iden_no_pad(y_max))\n # do paddings, only effective if out of boundary\n search_img = search_img - mean_rgb\n search_img = tf.pad(tensor=search_img, paddings=[[pad_h_begin, pad_h_end+10], [pad_w_begin, pad_w_end+10], [0, 0]],\n mode='CONSTANT', name=None, constant_values=0)\n search_img = search_img + mean_rgb\n # crop\n search_final = tf.image.crop_to_bounding_box(image=search_img, offset_height=new_y_min, offset_width=new_x_min,\n target_height=255, target_width=255)\n ## get tight bbox within the rescaled search img [xmin, ymin, xmax, ymax]\n bbox_h_half = tf.cast((search_bbox[3] - search_bbox[1]) / 2, tf.int32) # might be zero\n bbox_w_half = tf.cast((search_bbox[2] - search_bbox[0]) / 2, tf.int32) # might be zero\n tight_search_bbox = []\n tight_search_bbox.append(127 - bbox_w_half - w_shift) # xmin\n tight_search_bbox.append(127 - bbox_h_half - h_shift) # ymin\n tight_search_bbox.append(127 + bbox_w_half - w_shift) # xmax\n tight_search_bbox.append(127 + bbox_h_half - h_shift) # ymax\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(search_final)[0], 255),\n tf.debugging.assert_equal(tf.shape(search_final)[1], 255),\n tf.debugging.assert_equal(tf.shape(search_final)[2], 3)]):\n search_final = tf.identity(search_final)\n\n ######################################## Process Score Map GT #############################################\n # [17, 17, 1], [17, 17, 1]\n # consider 8 x (center - offset) <= 16 as positives, stride=8; also note that target in search image is already shifted\n t_center_x = 8 - tf.cast(w_shift / 8, tf.int32)\n t_center_y = 8 - tf.cast(h_shift / 8, tf.int32)\n score, score_weight = tf.py_func(func=build_gt_py, inp=[t_center_x, t_center_y], Tout=[tf.int32, tf.float32],\n stateful=True, name=None)\n \"\"\"\n score = tf.zeros([17, 17, 1], dtype=tf.int32)\n delta = tf.sparse.SparseTensor(indices=[[t_center_y, t_center_x, 0]], values=[1], dense_shape=[17,17,1])\n score = score + tf.sparse.to_dense(delta)\n score = tf.expand_dims(score, axis=0) # [1,17,17,1]\n dila_structure = np.array([[False, False, True, False, False],\n [False, True, True, True, False],\n [True, True, True, True, True],\n [False, True, True, True, False],\n [False, False, True, False, False]], dtype=bool)\n dila_structure = dila_structure.astype(np.int32)\n dila_structure = np.expand_dims(dila_structure, axis=-1) # [5,5,1]\n score = tf.nn.dilation2d(input=score, filter=dila_structure, strides=[1,1,1,1], rates=[1,1,1,1], padding='SAME')\n num_total = 17 * 17\n num_positive = tf.reduce_sum(score)\n num_negative = num_total - num_positive\n weight_positive = tf.cast(num_negative, tf.float32) / tf.cast(num_total, tf.float32)\n weight_negative = tf.cast(num_positive, tf.float32) / tf.cast(num_total, tf.float32)\n 
mat_positive = tf.cast(score, tf.float32) * weight_positive # float\n mat_negative = (1.0 - tf.cast(score, tf.float32)) * weight_negative # float\n score_weight = mat_positive + mat_negative\n score = tf.squeeze(score, 0)\n score_weight = tf.squeeze(score_weight, 0)\n \"\"\"\n # check size\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(score)[0], 17),\n tf.debugging.assert_equal(tf.shape(score)[1], 17),\n tf.debugging.assert_equal(tf.shape(score)[2], 1),\n tf.debugging.assert_equal(tf.shape(score_weight)[0], 17),\n tf.debugging.assert_equal(tf.shape(score_weight)[1], 17),\n tf.debugging.assert_equal(tf.shape(score_weight)[2], 1)]):\n score = tf.identity(score)\n score_weight = tf.identity(score_weight)\n\n ################################### Randomly flip templar/search images ####################################\n flip_v = tf.random.uniform(shape=[]) # scalar\n flip_v = tf.greater_equal(flip_v, 0.5)\n templar_final = tf.cond(flip_v, lambda : tf.image.flip_left_right(image=templar_final), lambda :templar_final)\n search_final = tf.cond(flip_v, lambda: tf.image.flip_left_right(image=search_final), lambda: search_final)\n score = tf.cond(flip_v, lambda :tf.image.flip_left_right(image=score), lambda :score)\n score_weight = tf.cond(flip_v, lambda :tf.image.flip_left_right(image=score_weight), lambda :score_weight)\n tight_search_bbox = tf.cond(flip_v, lambda :flip_bbox(tight_search_bbox, 255), lambda :tight_search_bbox)\n\n templar_final = mean_image_subtraction(templar_final, _CHANNEL_MEANS, num_channels)\n search_final = mean_image_subtraction(search_final, _CHANNEL_MEANS, num_channels)\n\n return templar_final, search_final, score, score_weight, tight_temp_bbox, tight_search_bbox", "def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label", "def _preprocess_for_training(image, gt_mask):\n rand = np.random.rand(4)\n\n \"\"\" step 1. random cropping \"\"\"\n if rand[3] > 0.5:\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0],\n dtype=tf.float32,\n shape=[1, 1, 4])\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.7, 1.0),\n max_attempts=100,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n image = tf.slice(image, bbox_begin, bbox_size)\n gt_mask = tf.slice(gt_mask, bbox_begin, bbox_size)\n image.set_shape([None, None, 3])\n\n \"\"\" step 2. 
resize \"\"\"\n # ratio = np.random.uniform(0.8, 1.2)\n # new_width = int(300.0 * ratio)\n # new_height = int(400.0 * ratio)\n # print(new_width, new_height)\n\n new_width = 400\n new_height = 400\n\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [new_height, new_width], align_corners=False)\n image = tf.squeeze(image, axis=[0])\n\n gt_mask = tf.expand_dims(gt_mask, 0)\n gt_mask = tf.cast(gt_mask, tf.float32)\n gt_mask = tf.image.resize_bilinear(gt_mask, [new_height, new_width], align_corners=False)\n gt_mask = tf.squeeze(gt_mask, axis=[0])\n\n image = tf.reshape(image, [new_height, new_width, 3])\n gt_mask = tf.reshape(gt_mask, [new_height, new_width, 1])\n\n \"\"\" step 3. random flipping \"\"\"\n flip_thresh = tf.constant(rand[1], dtype=tf.float32)\n val = tf.constant(0.5, dtype=tf.float32)\n image, gt_mask = tf.cond(tf.greater_equal(flip_thresh, val),\n lambda : (_flip_image(image),\n _flip_gt_masks(gt_mask)),\n lambda : (image, gt_mask))\n\n\n \"\"\" step 4. random rotation \"\"\"\n rotate_thresh = tf.constant(rand[2], dtype=tf.float32)\n image, gt_mask = tf.cond(tf.greater_equal(rotate_thresh, val),\n lambda : (_rotate_images(image, gt_mask, rand[3])),\n lambda : (image, gt_mask))\n\n\n \"\"\" step 5. convert [0, 255] to [0.0, 1.0] \"\"\"\n image = tf.image.convert_image_dtype(image, tf.float32)\n gt_mask = tf.image.convert_image_dtype(gt_mask, tf.float32)\n image = image / 255.0\n gt_mask = gt_mask / 255.0\n\n return image, gt_mask", "def get_x_y(self, indices: List[int], raw=False):\n\n annotations = []\n batch_of_input_images, batch_of_mask_sets, batch_of_bbox_sets, batch_of_label_sets, num_labels = super(RetinaDataset, self)._get_x_y(\n indices=indices,\n autoscale=True,\n use_masks=False,\n do_preprocessing=True,\n downscale=True\n )\n\n # Extract boxes\n for batch, sets in enumerate(zip(batch_of_input_images, batch_of_bbox_sets, batch_of_label_sets)):\n image, box_set, label_set = sets\n annotations.append({\n 'bboxes': box_set,\n 'labels': label_set\n })\n\n # Uncomment for DEBUG\n # ==========================\n # # ==========================\n # if self.is_training_dataset:\n # draw = image.copy()\n #\n # draw[..., 0] += 123.68 # R\n # draw[..., 1] += 116.779 # G\n # draw[..., 2] += 103.939 # B\n #\n # for label, box in zip(label_set, box_set):\n # draw_box(draw, [int(box[1]), int(box[0]), int(box[3]), int(box[2])], color=(255, 200, 0))\n # caption = \"{} {:.3f}\".format(label, 0)\n #\n # # print(self.labels.index(obj['name']) )\n #\n # cv2.putText(\n # img=draw,\n # text=caption,\n # org=(int(box[0]), int(box[1]) - 10),\n # fontFace=cv2.FONT_HERSHEY_PLAIN,\n # fontScale=1,\n # color=(255, 200, 0),\n # thickness=1)\n #\n # from matplotlib import pyplot as plt\n # fig = plt.figure(figsize=(10,15))\n # plt.axis('off')\n # try:\n # plt.imshow(draw.astype(np.uint8))\n # except:\n # pass\n # # plt.show()\n # Image.fromarray(draw.astype('uint8')).save('train_images/{}.png'.format(randint(0, 1000)))\n # # with open('train_images/{}.png'.format(randint(0, 1000)), 'wb') as f:\n # # fig.savefig(f, format='png')\n\n # # exit(0)\n # ==========================\n # ==========================\n\n # Compute regression targets\n targets = (batch_of_input_images, annotations) if raw else self.compute_targets(batch_of_input_images, annotations)\n # batch_of_input_images = self.compute_inputs(batch_of_input_images)\n return batch_of_input_images, list(targets)", "def transform_with_label(aug):\n\n geometric_tfx = get_geometric_transformer(aug)\n intensity_tfx = 
get_intensity_transformer(aug)\n\n def transform(comp, c_label, c_img, use_onehot, nclass, **kwargs):\n \"\"\"\n Args\n comp: a numpy array with shape [H x W x C + c_label]\n c_label: number of channels for a compact label. Note that the current version only supports 1 slice (H x W x 1)\n nc_onehot: -1 for not using one-hot representation of mask. otherwise, specify number of classes in the label\n\n \"\"\"\n comp = copy.deepcopy(comp)\n if (use_onehot is True) and (c_label != 1):\n raise NotImplementedError(\"Only allow compact label, also the label can only be 2d\")\n assert c_img + 1 == comp.shape[-1], \"only allow single slice 2D label\"\n\n # geometric transform\n _label = comp[..., c_img ]\n _h_label = np.float32(np.arange( nclass ) == (_label[..., None]) )\n comp = np.concatenate( [comp[..., :c_img ], _h_label], -1 )\n comp = geometric_tfx(comp)\n # round one_hot labels to 0 or 1\n t_label_h = comp[..., c_img : ]\n t_label_h = np.rint(t_label_h)\n assert t_label_h.max() <= 1\n t_img = comp[..., 0 : c_img ]\n\n # intensity transform\n t_img = intensity_tfx(t_img)\n\n if use_onehot is True:\n t_label = t_label_h\n else:\n t_label = np.expand_dims(np.argmax(t_label_h, axis = -1), -1)\n return t_img, t_label\n\n return transform", "def make_sub_data_train(data, config):\n sub_input_sequence = []\n sub_label_sequence = []\n\n\tfor scale in range(2,5):\t \n\n\t for i in range(len(data)):\n\n\t\t#input_, label_, = preprocess(data[i], config.scale) # do bicbuic only one scale\n\t\tinput_, label_, = preprocess(data[i], scale) # do bicbuic turn around all scale\n\t\n\t\tif len(input_.shape) == 3: # is color\n\t\t h, w, c = input_.shape\n\t\telse:\n\t\t h, w = input_.shape # is grayscale\n\t\n\t\t#checkimage(input_)\t\t\n\n\t\tnx, ny = 0, 0\n\t\tfor x in range(0, h - config.image_size + 1, config.stride):\n\t\t nx += 1; ny = 0\n\t\t for y in range(0, w - config.image_size + 1, config.stride):\n\t\t\tny += 1\n\n\t\t\tsub_input = input_[x: x + config.image_size, y: y + config.image_size] # 41 * 41\n\t\t\tsub_label = label_[x: x + config.label_size, y: y + config.label_size] # 41 * 41\n\n\n\t\t\t# Reshape the subinput and sublabel\n\t\t\tsub_input = sub_input.reshape([config.image_size, config.image_size, config.c_dim])\n\t\t\tsub_label = sub_label.reshape([config.label_size, config.label_size, config.c_dim])\n\n\t\t\t# Normialize\n\t\t\tsub_input = sub_input / 255.0\n\t\t\tsub_label = sub_label / 255.0\n\t\t\t\n\t\t\t#cv2.imshow(\"im1\",sub_input)\n\t\t\t#cv2.imshow(\"im2\",sub_label)\n\t\t\t#cv2.imshow(\"residual\",sub_input - sub_label)\n\t\t\t#cv2.waitKey(0)\n\n\t\t\t# Rotate 90,180,270\n\t\t\tfor angle in range(0,360,90):\t\n\t\t\t\tsub_input = rotate(sub_input,angle)\t\n\t\t\t\tsub_label = rotate(sub_label,angle)\t\n\t\t\n\t\t\t\t# Add to sequence\n\t\t\t\tsub_input_sequence.append(sub_input)\n\t\t\t\tsub_label_sequence.append(sub_label)\n\n\t\t\t\tcv2.imshow(\"im1\",sub_input)\n\t\t\t\tcv2.imshow(\"im2\",sub_label)\n\t\t\t\tcv2.imshow(\"residual\",sub_input - sub_label)\n\t\t\t\tcv2.waitKey(1)\n\t\t\t\t\n\n \n # NOTE: The nx, ny can be ignore in train\n return sub_input_sequence, sub_label_sequence, nx, ny", "def _extract_sample(self, features, masks, imin, imax, shapev, needslabels=False, one_hot=True):\n\n # prepare containers\n tempdata = np.zeros([len(features)] + self.w, dtype=np.float32)\n featuredata = [f.squeeze() for f in features]\n templabels = []\n\n # accumulate mean and std for normalization\n if self.whiten and not self.whiten_subvolumes:\n numvoxs = [\n np.prod([s if g is 
None else g for g, s in zip(self.presize_for_normalization, f.squeeze().shape)]) for\n f in featuredata]\n means = [np.sum(f) * 1.0 / n for f, n in zip(featuredata, numvoxs)]\n stddevs = [np.sqrt(np.abs(np.mean((featuredata[i] - means[i]) ** 2))) for i in range(len(featuredata))]\n\n if np.sum(self.deform) + np.sum(self.rotation) + np.sum(self.scaling) + np.sum(\n self.shift) == 0 and not self.interpolate_always: # No deformation/scaling/rotation\n # infer the valid part of subvolume in both source and target\n ranges = np.zeros((len(imin), 2), dtype=np.int32)\n ranges[:, 1] = 1\n ranges[:len(self.w), 1] = self.w\n imin = np.int32(imin)\n imax = np.int32(imax)\n for i in range(len(imin)):\n if imin[i] < 0:\n ranges[i, 0] -= imin[i]\n imin[i] -= imin[i]\n if imax[i] >= shapev[i]:\n ranges[i, 1] -= ((imax[i] - shapev[i]))\n imax[i] -= ((imax[i] - shapev[i]))\n # now index accordingly:\n targetindex = tuple([slice(None)] + [slice(np.int32(r[0]), np.int32(r[1])) for r in ranges])\n sourcesindex = tuple([slice(np.int32(mi), np.int32(ma)) for mi, ma in zip(imin, imax)])\n tempdata[targetindex] = np.asarray([f[sourcesindex] for f in featuredata])\n\n if len(masks):\n templabels = np.zeros(self.w, dtype=np.uint8)\n templabels[targetindex[1:]] = np.asarray([f.squeeze()[sourcesindex] for f in masks])\n if one_hot and not self.regression:\n templabels = self._one_hot_vectorize(templabels, self.nclasses, zero_out_label=self.zero_out_label)\n\n\n else: # we need to interpolate\n coords = np.float64(np.mgrid[[slice(np.int32(imi), np.int32(ima)) for imi, ima in zip(imin, imax)]])\n # coords = np.mgrid[imin[0]:imax[0],imin[1]:imax[1],imin[2]:imax[2]]\n coords = self.transformAffine(coords)\n if np.sum(self.deform):\n # create deformationfield:\n deform = self._get_deform_field_dm\n\n self.deformfield = deform()\n coords += self.deformfield\n\n # and set accordingly:\n if len(masks):\n if one_hot and not self.regression:\n if len(masks) > 1:\n logging.getLogger('data').error(\n 'cant have more than one mask with one_hot encoding in griddatacollection')\n if self.softlabels:\n mask = self._one_hot_vectorize(np.int32(masks[0]), self.nclasses,\n zero_out_label=self.zero_out_label)\n templabels = [map_coordinates(mask[..., c].squeeze(), coords, order=1, cval=np.float32(c == 0))\n for c in range(self.nclasses)]\n templabels = np.concatenate([np.expand_dims(l, -1) for l in templabels], axis=-1)\n else:\n templabels = map_coordinates(masks[0].squeeze(), coords, order=0)\n templabels = self._one_hot_vectorize(templabels, self.nclasses,\n zero_out_label=self.zero_out_label)\n\n if needslabels:\n if np.sum(np.asarray(templabels[..., self.minlabel:])) == 0:\n return [], []\n\n else:\n # logging.getLogger('data').warning(\n # 'maybe you want to revise this section before using! 
when do we not need a onehot?')\n templabels = np.asarray(\n [map_coordinates(f.squeeze(), coords, order=1 if self.softlabels else 0) for f in masks])\n templabels = templabels.transpose([i for i in range(1, len(templabels.shape))] + [0])\n if needslabels:\n if np.sum(templabels >= self.minlabel) == 0:\n return [], []\n tempdata = [map_coordinates(np.float32(f).squeeze(), coords, mode=self.padding_rule,\n order=self.interpolation_order) for f in features]\n tempdata = [x.reshape((self.w + [1])) for x in tempdata] # FIXME: maybe we can just use expand_dims?\n if self.whiten:\n if self.whiten_subvolumes:\n raise Exception('not supported anymore')\n # for i in range(len(tempdata)):\n # tempdata[i] = tempdata[i] - np.mean(tempdata[i])\n # tempdata[i] /= np.sqrt(np.mean(tempdata[i] ** 2)) + 1e-20\n elif self.half_gaussian_clip:\n raise Exception('not supported anymore')\n # tempdata = [np.clip((x - means[i]) / (5 * stddevs[i]) - 1, -0.99999, 0.99999) for i, x in\n # enumerate(tempdata)]\n else:\n tempdata = [(x - means[i]) / stddevs[i] for i, x in enumerate(tempdata)]\n if self.vary_mean > 0 or self.vary_stddev > 0:\n tempdata = [x * ((self.deformrandomstate.rand() - 0.5) * self.vary_stddev + 1) + (\n self.deformrandomstate.rand() - 0.5) * self.vary_mean for x in tempdata]\n tempdata = np.concatenate(tempdata, -1)\n\n if np.sum(self.mirror):\n fr = []\n orig = []\n for i in self.mirror:\n fr.append(slice(None, None, np.int32(1 - self.deformrandomstate.randint(2) * i * 2)))\n orig.append(slice(None))\n fr.append(slice(None)) # features / labels\n orig.append(slice(None))\n tempdata[orig] = tempdata[fr]\n templabels[orig] = templabels[fr]\n if self.gaussiannoise > 0:\n tempdata *= (1 + (self.deformrandomstate.rand(*tempdata.shape) - 0.5) * self.gaussiannoise)\n return tempdata, templabels", "def reformat(x, y):\r\n # img_size, num_ch, num_class = int(np.sqrt(x.shape[1])), 1, len(np.unique(np.argmax(y, 1)))\r\n img_size, num_ch, num_class = 14, 1, 16\r\n dataset = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)\r\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32) # =[1 2 3 ... 
10]??\r\n return dataset, labels", "def prepare_label(input_batch, new_size, num_classes, one_hot=True, task='seg'):\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.\n if task == 'seg':\n input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.\n if one_hot:\n input_batch = tf.one_hot(input_batch, depth=num_classes)\n return input_batch", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def nextBatch(self, TRAIN=True, d=False):\n while True:\n if TRAIN==True:\n idx=np.random.randint(self.split*self.total)\n else:\n idx=np.random.randint(self.split*self.total,high=self.total)\n \n if len(self.roidb[idx])!=0:\n break\n \n data=self.imdb[idx][np.newaxis,:]\n gt_boxes=np.array(self.roidb[idx])\n \n maskdb=self.maskdb[idx]\n mask_max_x=0\n mask_max_y=0\n for ins in maskdb:\n if ins.shape[0]>mask_max_y:\n mask_max_y=ins.shape[0]\n if ins.shape[1]>mask_max_x:\n mask_max_x=ins.shape[1]\n\n gt_masks=np.zeros((len(maskdb),mask_max_y,mask_max_x))\n mask_info=np.zeros((len(maskdb),2))\n for j in range(len(maskdb)):\n mask=maskdb[j]\n mask_x=mask.shape[1]\n mask_y=mask.shape[0]\n gt_masks[j,0:mask_y,0:mask_x]=mask\n mask_info[j,0]=mask_y\n mask_info[j,1]=mask_x\n\n blobs={\n 'data': data,\n 'gt_boxes': gt_boxes,\n 'im_info': np.array([[data.shape[2],data.shape[3],1]], dtype=np.float32),\n 'gt_masks':gt_masks,\n 'mask_info':mask_info\n }\n if d: \n # i is always 1, in ultrasound case\n for i in range(blobs['data'].shape[0]):\n print blobs['im_info']\n print blobs['mask_info']\n print blobs['gt_boxes']\n img=blobs['data'][0,0]\n print img.shape\n fig=plt.figure()\n ax=fig.add_subplot(111)\n plt.imshow(img)\n for j,bbox in enumerate(gt_boxes):\n blank=np.zeros_like(img)\n print blank.shape,maskdb[j].shape,bbox\n blank[bbox[1]:maskdb[j].shape[0]+bbox[1],bbox[0]:maskdb[j].shape[1]+bbox[0]]=maskdb[j]\n blank[blank>0]=1\n plt.imshow(blank,alpha=.9)\n ax.add_patch(patches.Rectangle((bbox[0],bbox[1]),bbox[2]-bbox[0],bbox[3]-bbox[1],fill=False))\n plt.text(bbox[0],bbox[1],bbox[-1],bbox=dict(facecolor='blue',alpha=0.5),fontsize=14, color='white')\n plt.show()\n for i in blobs:\n print i,blobs[i].shape\n print ''\n return blobs", "def interp_filtering(input_block, kernel_size, x_frac, y_frac):\n input_block = input_block.astype(np.float)\n label = np.zeros((input_block.shape[0] - kernel_size + 1, input_block.shape[1] - kernel_size + 1, 1))\n\n # only horizontal filtering\n if x_frac != 0 and y_frac == 0:\n filter_x = filter_coefficients(x_frac)\n for i, j in product(range(label.shape[0]), range(label.shape[1])):\n label[i, j, :] = sum(val * input_block[i + 6, j + ind + 3, :] for ind, val in enumerate(filter_x))\n label[i, j, :] = clip_round(label[i, j, :])\n # only vertical filtering\n elif x_frac == 0 and y_frac != 0:\n filter_y = filter_coefficients(y_frac)\n for i, j in product(range(label.shape[0]), range(label.shape[1])):\n label[i, j, :] = sum(val * input_block[i + ind + 3, j + 6, :] for ind, val in enumerate(filter_y))\n label[i, j, :] = clip_round(label[i, j, :])\n # horizontal and vertical filtering\n elif x_frac != 0 and y_frac != 0:\n temp = np.zeros((label.shape[0] + 7, label.shape[1], label.shape[2]))\n filter_x = 
filter_coefficients(x_frac)\n for i, j in product(range(temp.shape[0]), range(temp.shape[1])):\n temp[i, j, :] = sum(val * input_block[i + 3, j + ind + 3, :] for ind, val in enumerate(filter_x))\n temp[i, j, :] = clip_round(temp[i, j, :])\n filter_y = filter_coefficients(y_frac)\n for i, j in product(range(label.shape[0]), range(label.shape[1])):\n label[i, j, :] = sum(val * temp[i + ind, j, :] for ind, val in enumerate(filter_y))\n label[i, j, :] = clip_round(label[i, j, :])\n\n return label.astype(np.int16)", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\r\n # MP:\r\n # overlaps: (no_rois x no_gt_bbox) each row gives the overlap of the proposed region with the gt boxes. Overlap is measured as: (overlapping area)/(union area).\r\n # gt_assignment: determines which of the gt boxes has more overlap with the regions\r\n # max_overlaps: takes the maximum overlap of a region\r\n # labels: defines which which gt box corresponds best with the region and assigns its label to the region\r\n # fg_rois_per_image = 8\r\n # overlaps: (rois x gt_boxes)\r\n\r\n # MP: bbox_overlaps rewritten as c_bbox_overlaps\r\n #overlaps =c_bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n # \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n overlaps = bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n # MP: which column index has maximum value\r\n gt_assignment = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps.max(axis=1)\r\n labels = gt_boxes[gt_assignment, 4]\r\n\r\n\r\n # MP: Extract RoIs where overlap >= FG_THRESH\r\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\r\n\r\n # Guard against the case when an image has fewer than fg_rois_per_image (i.e. 8)\r\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\r\n\r\n # Sample foreground regions without replacement\r\n if fg_inds.size > 0:\r\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\r\n\r\n # MP: Extract RoIs where overlap in [BG_THRESH_LO, BG_THRESH_HI), i.e. 
[0.0, 0.5)\r\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\r\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\r\n\r\n # Compute number of background RoIs to take from this image (guarding\r\n # against there being fewer than desired)\r\n # MP: Take the no of bg_inds such that fg_inds.shape + bg_inds.shape = 32\r\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\r\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\r\n if bg_inds.size > 0:\r\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\r\n\r\n\r\n # MP: concatenate the fg_inds and bg_inds, such that keep_inds.shape = 32\r\n keep_inds = np.append(fg_inds, bg_inds)\r\n # MP: obtain the labels set the ones corresponding to bg_inds to zero\r\n labels = labels[keep_inds]\r\n labels[int(fg_rois_per_this_image):] = 0\r\n\r\n # MP: select the 32 rois (fg & bg) from the 2000+ rois with the keep_inds\r\n rois = all_rois[keep_inds]\r\n # MP: fg rois\r\n rois_pos = np.zeros((fg_inds.size, 5), dtype=np.float32) #because return rois_pos as top ---> allocate memory for it\r\n rois_pos[:, :] = all_rois[fg_inds]\r\n gt_assignment_pos = gt_assignment[fg_inds]\r\n\r\n # MP: compute diff to approximate bbox to ground truth\r\n bbox_target_data = _compute_targets(\r\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\r\n\r\n # MP: set the diff values in a matrix where each row corresponds to a foreground bbox\r\n # and the values are stored starting at the index of the label.\r\n # Therefore number of columns: 4*(no labels)\r\n # The bg bboxes are also included in rows, but have all values equal to zero.\r\n bbox_targets, bbox_inside_weights = \\\r\n _get_bbox_regression_labels(bbox_target_data, num_classes)\r\n\r\n '''\r\n # MP: printing and saving files\r\n print \"overlaps with size {}: {}\".format(overlaps.shape, overlaps)\r\n print \"gt_assignment with size {}: {}\".format(gt_assignment.shape, gt_assignment)\r\n print \"max_overlaps with size{}: {}\".format(max_overlaps.shape, max_overlaps)\r\n print \"labels with size{}: {}\".format(labels.shape, labels)\r\n print \"bg_inds with size{}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"bg_rois_per_this_image: {}\".format(bg_rois_per_this_image)\r\n print \"bg_inds with shape {}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"fg_inds with size {}: {}\".format(fg_inds.shape, fg_inds)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois wiht shape {}: {}\".format(rois.shape, rois)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"gt_assignment_pos wiht shape {}: {}\".format(gt_assignment_pos.shape, gt_assignment_pos)\r\n print \"bbox_target_data wiht shape {}: {}\".format(bbox_target_data.shape, bbox_target_data)\r\n print \"diff: {}\".format(rois_pos[:,:] + bbox_target_data[0:fg_inds.size,:])\r\n print \"bbox_targets with size {}: {}\".format(bbox_targets.shape, bbox_targets)\r\n print \"bbox_inside_weights with size {}: {}\".format(bbox_inside_weights.shape, bbox_inside_weights)\r\n\r\n np.savetxt('bbox_targets.txt', bbox_targets, delimiter=',')\r\n np.savetxt('bbox_inside_weights.txt', bbox_inside_weights, delimiter=',')\r\n '''\r\n\r\n return labels, rois, bbox_targets, bbox_inside_weights, gt_boxes[gt_assignment[keep_inds], :], rois_pos, gt_assignment_pos", "def reformat(x, y, img_size, num_ch, 
num_class):\n dataset = x.reshape(\n (-1, img_size, img_size, num_ch)).astype(np.float32)\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32)\n return dataset, labels", "def nms_all_class(bound_corr_objs, nms_thresh):\n bboxs, scores, masks, labels = [], [], [], []\n for obj in bound_corr_objs:\n bboxs.append(obj['box'])\n scores.append(obj['score'])\n # masks.append(obj['mask'])\n # labels.append(obj['label'])\n bboxs = np.asarray(bboxs)\n scores = np.asarray(scores)\n # masks = np.asarray(masks)\n # labels = np.asarray(labels)\n x1 = bboxs[:, 0]\n y1 = bboxs[:, 1]\n x2 = bboxs[:, 2]\n y2 = bboxs[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # cfvalid_ids = np.where(scores >= cf_thresh)[0]\n # scores = scores[cfvalid_ids]\n\n order = scores.argsort()[::-1]\n # mask_sizes = np.sum(masks, axis=(1, 2))\n # order = mask_sizes.argsort()[::-1]\n keep = []\n suppress = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n\n # mask_other = masks[order[1:], :, :]\n # mask_cur = masks[i, :, :]\n # mask_inter = np.sum(mask_cur & mask_other, axis=(1, 2))\n # mask_union = np.sum(mask_cur | mask_other, axis=(1, 2))\n # mask_iou = mask_inter / mask_union\n\n # inds = np.where((iou <= nms_thresh) & (mask_iou <= nms_thresh))[0]\n inds = np.where(iou <= nms_thresh)[0]\n order = order[inds + 1]\n\n # masks = masks[keep]\n # ids = ids[keep]\n return keep", "def map_measurements_on_labels(labels_layer:\"napari.layers.Labels\", column:str = \"label\", viewer:\"napari.Viewer\" = None) -> \"napari.types.ImageData\":\n import pandas as pd\n import dask.array as da\n from dask import delayed\n from functools import partial\n\n labels = labels_layer.data\n table = pd.DataFrame(labels_layer.properties)\n\n # special treatment for time series\n if len(labels.shape) == 4:\n # determine how the Frame column is called; in case there is any\n frame_column = None\n for potential_frame_column in ['frame', 'Frame']:\n if potential_frame_column in table.keys():\n frame_column = potential_frame_column\n break\n\n # Relabel one timepoint\n output_sample = relabel_timepoint_with_map_array(labels, table, column, frame_column, 0)\n\n lazy_arrays = []\n for i in range(labels.shape[0]):\n # build a delayed function call for each timepoint\n lazy_processed_image = delayed(\n partial(relabel_timepoint_with_map_array, labels, table, column, frame_column, i)\n )\n lazy_arrays.append(\n lazy_processed_image()\n )\n\n # build an array of delayed arrays\n dask_arrays = [\n [da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype)]\n if len(output_sample.shape) == 2\n else da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype\n )\n for delayed_reader in lazy_arrays\n ]\n # Stack into one large dask.array\n stack = da.stack(\n dask_arrays,\n axis=0)\n return stack\n else:\n label_list = np.asarray(table['label']).tolist()\n measurement_list = np.asarray(table[column]).tolist()\n\n return relabel_with_map_array(labels, label_list, measurement_list)", "def _binary_sample(image, label, n_samples_per_label, label_count):\n h_idx, w_idx = np.where(image == label)\n replace = True if label_count < n_samples_per_label else False\n rand_idx = 
np.random.choice(len(h_idx), size=n_samples_per_label, replace=replace)\n # Here we rescale the labels as per input\n return image[h_idx[rand_idx], w_idx[rand_idx]] - 1, h_idx[rand_idx], w_idx[rand_idx]", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.size > 0 and bg_inds.size > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.size < bg_rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)\n elif fg_inds.size > 0:\n to_replace = fg_inds.size < rois_per_image\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = rois_per_image\n elif bg_inds.size > 0:\n to_replace = bg_inds.size < rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n rois = all_rois[keep_inds]\n roi_scores = all_scores[keep_inds]\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights", "def __init_rect_list(self, ind, min_prob = 0.5):\n #bbox_label_pred = self.net.tops['bbox_label'].data[ind]\n #binary_pred = self.net.tops['binary_label'].data[ind]\n bottom_height = self.image_height\n bottom_width = self.image_width\n bbox_label_pred = self.net.tops['bbox_pred'].data[ind]\n binary_pred = self.net.tops['binary_softmax'].data[ind]\n label_pred = self.net.tops['label_softmax'].data[ind]\n \n (_, top_height, top_width) = bbox_label_pred.shape\n y_mul = bottom_height * 1. / top_height\n x_mul = bottom_width * 1. 
/ top_width\n rect_list = []\n for y in xrange(top_height):\n for x in xrange(top_width):\n # corresponds to indices in original image\n cx_orig = x_mul * (x + 0.5)\n cy_orig = y_mul * (y + 0.5)\n\n # we predict a symbol here if p(no label) < x\n if binary_pred[0, y, x] < 0.5:\n k = np.argmax(label_pred[:, y, x]) \n #if label_pred[k, y, x] < 0.2: continue\n\n # apply offsets to get positions in original image\n cx = cx_orig + bbox_label_pred[0, y, x]\n cy = cy_orig + bbox_label_pred[1, y, x]\n w = bbox_label_pred[2, y, x]\n h = bbox_label_pred[3, y, x]\n xmin = cx - w / 2.0\n ymin = cy - h / 2.0\n rect = Rect(xmin, ymin, xmin + w, ymin + h, label=k, prob=label_pred[k, y, x])\n rect_list.append(rect)\n\n return rect_list", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n im_scale = h / float(img.shape[0])\n\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n return img, bbox.astype('float32'), mx.nd.array([im_scale])", "def make_grid_bbox(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0, draw_line=False):\n\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n # print(box[0].size())\n # print(box[1].size())\n # assert False\n # num_curr_box = box[0][k].size(0)\n num_curr_box = box[k][0].size(0)\n for z in irange(num_curr_box):\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n print(box)\n print(k)\n assert False\n \n if label != -1:\n block = draw_box(block, box[k][0][z], label, draw_line)\n # print(k, z)\n else:\n break\n # copy to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid", "def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, box_stds=None):\n gt_boxes_coodinate_convert = back_forward_convert(gt_boxes, True) # return [x_c,y_c,w,h,theta,label]\n theta=gt_boxes_coodinate_convert[:,4]\n real_label=gt_boxes_coodinate_convert[:,5]\n\n\n gt_boxes_rec_with_label=np.zeros((gt_boxes.shape[0],6),dtype=np.float32)\n gt_boxes_rec_with_label[:,0]=np.min(gt_boxes[:,0:8:2])#x_min\n gt_boxes_rec_with_label[:,1]=np.min(gt_boxes[:,1:8:2])# y_min\n gt_boxes_rec_with_label[:,2]=np.max(gt_boxes[:,0:8:2])#x_max\n gt_boxes_rec_with_label[:,3]=np.max(gt_boxes[:,1:8:2])#y_max\n\n gt_boxes_rec_with_label[:,4] = theta #真实的旋转角度\n gt_boxes_rec_with_label[:,5]=real_label#gt_boxes[:,-1]#真实的标签\n\n overlaps = bbox_overlaps(rois[:, 1:], gt_boxes_rec_with_label[:, :4])\n\n #overlaps = bbox_overlaps(\n # 
np.ascontiguousarray(rois, dtype=np.float),\n # np.ascontiguousarray(gt_boxes[:, :-1], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n #print('mx_overlap=',max_overlaps)\n labels = gt_boxes_rec_with_label[gt_assignment, -1]#\n # select foreground RoI with FG_THRESH overlap\n\n fg_indexes = np.where(max_overlaps >= fg_overlap)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))\n # sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_this_image:\n fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)\n\n # select background RoIs as those within [0, FG_THRESH)\n bg_indexes = np.where(max_overlaps < fg_overlap)[0]\n # compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_this_image = rois_per_image - fg_rois_this_image\n bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))\n # sample bg rois without replacement\n if len(bg_indexes) > bg_rois_this_image:\n bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n # pad more bg rois to ensure a fixed minibatch size\n while len(keep_indexes) < rois_per_image:\n gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))\n gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])\n\n # sample rois and labels\n rois = rois[keep_indexes]\n labels = labels[keep_indexes]\n # set labels of bg rois to be 0\n labels[fg_rois_this_image:] = 0\n\n targets = encode_boxes_rotate(ex_rois=rois[:, 1:], gt_rois=gt_boxes_rec_with_label[gt_assignment[keep_indexes], :5])\n bbox_targets = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n bbox_weights = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n for i in range(fg_rois_this_image):\n cls_ind = int(labels[i])\n bbox_targets[i, cls_ind * 5:(cls_ind + 1) * 5] = targets[i]\n bbox_weights[i, cls_ind * 5:(cls_ind + 1) * 5] = 1\n return rois,labels,bbox_targets,bbox_weights\n\n\n\n \"\"\"\n gt_boxes_rec=np.zeros((gt_boxes.shape[0],5),dtype=np.float32)\n #print('gt_boxes=',gt_boxes)\n #print('gt_boxes[:,0:8:2]=',gt_boxes[:,0:8:2] )\n #print('max_x=',np.max(gt_boxes[:,0:8:2]))\n #gt_boxes=back_forward_convert(gt_boxes,True)\n #gt_boxes=forward_convert(gt_boxes,False)\n #\n #print(\"gt_boxes=\",gt_boxes)\n\n gt_boxes_rec[:,0]=np.min(gt_boxes[:,0:8:2])#x_min\n gt_boxes_rec[:,1]=np.min(gt_boxes[:,1:8:2])# y_min\n gt_boxes_rec[:,2]=np.max(gt_boxes[:,0:8:2])#x_max\n gt_boxes_rec[:,3]=np.max(gt_boxes[:,1:8:2])#y_max\n gt_boxes_rec[:,4]=gt_boxes[:,-1]\n\n overlaps = bbox_overlaps(rois[:, 1:], gt_boxes_rec[:, :4])#######问题也在这里,带标签\n\n #overlaps = bbox_overlaps(\n # np.ascontiguousarray(rois, dtype=np.float),\n # np.ascontiguousarray(gt_boxes[:, :-1], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n #print('mx_overlap=',max_overlaps)\n labels = gt_boxes_rec[gt_assignment, -1]#\n # select foreground RoI with FG_THRESH overlap\n\n fg_indexes = np.where(max_overlaps >= fg_overlap)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))\n # sample foreground regions without replacement\n if 
len(fg_indexes) > fg_rois_this_image:\n fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)\n\n # select background RoIs as those within [0, FG_THRESH)\n bg_indexes = np.where(max_overlaps < fg_overlap)[0]\n # compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_this_image = rois_per_image - fg_rois_this_image\n bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))\n # sample bg rois without replacement\n if len(bg_indexes) > bg_rois_this_image:\n bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n # pad more bg rois to ensure a fixed minibatch size\n while len(keep_indexes) < rois_per_image:\n gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))\n gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])\n\n # sample rois and labels\n rois = rois[keep_indexes]\n labels = labels[keep_indexes]\n # set labels of bg rois to be 0\n labels[fg_rois_this_image:] = 0\n\n targets = encode_boxes_rotate(ex_rois=rois[:, 1:], gt_rois=gt_boxes_rec[gt_assignment[keep_indexes], :5])\n bbox_targets = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n bbox_weights = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n for i in range(fg_rois_this_image):\n cls_ind = int(labels[i])\n bbox_targets[i, cls_ind * 5:(cls_ind + 1) * 5] = targets[i]\n bbox_weights[i, cls_ind * 5:(cls_ind + 1) * 5] = 1\n return rois,labels,bbox_targets,bbox_weights\n \"\"\"", "def __call__(self, src, label):\r\n \"\"\"color distort\"\"\"\r\n # img = random_color_distort(src)\r\n\r\n # print(\"previous label shape = \", label.shape)\r\n target = np.zeros(shape=(label.shape[0],))\r\n\r\n \"\"\"Pyramid Anchor sampling\"\"\"\r\n img, boxes, label = self.random_baiducrop(src, label[:, :4], target)\r\n # print(\"label shape = \", label.shape)\r\n # print('boxes shape =', boxes.shape)\r\n bbox = boxes\r\n # img = mx.nd.array(img)\r\n\r\n \"\"\"color distort\"\"\"\r\n img = mx.nd.array(img)\r\n img = random_color_distort(img)\r\n\r\n # \"\"\"random crop, keep aspect ration=1\"\"\"\r\n # h, w, _ = img.shape\r\n # bbox, crop_size = random_crop_with_constraints(label, (w, h))\r\n # x_offset, y_offset, new_width, new_height = crop_size\r\n # img = mx.image.fixed_crop(img, x_offset, y_offset, new_width, new_height)\r\n\r\n \"\"\"resize with random interpolation\"\"\"\r\n h, w, _ = img.shape\r\n interp = np.random.randint(0, 5)\r\n img = gimage.imresize(img, self._width, self._height, interp=interp)\r\n bbox = gbbox.resize(bbox, (w, h), (self._width, self._height))\r\n\r\n \"\"\"random horizontal flip\"\"\"\r\n h, w, _ = img.shape\r\n img, flips = gimage.random_flip(img, px=0.5)\r\n bbox = gbbox.flip(bbox, (w, h), flip_x=flips[0])\r\n\r\n \"\"\"To Tensor & Normalization\"\"\"\r\n img = mx.nd.image.to_tensor(img)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n\r\n if self._anchors is None:\r\n return img, bbox\r\n\r\n # @TODO: generating training target so cpu workers can help reduce the workload on gpu\r\n face_anchors, head_anchors, body_anchors = self._anchors\r\n gt_bboxes = mx.nd.array(bbox[:, :4]).expand_dims(0)\r\n gt_ids = mx.nd.zeros((1, gt_bboxes.shape[1], 1), dtype=gt_bboxes.dtype)\r\n\r\n face_cls_targets, face_box_targets, _ = self._target_generator(\r\n face_anchors, None, gt_bboxes, 
gt_ids)\r\n\r\n head_cls_targets, head_box_targets, _ = self._target_generator(\r\n head_anchors, None, gt_bboxes, gt_ids)\r\n\r\n body_cls_targets, body_box_targets, _ = self._target_generator(\r\n body_anchors, None, gt_bboxes, gt_ids)\r\n\r\n return img, \\\r\n face_cls_targets[0], head_cls_targets[0], body_cls_targets[0], \\\r\n face_box_targets[0], head_box_targets[0], body_box_targets[0]" ]
[ "0.6478101", "0.6458288", "0.5927219", "0.58900374", "0.55786675", "0.55571175", "0.55210066", "0.55083823", "0.5440751", "0.54380363", "0.54371274", "0.54321146", "0.5426156", "0.54251885", "0.5415475", "0.5403779", "0.540325", "0.5401663", "0.5368239", "0.5363603", "0.53531015", "0.53447586", "0.5344055", "0.5336962", "0.53303707", "0.5320941", "0.5316991", "0.53135157", "0.53023666", "0.5295811" ]
0.68045133
1
Resample fine and coarse segmentation tensors to the given bounding box and derive labels for each pixel of the bounding box
def resample_fine_and_coarse_segm_tensors_to_bbox(
    fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox
):
    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)
    # coarse segmentation
    coarse_segm_bbox = F.interpolate(
        coarse_segm, (h, w), mode="bilinear", align_corners=False
    ).argmax(dim=1)
    # combined coarse and fine segmentation
    labels = (
        F.interpolate(fine_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
        * (coarse_segm_bbox > 0).long()
    )
    return labels
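For orientation, a minimal usage sketch of the function above. It is not part of the dataset record: the tensor shapes, channel counts, and box values are invented, and it assumes torch, torch.nn.functional as F, and the function itself (with whatever IntTupleBox alias its signature expects, e.g. a tuple of four ints) are already in scope.

import torch
import torch.nn.functional as F  # the function above relies on F.interpolate

# Assumed inputs: one detection, 2-channel coarse logits (background/foreground)
# and 25-channel fine logits at a hypothetical 112x112 predictor resolution.
fine_segm = torch.randn(1, 25, 112, 112)
coarse_segm = torch.randn(1, 2, 112, 112)
box_xywh_abs = (50, 40, 120, 200)  # hypothetical box: x, y, width, height in absolute pixels

labels = resample_fine_and_coarse_segm_tensors_to_bbox(fine_segm, coarse_segm, box_xywh_abs)
print(labels.shape)   # torch.Size([1, 200, 120]): one label per pixel of the 120x200 box
print(labels.unique())  # fine-segmentation labels where the coarse mask is foreground, 0 elsewhere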
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resample_fine_and_coarse_segm_tensors_to_bbox(\n fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox\n):\n x, y, w, h = box_xywh_abs\n w = max(int(w), 1)\n h = max(int(h), 1)\n # coarse segmentation\n coarse_segm_bbox = F.interpolate(\n coarse_segm,\n (h, w),\n mode=\"bilinear\",\n align_corners=False,\n ).argmax(dim=1)\n # combined coarse and fine segmentation\n labels = (\n F.interpolate(fine_segm, (h, w), mode=\"bilinear\", align_corners=False).argmax(dim=1)\n * (coarse_segm_bbox > 0).long()\n )\n return labels", "def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox):\n x, y, w, h = box_xywh_abs\n w = max(int(w), 1)\n h = max(int(h), 1)\n labels = F.interpolate(coarse_segm, (h, w), mode=\"bilinear\", align_corners=False).argmax(dim=1)\n return labels", "def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox):\n x, y, w, h = box_xywh_abs\n w = max(int(w), 1)\n h = max(int(h), 1)\n labels = F.interpolate(coarse_segm, (h, w), mode=\"bilinear\", align_corners=False).argmax(dim=1)\n return labels", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n\n # print(gt_boxes)\n # fang[-1] ok\n\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n all_rois[:, 1:5].data,\n gt_boxes[:, :4].data)\n max_overlaps, gt_assignment = overlaps.max(1)\n labels = gt_boxes[gt_assignment, [4]]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = (max_overlaps >= cfg.TRAIN.FG_THRESH).nonzero().view(-1)\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = ((max_overlaps < cfg.TRAIN.BG_THRESH_HI) + (max_overlaps >= cfg.TRAIN.BG_THRESH_LO) == 2).nonzero().view(-1)\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.numel() > 0 and bg_inds.numel() > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.numel())\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(fg_rois_per_image), replace=False)).long().cuda()]\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.numel() < bg_rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(bg_rois_per_image), replace=to_replace)).long().cuda()]\n elif fg_inds.numel() > 0:\n to_replace = fg_inds.numel() < rois_per_image\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = rois_per_image\n elif bg_inds.numel() > 0:\n to_replace = bg_inds.numel() < rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = torch.cat([fg_inds, bg_inds], 0)\n \n # Select sampled values from various arrays:\n labels = labels[keep_inds].contiguous()\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n # print(int(fg_rois_per_image)) -> 16\n\n rois = all_rois[keep_inds].contiguous()\n roi_scores = all_scores[keep_inds].contiguous()\n\n\n\n bbox_target_data, front_2_1_points_targets_data, front_2_2_points_targets_data, front_center_targets_data, \\\n 
back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data\\\n = _compute_targets(rois[:, 1:5].data, gt_boxes[gt_assignment[keep_inds]][:, :4].data, labels.data,\\\n gt_boxes[gt_assignment[keep_inds]][:, 5:9].data, gt_boxes[gt_assignment[keep_inds]][:, 9:13].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 13:15].data, gt_boxes[gt_assignment[keep_inds]][:, 15:19].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 19:23].data, gt_boxes[gt_assignment[keep_inds]][:, 23:25].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 25:27].data)\n\n bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights \\\n = _get_bbox_regression_labels(bbox_target_data, num_classes, front_2_1_points_targets_data, front_2_2_points_targets_data, \\\n front_center_targets_data, back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data)\n \n \n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img = img.astype(np.float32)\n\n if self.augmentation:\n img = self.random_color_aug(img)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype)\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), cls_target, box_target, box_mask", "def __call__(self, src, label, segm):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n segm = [tmask.resize(polys, (w, h), (img.shape[1], img.shape[0])) for polys in segm]\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n segm = [tmask.flip(polys, (w, h), flip_x=flips[0]) for polys in segm]\n\n # gt_masks (n, im_height, im_width) of uint8 -> float32 (cannot take uint8)\n masks = [mx.nd.array(tmask.to_mask(polys, (w, h))) for polys in segm]\n # n * 
(im_height, im_width) -> (n, im_height, im_width)\n masks = mx.nd.stack(*masks, axis=0)\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype), masks\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), masks, cls_target, box_target, box_mask", "def _preprocess_for_training(image, gt_mask):\n rand = np.random.rand(4)\n\n \"\"\" step 1. random cropping \"\"\"\n if rand[3] > 0.5:\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0],\n dtype=tf.float32,\n shape=[1, 1, 4])\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.7, 1.0),\n max_attempts=100,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n image = tf.slice(image, bbox_begin, bbox_size)\n gt_mask = tf.slice(gt_mask, bbox_begin, bbox_size)\n image.set_shape([None, None, 3])\n\n \"\"\" step 2. resize \"\"\"\n # ratio = np.random.uniform(0.8, 1.2)\n # new_width = int(300.0 * ratio)\n # new_height = int(400.0 * ratio)\n # print(new_width, new_height)\n\n new_width = 400\n new_height = 400\n\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [new_height, new_width], align_corners=False)\n image = tf.squeeze(image, axis=[0])\n\n gt_mask = tf.expand_dims(gt_mask, 0)\n gt_mask = tf.cast(gt_mask, tf.float32)\n gt_mask = tf.image.resize_bilinear(gt_mask, [new_height, new_width], align_corners=False)\n gt_mask = tf.squeeze(gt_mask, axis=[0])\n\n image = tf.reshape(image, [new_height, new_width, 3])\n gt_mask = tf.reshape(gt_mask, [new_height, new_width, 1])\n\n \"\"\" step 3. random flipping \"\"\"\n flip_thresh = tf.constant(rand[1], dtype=tf.float32)\n val = tf.constant(0.5, dtype=tf.float32)\n image, gt_mask = tf.cond(tf.greater_equal(flip_thresh, val),\n lambda : (_flip_image(image),\n _flip_gt_masks(gt_mask)),\n lambda : (image, gt_mask))\n\n\n \"\"\" step 4. random rotation \"\"\"\n rotate_thresh = tf.constant(rand[2], dtype=tf.float32)\n image, gt_mask = tf.cond(tf.greater_equal(rotate_thresh, val),\n lambda : (_rotate_images(image, gt_mask, rand[3])),\n lambda : (image, gt_mask))\n\n\n \"\"\" step 5. 
convert [0, 255] to [0.0, 1.0] \"\"\"\n image = tf.image.convert_image_dtype(image, tf.float32)\n gt_mask = tf.image.convert_image_dtype(gt_mask, tf.float32)\n image = image / 255.0\n gt_mask = gt_mask / 255.0\n\n return image, gt_mask", "def get_x_y(self, indices: List[int], raw=False):\n\n annotations = []\n batch_of_input_images, batch_of_mask_sets, batch_of_bbox_sets, batch_of_label_sets, num_labels = super(RetinaDataset, self)._get_x_y(\n indices=indices,\n autoscale=True,\n use_masks=False,\n do_preprocessing=True,\n downscale=True\n )\n\n # Extract boxes\n for batch, sets in enumerate(zip(batch_of_input_images, batch_of_bbox_sets, batch_of_label_sets)):\n image, box_set, label_set = sets\n annotations.append({\n 'bboxes': box_set,\n 'labels': label_set\n })\n\n # Uncomment for DEBUG\n # ==========================\n # # ==========================\n # if self.is_training_dataset:\n # draw = image.copy()\n #\n # draw[..., 0] += 123.68 # R\n # draw[..., 1] += 116.779 # G\n # draw[..., 2] += 103.939 # B\n #\n # for label, box in zip(label_set, box_set):\n # draw_box(draw, [int(box[1]), int(box[0]), int(box[3]), int(box[2])], color=(255, 200, 0))\n # caption = \"{} {:.3f}\".format(label, 0)\n #\n # # print(self.labels.index(obj['name']) )\n #\n # cv2.putText(\n # img=draw,\n # text=caption,\n # org=(int(box[0]), int(box[1]) - 10),\n # fontFace=cv2.FONT_HERSHEY_PLAIN,\n # fontScale=1,\n # color=(255, 200, 0),\n # thickness=1)\n #\n # from matplotlib import pyplot as plt\n # fig = plt.figure(figsize=(10,15))\n # plt.axis('off')\n # try:\n # plt.imshow(draw.astype(np.uint8))\n # except:\n # pass\n # # plt.show()\n # Image.fromarray(draw.astype('uint8')).save('train_images/{}.png'.format(randint(0, 1000)))\n # # with open('train_images/{}.png'.format(randint(0, 1000)), 'wb') as f:\n # # fig.savefig(f, format='png')\n\n # # exit(0)\n # ==========================\n # ==========================\n\n # Compute regression targets\n targets = (batch_of_input_images, annotations) if raw else self.compute_targets(batch_of_input_images, annotations)\n # batch_of_input_images = self.compute_inputs(batch_of_input_images)\n return batch_of_input_images, list(targets)", "def preprocess_pair(templar_buffer, search_buffer, templar_bbox, search_bbox, num_channels, is_training=True):\n\n '''\n *********************************** Templar image ****************************************\n * Get tight bbox, randomly shift +-8 pixels\n * Pad image to [2500, 2500] with mean RGB values\n * Crop to 256x256:\n * get tight bbox [w, h]\n * compute context margin p = (w+h)/4\n * extend bbox to [w+2p, h+2p], and get min(w+2p, h+2p)\n * extend bbox to [D, D] by adding the shorter side with max(w+2p, h+2p) - min(w+2p, h+2p)\n * crop [D, D] and rescale to [128, 128], get the rescale factor [s]\n * pad boundaries to [256,256] with mean RGB values\n \n \n *********************************** Search image ****************************************\n * Get tight bbox of the corresponding object in templar image\n * Randomly rescale in range(s*0.8, s*1.2), and update bbox position; [s] is computed during pre-process templar image\n * Pad image to [2500, 2500] with mean RGB values\n * Set bbox as the center and crop the image to [256, 256] so that search target is centered in the image\n '''\n\n # decode image buffers\n templar_img = tf.image.decode_jpeg(templar_buffer, channels=num_channels) # uint8\n search_img = tf.image.decode_jpeg(search_buffer, channels=num_channels) # uint8\n templar_bbox = tf.cast(templar_bbox, tf.int32)\n 
search_bbox = tf.cast(search_bbox, tf.int32)\n\n def return_zero_pad(x): return [0, tf.abs(x)]\n def return_iden_no_pad(x): return [x, 0]\n def return_maxW_pad(x, w_max): return [w_max - 1, x - (w_max - 1)]\n def return_maxH_pad(x, h_max): return [h_max - 1, x - (h_max - 1)]\n def flip_bbox(bbox, img_w):\n '''\n :param bbox: original bbox [xmin, ymin, xmax, ymax]\n :param img_w:\n :return: flipped bbox\n '''\n new_bbox = []\n new_bbox.append(img_w - bbox[2])\n new_bbox.append(bbox[1])\n new_bbox.append(img_w - bbox[0])\n new_bbox.append(bbox[3])\n\n return new_bbox\n\n ######################################## Process Templar #############################################\n # Get tight bbox, always keep the target at the center\n #templar_bbox = distort_bounding_box(input_bbox=templar_bbox, random_shift=8) # new box [xmin, ymin, xmax, ymax]\n # pad border in case distorted bbox out of boundary\n mean_rgb = tf.reduce_mean(tf.cast(templar_img, tf.int64)) # tf.uint8\n mean_rgb = tf.cast(mean_rgb, tf.uint8)\n #templar_img = templar_img - mean_rgb\n #pad_border, pad_border = 10, 10\n #templar_img = tf.pad(tensor=templar_img, paddings=[[pad_border, pad_border], [pad_border, pad_border],[0, 0]],\n # mode='CONSTANT', name=None, constant_values=0)\n #templar_img = templar_img + mean_rgb\n # update tight bbox position, the size stays the same, the 4 corners are updated\n #templar_bbox[0] = templar_bbox[0] + pad_border\n #templar_bbox[1] = templar_bbox[1] + pad_border\n #templar_bbox[2] = templar_bbox[2] + pad_border\n #templar_bbox[3] = templar_bbox[3] + pad_border\n bbox_h = templar_bbox[3] - templar_bbox[1]\n bbox_w = templar_bbox[2] - templar_bbox[0]\n # save the (distorted) tight bbox for display\n tight_bbox = []\n tight_bbox.append(templar_bbox[0])\n tight_bbox.append(templar_bbox[1])\n tight_bbox.append(templar_bbox[2])\n tight_bbox.append(templar_bbox[3])\n p = tf.cast((bbox_h + bbox_w) / 4, tf.int32) # get context margin and compute new bbox\n argmin_dim = tf.math.argmin([bbox_w, bbox_h], axis=0) # 0: shorter in width, 1: shorter in height\n extend_w_cond = tf.equal(argmin_dim, 0) # true if extend in width dim, otherwise extend in height dim\n extend_side_cond = tf.equal(tf.math.abs(bbox_w-bbox_h) % 2, 0) # if true, extend evenly on both side\n extend_val_left = tf.cond(extend_side_cond,\n lambda: tf.cast(tf.math.abs(bbox_w - bbox_h) / 2, tf.int32),\n lambda: tf.cast(tf.math.abs(bbox_w - bbox_h) / 2, tf.int32) + 1)\n extend_val_right = tf.cast(tf.math.abs(bbox_w-bbox_h) / 2, tf.int32)\n # get a rect bbox by extending the shorter side\n templar_bbox_new = tf.cond(extend_w_cond, lambda: extend_bbox_w(templar_bbox, extend_val_left, extend_val_right),\n lambda: extend_bbox_h(templar_bbox, extend_val_left, extend_val_right))\n ## add context margin\n templar_bbox_new = [templar_bbox_new[0]-p, templar_bbox_new[1]-p, templar_bbox_new[2]+p, templar_bbox_new[3]+p]\n tight_bbox[0] = tight_bbox[0] - templar_bbox_new[0] # [xmin, ymin, xmax, ymax]\n tight_bbox[1] = tight_bbox[1] - templar_bbox_new[1]\n tight_bbox[2] = tight_bbox[2] - templar_bbox_new[0]\n tight_bbox[3] = tight_bbox[3] - templar_bbox_new[1]\n # here the rectangular bbox might already out of boundary, must pad precise number of pixels on left/up\n img_height = tf.shape(templar_img)[0]\n img_width = tf.shape(templar_img)[1]\n [new_x_min, pad_w_begin] = tf.cond(templar_bbox_new[0] < 0, lambda :return_zero_pad(templar_bbox_new[0]), lambda :return_iden_no_pad(templar_bbox_new[0]))\n [new_x_max, pad_w_end] = tf.cond(templar_bbox_new[2] >= 
img_width, lambda :return_maxW_pad(templar_bbox_new[2], img_width), lambda :return_iden_no_pad(templar_bbox_new[2]))\n [new_y_min, pad_h_begin] = tf.cond(templar_bbox_new[1] < 0, lambda :return_zero_pad(templar_bbox_new[1]), lambda :return_iden_no_pad(templar_bbox_new[1]))\n [new_y_max, pad_h_end] = tf.cond(templar_bbox_new[3] >= img_height, lambda :return_maxH_pad(templar_bbox_new[3], img_height), lambda :return_iden_no_pad(templar_bbox_new[3]))\n # do paddings, only effective if out of boundary\n templar_img = templar_img - mean_rgb\n templar_img = tf.pad(tensor=templar_img,\n paddings=[[pad_h_begin, pad_h_end + 10], [pad_w_begin, pad_w_end + 10], [0, 0]],\n mode='CONSTANT', name=None, constant_values=0)\n templar_img = templar_img + mean_rgb\n # crop the image\n croped_templar = tf.image.crop_to_bounding_box(image=templar_img, offset_height=new_y_min,\n offset_width=new_x_min,\n target_height=templar_bbox_new[3]-templar_bbox_new[1],\n target_width=templar_bbox_new[2]-templar_bbox_new[0])\n with tf.control_dependencies([tf.debugging.assert_equal(templar_bbox_new[3] - templar_bbox_new[1],\n templar_bbox_new[2] - templar_bbox_new[0])]):\n # rescale to [127, 127], get the scale factor\n scale_s = 127.0 / tf.cast(templar_bbox_new[3] - templar_bbox_new[1], tf.float32)\n # rescale the tight bbox\n tight_temp_bbox = rescale_bbox(tight_bbox, scale_s)\n scale_s = tf.debugging.assert_all_finite(t=scale_s, msg='scale factor not a number!')\n croped_templar = tf.image.resize_bilinear(images=tf.expand_dims(croped_templar, axis=0), size=[127, 127])\n croped_templar = tf.squeeze(croped_templar, axis=0) # [h, w, 3]\n # check size\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(croped_templar)[0], 127),\n tf.debugging.assert_equal(tf.shape(croped_templar)[1], 127),\n tf.debugging.assert_equal(tf.shape(croped_templar)[2], 3)]):\n templar_final = tf.identity(croped_templar)\n\n ######################################## Process Search image #############################################\n # Get rgb mean\n mean_rgb = tf.reduce_mean(tf.cast(search_img, tf.int64)) # tf.uint8\n mean_rgb = tf.cast(mean_rgb, tf.float32)\n # Get random scale factor\n rescale_factor = scale_s * tf.random.uniform(shape=[], minval=0.8, maxval=1.2, dtype=tf.float32)\n rescale_factor = tf.debugging.assert_all_finite(t=rescale_factor, msg='rescale_factor factor not a number!')\n # Get rescaled bbox position, and the image\n search_bbox = rescale_bbox(search_bbox, rescale_factor)\n new_height = tf.cast(tf.cast(tf.shape(search_img)[0], tf.float32) * rescale_factor, tf.int32)\n new_width = tf.cast(tf.cast(tf.shape(search_img)[1], tf.float32) * rescale_factor, tf.int32)\n search_img = tf.image.resize_bilinear(images=tf.expand_dims(search_img, axis=0), size=[new_height, new_width])\n search_img = tf.squeeze(search_img, axis=0) # [h, w, 3]\n ### randomly shift bbox +-64 pixels, get the shift values and new bbox center\n search_bbox, h_shift, w_shift = distort_bounding_box(input_bbox=search_bbox, random_shift=32) # new box [xmin, ymin, xmax, ymax], h_shift, w_shift\n ### crop around the center of the bbox to [255, 255], if out of boundary, pad with mean rgb value\n img_width = tf.shape(search_img)[1]\n img_height = tf.shape(search_img)[0]\n x_center = tf.cast((search_bbox[2] - search_bbox[0]) / 2, tf.int32) + search_bbox[0]\n y_center = tf.cast((search_bbox[3] - search_bbox[1]) / 2, tf.int32) + search_bbox[1]\n x_min, x_max = x_center - 127, x_center + 127\n y_min, y_max = y_center - 127, y_center + 127\n [new_x_min, 
pad_w_begin] = tf.cond(x_min < 0, lambda :return_zero_pad(x_min), lambda :return_iden_no_pad(x_min))\n [new_x_max, pad_w_end] = tf.cond(x_max >= img_width, lambda :return_maxW_pad(x_max, img_width), lambda :return_iden_no_pad(x_max))\n [new_y_min, pad_h_begin] = tf.cond(y_min < 0, lambda :return_zero_pad(y_min), lambda :return_iden_no_pad(y_min))\n [new_y_max, pad_h_end] = tf.cond(y_max >= img_height, lambda :return_maxH_pad(y_max, img_height), lambda :return_iden_no_pad(y_max))\n # do paddings, only effective if out of boundary\n search_img = search_img - mean_rgb\n search_img = tf.pad(tensor=search_img, paddings=[[pad_h_begin, pad_h_end+10], [pad_w_begin, pad_w_end+10], [0, 0]],\n mode='CONSTANT', name=None, constant_values=0)\n search_img = search_img + mean_rgb\n # crop\n search_final = tf.image.crop_to_bounding_box(image=search_img, offset_height=new_y_min, offset_width=new_x_min,\n target_height=255, target_width=255)\n ## get tight bbox within the rescaled search img [xmin, ymin, xmax, ymax]\n bbox_h_half = tf.cast((search_bbox[3] - search_bbox[1]) / 2, tf.int32) # might be zero\n bbox_w_half = tf.cast((search_bbox[2] - search_bbox[0]) / 2, tf.int32) # might be zero\n tight_search_bbox = []\n tight_search_bbox.append(127 - bbox_w_half - w_shift) # xmin\n tight_search_bbox.append(127 - bbox_h_half - h_shift) # ymin\n tight_search_bbox.append(127 + bbox_w_half - w_shift) # xmax\n tight_search_bbox.append(127 + bbox_h_half - h_shift) # ymax\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(search_final)[0], 255),\n tf.debugging.assert_equal(tf.shape(search_final)[1], 255),\n tf.debugging.assert_equal(tf.shape(search_final)[2], 3)]):\n search_final = tf.identity(search_final)\n\n ######################################## Process Score Map GT #############################################\n # [17, 17, 1], [17, 17, 1]\n # consider 8 x (center - offset) <= 16 as positives, stride=8; also note that target in search image is already shifted\n t_center_x = 8 - tf.cast(w_shift / 8, tf.int32)\n t_center_y = 8 - tf.cast(h_shift / 8, tf.int32)\n score, score_weight = tf.py_func(func=build_gt_py, inp=[t_center_x, t_center_y], Tout=[tf.int32, tf.float32],\n stateful=True, name=None)\n \"\"\"\n score = tf.zeros([17, 17, 1], dtype=tf.int32)\n delta = tf.sparse.SparseTensor(indices=[[t_center_y, t_center_x, 0]], values=[1], dense_shape=[17,17,1])\n score = score + tf.sparse.to_dense(delta)\n score = tf.expand_dims(score, axis=0) # [1,17,17,1]\n dila_structure = np.array([[False, False, True, False, False],\n [False, True, True, True, False],\n [True, True, True, True, True],\n [False, True, True, True, False],\n [False, False, True, False, False]], dtype=bool)\n dila_structure = dila_structure.astype(np.int32)\n dila_structure = np.expand_dims(dila_structure, axis=-1) # [5,5,1]\n score = tf.nn.dilation2d(input=score, filter=dila_structure, strides=[1,1,1,1], rates=[1,1,1,1], padding='SAME')\n num_total = 17 * 17\n num_positive = tf.reduce_sum(score)\n num_negative = num_total - num_positive\n weight_positive = tf.cast(num_negative, tf.float32) / tf.cast(num_total, tf.float32)\n weight_negative = tf.cast(num_positive, tf.float32) / tf.cast(num_total, tf.float32)\n mat_positive = tf.cast(score, tf.float32) * weight_positive # float\n mat_negative = (1.0 - tf.cast(score, tf.float32)) * weight_negative # float\n score_weight = mat_positive + mat_negative\n score = tf.squeeze(score, 0)\n score_weight = tf.squeeze(score_weight, 0)\n \"\"\"\n # check size\n with 
tf.control_dependencies([tf.debugging.assert_equal(tf.shape(score)[0], 17),\n tf.debugging.assert_equal(tf.shape(score)[1], 17),\n tf.debugging.assert_equal(tf.shape(score)[2], 1),\n tf.debugging.assert_equal(tf.shape(score_weight)[0], 17),\n tf.debugging.assert_equal(tf.shape(score_weight)[1], 17),\n tf.debugging.assert_equal(tf.shape(score_weight)[2], 1)]):\n score = tf.identity(score)\n score_weight = tf.identity(score_weight)\n\n ################################### Randomly flip templar/search images ####################################\n flip_v = tf.random.uniform(shape=[]) # scalar\n flip_v = tf.greater_equal(flip_v, 0.5)\n templar_final = tf.cond(flip_v, lambda : tf.image.flip_left_right(image=templar_final), lambda :templar_final)\n search_final = tf.cond(flip_v, lambda: tf.image.flip_left_right(image=search_final), lambda: search_final)\n score = tf.cond(flip_v, lambda :tf.image.flip_left_right(image=score), lambda :score)\n score_weight = tf.cond(flip_v, lambda :tf.image.flip_left_right(image=score_weight), lambda :score_weight)\n tight_search_bbox = tf.cond(flip_v, lambda :flip_bbox(tight_search_bbox, 255), lambda :tight_search_bbox)\n\n templar_final = mean_image_subtraction(templar_final, _CHANNEL_MEANS, num_channels)\n search_final = mean_image_subtraction(search_final, _CHANNEL_MEANS, num_channels)\n\n return templar_final, search_final, score, score_weight, tight_temp_bbox, tight_search_bbox", "def decode(yolo_output, num_of_anchor_bbox, classes, strides, anchors, index):\n \"\"\" takes in tensor of shape (batch_size, gridsize_x, gridsize_y, number of anchor boxes, number of classes) \"\"\"\n \"\"\" returns tesnor of shape (batch_size, gridsize_x, gridsize_y, number of anchor boxes, number of classes) \"\"\"\n \n # takes in original anchors and process to scaled anchors based on strides for respective scales\n anchors_scaled = (np.array(anchors).T/strides).T\n \n # obtain dimensions from yolo_output\n conv_shape = tf.shape(yolo_output)\n batch_size = conv_shape[0]\n grid_size = conv_shape[1:3]\n\n # reshape yolo_output\n yolo_output = tf.reshape(yolo_output, (batch_size, grid_size[0], grid_size[1], num_of_anchor_bbox, 5 + classes))\n\n # split yolo_output along last axis to extract features\n raw_dx_dy, raw_dw_dh, raw_objectiveness, raw_class_probs = tf.split(yolo_output, (2, 2, 1, classes), axis = -1)\n\n # create grid where grid[x][y] == (y, x)\n xy_grid = tf.meshgrid(tf.range(grid_size[1]), tf.range(grid_size[0]))\n\n # reshape to [gx, gy, 1, 2] and cast to float32 data type\n xy_grid = tf.expand_dims(tf.stack(xy_grid, axis = -1), axis = 2) \n xy_grid = tf.cast(xy_grid, tf.float32)\n\n # calculate the center position of the prediction box (train_input_size):\n pred_xy = (tf.sigmoid(raw_dx_dy) + xy_grid) * strides[index]\n\n # calculate the length and width of the prediction box (train_input_size):\n pred_wh = (tf.exp(raw_dw_dh) * anchors_scaled[index]) * strides[index]\n\n # concatenate pred_xy and pred_wh\n pred_xywh = tf.concat([pred_xy, pred_wh], axis = -1)\n\n # objectiveness score\n pred_objectiveness = tf.sigmoid(raw_objectiveness) \n\n # class probabilities\n pred_prob = tf.sigmoid(raw_class_probs) \n\n # concatenate decoded results\n pred = tf.concat([pred_xywh, pred_objectiveness, pred_prob], axis = -1)\n\n return pred", "def preprocessing(image_data, final_height, final_width, label_id, apply_augmentation=False, evaluate=False):\n img = image_data[\"image\"]\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = tf.cast(image_data[\"objects\"][\"label\"] 
+ 1, tf.int32) # add 1 for background\n\n # delete gt_boxe and gt_label entrys that do not belong to label_id\n person_or_not = gt_labels == (label_id + 1) # + 1 since the lable background is added\n gt_boxes = gt_boxes[person_or_not]\n gt_labels = gt_labels[person_or_not]\n gt_labels = gt_labels - label_id # since just one lable is used it is identified with 1\n\n if evaluate:\n not_diff = tf.logical_not(image_data[\"objects\"][\"is_difficult\"])\n gt_boxes = gt_boxes[not_diff]\n gt_labels = gt_labels[not_diff]\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = tf.image.resize(img, (final_height, final_width))\n if apply_augmentation:\n img, gt_boxes = randomly_apply_operation(flip_horizontally, img, gt_boxes)\n return img, gt_boxes, gt_labels", "def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, box_stds=None):\n gt_boxes_coodinate_convert = back_forward_convert(gt_boxes, True) # return [x_c,y_c,w,h,theta,label]\n theta=gt_boxes_coodinate_convert[:,4]\n real_label=gt_boxes_coodinate_convert[:,5]\n\n\n gt_boxes_rec_with_label=np.zeros((gt_boxes.shape[0],6),dtype=np.float32)\n gt_boxes_rec_with_label[:,0]=np.min(gt_boxes[:,0:8:2])#x_min\n gt_boxes_rec_with_label[:,1]=np.min(gt_boxes[:,1:8:2])# y_min\n gt_boxes_rec_with_label[:,2]=np.max(gt_boxes[:,0:8:2])#x_max\n gt_boxes_rec_with_label[:,3]=np.max(gt_boxes[:,1:8:2])#y_max\n\n gt_boxes_rec_with_label[:,4] = theta #真实的旋转角度\n gt_boxes_rec_with_label[:,5]=real_label#gt_boxes[:,-1]#真实的标签\n\n overlaps = bbox_overlaps(rois[:, 1:], gt_boxes_rec_with_label[:, :4])\n\n #overlaps = bbox_overlaps(\n # np.ascontiguousarray(rois, dtype=np.float),\n # np.ascontiguousarray(gt_boxes[:, :-1], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n #print('mx_overlap=',max_overlaps)\n labels = gt_boxes_rec_with_label[gt_assignment, -1]#\n # select foreground RoI with FG_THRESH overlap\n\n fg_indexes = np.where(max_overlaps >= fg_overlap)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))\n # sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_this_image:\n fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)\n\n # select background RoIs as those within [0, FG_THRESH)\n bg_indexes = np.where(max_overlaps < fg_overlap)[0]\n # compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_this_image = rois_per_image - fg_rois_this_image\n bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))\n # sample bg rois without replacement\n if len(bg_indexes) > bg_rois_this_image:\n bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n # pad more bg rois to ensure a fixed minibatch size\n while len(keep_indexes) < rois_per_image:\n gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))\n gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])\n\n # sample rois and labels\n rois = rois[keep_indexes]\n labels = labels[keep_indexes]\n # set labels of bg rois to be 0\n labels[fg_rois_this_image:] = 0\n\n targets = encode_boxes_rotate(ex_rois=rois[:, 1:], gt_rois=gt_boxes_rec_with_label[gt_assignment[keep_indexes], :5])\n bbox_targets = np.zeros((rois_per_image, 5 
* num_classes), dtype=np.float32)\n bbox_weights = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n for i in range(fg_rois_this_image):\n cls_ind = int(labels[i])\n bbox_targets[i, cls_ind * 5:(cls_ind + 1) * 5] = targets[i]\n bbox_weights[i, cls_ind * 5:(cls_ind + 1) * 5] = 1\n return rois,labels,bbox_targets,bbox_weights\n\n\n\n \"\"\"\n gt_boxes_rec=np.zeros((gt_boxes.shape[0],5),dtype=np.float32)\n #print('gt_boxes=',gt_boxes)\n #print('gt_boxes[:,0:8:2]=',gt_boxes[:,0:8:2] )\n #print('max_x=',np.max(gt_boxes[:,0:8:2]))\n #gt_boxes=back_forward_convert(gt_boxes,True)\n #gt_boxes=forward_convert(gt_boxes,False)\n #\n #print(\"gt_boxes=\",gt_boxes)\n\n gt_boxes_rec[:,0]=np.min(gt_boxes[:,0:8:2])#x_min\n gt_boxes_rec[:,1]=np.min(gt_boxes[:,1:8:2])# y_min\n gt_boxes_rec[:,2]=np.max(gt_boxes[:,0:8:2])#x_max\n gt_boxes_rec[:,3]=np.max(gt_boxes[:,1:8:2])#y_max\n gt_boxes_rec[:,4]=gt_boxes[:,-1]\n\n overlaps = bbox_overlaps(rois[:, 1:], gt_boxes_rec[:, :4])#######问题也在这里,带标签\n\n #overlaps = bbox_overlaps(\n # np.ascontiguousarray(rois, dtype=np.float),\n # np.ascontiguousarray(gt_boxes[:, :-1], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n #print('mx_overlap=',max_overlaps)\n labels = gt_boxes_rec[gt_assignment, -1]#\n # select foreground RoI with FG_THRESH overlap\n\n fg_indexes = np.where(max_overlaps >= fg_overlap)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))\n # sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_this_image:\n fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)\n\n # select background RoIs as those within [0, FG_THRESH)\n bg_indexes = np.where(max_overlaps < fg_overlap)[0]\n # compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_this_image = rois_per_image - fg_rois_this_image\n bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))\n # sample bg rois without replacement\n if len(bg_indexes) > bg_rois_this_image:\n bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n # pad more bg rois to ensure a fixed minibatch size\n while len(keep_indexes) < rois_per_image:\n gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))\n gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])\n\n # sample rois and labels\n rois = rois[keep_indexes]\n labels = labels[keep_indexes]\n # set labels of bg rois to be 0\n labels[fg_rois_this_image:] = 0\n\n targets = encode_boxes_rotate(ex_rois=rois[:, 1:], gt_rois=gt_boxes_rec[gt_assignment[keep_indexes], :5])\n bbox_targets = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n bbox_weights = np.zeros((rois_per_image, 5 * num_classes), dtype=np.float32)\n for i in range(fg_rois_this_image):\n cls_ind = int(labels[i])\n bbox_targets[i, cls_ind * 5:(cls_ind + 1) * 5] = targets[i]\n bbox_weights[i, cls_ind * 5:(cls_ind + 1) * 5] = 1\n return rois,labels,bbox_targets,bbox_weights\n \"\"\"", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\r\n # MP:\r\n # overlaps: (no_rois x no_gt_bbox) each row gives the overlap of the proposed region with the gt boxes. 
Overlap is measured as: (overlapping area)/(union area).\r\n # gt_assignment: determines which of the gt boxes has more overlap with the regions\r\n # max_overlaps: takes the maximum overlap of a region\r\n # labels: defines which which gt box corresponds best with the region and assigns its label to the region\r\n # fg_rois_per_image = 8\r\n # overlaps: (rois x gt_boxes)\r\n\r\n # MP: bbox_overlaps rewritten as c_bbox_overlaps\r\n #overlaps =c_bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n # \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n overlaps = bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n # MP: which column index has maximum value\r\n gt_assignment = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps.max(axis=1)\r\n labels = gt_boxes[gt_assignment, 4]\r\n\r\n\r\n # MP: Extract RoIs where overlap >= FG_THRESH\r\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\r\n\r\n # Guard against the case when an image has fewer than fg_rois_per_image (i.e. 8)\r\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\r\n\r\n # Sample foreground regions without replacement\r\n if fg_inds.size > 0:\r\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\r\n\r\n # MP: Extract RoIs where overlap in [BG_THRESH_LO, BG_THRESH_HI), i.e. [0.0, 0.5)\r\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\r\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\r\n\r\n # Compute number of background RoIs to take from this image (guarding\r\n # against there being fewer than desired)\r\n # MP: Take the no of bg_inds such that fg_inds.shape + bg_inds.shape = 32\r\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\r\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\r\n if bg_inds.size > 0:\r\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\r\n\r\n\r\n # MP: concatenate the fg_inds and bg_inds, such that keep_inds.shape = 32\r\n keep_inds = np.append(fg_inds, bg_inds)\r\n # MP: obtain the labels set the ones corresponding to bg_inds to zero\r\n labels = labels[keep_inds]\r\n labels[int(fg_rois_per_this_image):] = 0\r\n\r\n # MP: select the 32 rois (fg & bg) from the 2000+ rois with the keep_inds\r\n rois = all_rois[keep_inds]\r\n # MP: fg rois\r\n rois_pos = np.zeros((fg_inds.size, 5), dtype=np.float32) #because return rois_pos as top ---> allocate memory for it\r\n rois_pos[:, :] = all_rois[fg_inds]\r\n gt_assignment_pos = gt_assignment[fg_inds]\r\n\r\n # MP: compute diff to approximate bbox to ground truth\r\n bbox_target_data = _compute_targets(\r\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\r\n\r\n # MP: set the diff values in a matrix where each row corresponds to a foreground bbox\r\n # and the values are stored starting at the index of the label.\r\n # Therefore number of columns: 4*(no labels)\r\n # The bg bboxes are also included in rows, but have all values equal to zero.\r\n bbox_targets, bbox_inside_weights = \\\r\n _get_bbox_regression_labels(bbox_target_data, num_classes)\r\n\r\n '''\r\n # MP: printing and saving files\r\n print \"overlaps with size {}: {}\".format(overlaps.shape, overlaps)\r\n print \"gt_assignment with size {}: {}\".format(gt_assignment.shape, gt_assignment)\r\n print \"max_overlaps with size{}: {}\".format(max_overlaps.shape, max_overlaps)\r\n print \"labels with size{}: {}\".format(labels.shape, labels)\r\n print 
\"bg_inds with size{}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"bg_rois_per_this_image: {}\".format(bg_rois_per_this_image)\r\n print \"bg_inds with shape {}: {}\".format(bg_inds.shape, bg_inds)\r\n print \"fg_inds with size {}: {}\".format(fg_inds.shape, fg_inds)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois wiht shape {}: {}\".format(rois.shape, rois)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n print \"rois_pos wiht shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n print \"gt_assignment_pos wiht shape {}: {}\".format(gt_assignment_pos.shape, gt_assignment_pos)\r\n print \"bbox_target_data wiht shape {}: {}\".format(bbox_target_data.shape, bbox_target_data)\r\n print \"diff: {}\".format(rois_pos[:,:] + bbox_target_data[0:fg_inds.size,:])\r\n print \"bbox_targets with size {}: {}\".format(bbox_targets.shape, bbox_targets)\r\n print \"bbox_inside_weights with size {}: {}\".format(bbox_inside_weights.shape, bbox_inside_weights)\r\n\r\n np.savetxt('bbox_targets.txt', bbox_targets, delimiter=',')\r\n np.savetxt('bbox_inside_weights.txt', bbox_inside_weights, delimiter=',')\r\n '''\r\n\r\n return labels, rois, bbox_targets, bbox_inside_weights, gt_boxes[gt_assignment[keep_inds], :], rois_pos, gt_assignment_pos", "def _extract_sample(self, features, masks, imin, imax, shapev, needslabels=False, one_hot=True):\n\n # prepare containers\n tempdata = np.zeros([len(features)] + self.w, dtype=np.float32)\n featuredata = [f.squeeze() for f in features]\n templabels = []\n\n # accumulate mean and std for normalization\n if self.whiten and not self.whiten_subvolumes:\n numvoxs = [\n np.prod([s if g is None else g for g, s in zip(self.presize_for_normalization, f.squeeze().shape)]) for\n f in featuredata]\n means = [np.sum(f) * 1.0 / n for f, n in zip(featuredata, numvoxs)]\n stddevs = [np.sqrt(np.abs(np.mean((featuredata[i] - means[i]) ** 2))) for i in range(len(featuredata))]\n\n if np.sum(self.deform) + np.sum(self.rotation) + np.sum(self.scaling) + np.sum(\n self.shift) == 0 and not self.interpolate_always: # No deformation/scaling/rotation\n # infer the valid part of subvolume in both source and target\n ranges = np.zeros((len(imin), 2), dtype=np.int32)\n ranges[:, 1] = 1\n ranges[:len(self.w), 1] = self.w\n imin = np.int32(imin)\n imax = np.int32(imax)\n for i in range(len(imin)):\n if imin[i] < 0:\n ranges[i, 0] -= imin[i]\n imin[i] -= imin[i]\n if imax[i] >= shapev[i]:\n ranges[i, 1] -= ((imax[i] - shapev[i]))\n imax[i] -= ((imax[i] - shapev[i]))\n # now index accordingly:\n targetindex = tuple([slice(None)] + [slice(np.int32(r[0]), np.int32(r[1])) for r in ranges])\n sourcesindex = tuple([slice(np.int32(mi), np.int32(ma)) for mi, ma in zip(imin, imax)])\n tempdata[targetindex] = np.asarray([f[sourcesindex] for f in featuredata])\n\n if len(masks):\n templabels = np.zeros(self.w, dtype=np.uint8)\n templabels[targetindex[1:]] = np.asarray([f.squeeze()[sourcesindex] for f in masks])\n if one_hot and not self.regression:\n templabels = self._one_hot_vectorize(templabels, self.nclasses, zero_out_label=self.zero_out_label)\n\n\n else: # we need to interpolate\n coords = np.float64(np.mgrid[[slice(np.int32(imi), np.int32(ima)) for imi, ima in zip(imin, imax)]])\n # coords = np.mgrid[imin[0]:imax[0],imin[1]:imax[1],imin[2]:imax[2]]\n coords = self.transformAffine(coords)\n if np.sum(self.deform):\n # create deformationfield:\n deform = 
self._get_deform_field_dm\n\n self.deformfield = deform()\n coords += self.deformfield\n\n # and set accordingly:\n if len(masks):\n if one_hot and not self.regression:\n if len(masks) > 1:\n logging.getLogger('data').error(\n 'cant have more than one mask with one_hot encoding in griddatacollection')\n if self.softlabels:\n mask = self._one_hot_vectorize(np.int32(masks[0]), self.nclasses,\n zero_out_label=self.zero_out_label)\n templabels = [map_coordinates(mask[..., c].squeeze(), coords, order=1, cval=np.float32(c == 0))\n for c in range(self.nclasses)]\n templabels = np.concatenate([np.expand_dims(l, -1) for l in templabels], axis=-1)\n else:\n templabels = map_coordinates(masks[0].squeeze(), coords, order=0)\n templabels = self._one_hot_vectorize(templabels, self.nclasses,\n zero_out_label=self.zero_out_label)\n\n if needslabels:\n if np.sum(np.asarray(templabels[..., self.minlabel:])) == 0:\n return [], []\n\n else:\n # logging.getLogger('data').warning(\n # 'maybe you want to revise this section before using! when do we not need a onehot?')\n templabels = np.asarray(\n [map_coordinates(f.squeeze(), coords, order=1 if self.softlabels else 0) for f in masks])\n templabels = templabels.transpose([i for i in range(1, len(templabels.shape))] + [0])\n if needslabels:\n if np.sum(templabels >= self.minlabel) == 0:\n return [], []\n tempdata = [map_coordinates(np.float32(f).squeeze(), coords, mode=self.padding_rule,\n order=self.interpolation_order) for f in features]\n tempdata = [x.reshape((self.w + [1])) for x in tempdata] # FIXME: maybe we can just use expand_dims?\n if self.whiten:\n if self.whiten_subvolumes:\n raise Exception('not supported anymore')\n # for i in range(len(tempdata)):\n # tempdata[i] = tempdata[i] - np.mean(tempdata[i])\n # tempdata[i] /= np.sqrt(np.mean(tempdata[i] ** 2)) + 1e-20\n elif self.half_gaussian_clip:\n raise Exception('not supported anymore')\n # tempdata = [np.clip((x - means[i]) / (5 * stddevs[i]) - 1, -0.99999, 0.99999) for i, x in\n # enumerate(tempdata)]\n else:\n tempdata = [(x - means[i]) / stddevs[i] for i, x in enumerate(tempdata)]\n if self.vary_mean > 0 or self.vary_stddev > 0:\n tempdata = [x * ((self.deformrandomstate.rand() - 0.5) * self.vary_stddev + 1) + (\n self.deformrandomstate.rand() - 0.5) * self.vary_mean for x in tempdata]\n tempdata = np.concatenate(tempdata, -1)\n\n if np.sum(self.mirror):\n fr = []\n orig = []\n for i in self.mirror:\n fr.append(slice(None, None, np.int32(1 - self.deformrandomstate.randint(2) * i * 2)))\n orig.append(slice(None))\n fr.append(slice(None)) # features / labels\n orig.append(slice(None))\n tempdata[orig] = tempdata[fr]\n templabels[orig] = templabels[fr]\n if self.gaussiannoise > 0:\n tempdata *= (1 + (self.deformrandomstate.rand(*tempdata.shape) - 0.5) * self.gaussiannoise)\n return tempdata, templabels", "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = 
tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def classify(self):\n infer = self.model.signatures['serving_default']\n for i, original_image in enumerate(self.images):\n image = original_image.copy()\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image = cv.resize(image, (self.image_size, self.image_size))\n image = image / 255.\n\n image = [image]\n image = np.asarray(image).astype(np.float32)\n batch_data = tf.constant(image)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=10,\n max_total_size=10,\n iou_threshold=FLAGS.iou,\n score_threshold=FLAGS.score\n )\n\n height, width, _ = original_image.shape\n\n print(scores)\n classes = classes[0]\n print(classes)\n\n bbox = boxes[0][0].numpy()\n bbox[0] = int(bbox[0] * height)\n bbox[2] = int(bbox[2] * height)\n bbox[1] = int(bbox[1] * width)\n bbox[3] = int(bbox[3] * width)\n\n if BIRD_CLASS in classes:\n idx = np.where(classes == BIRD_CLASS)\n bbox = bbox.astype(np.int)\n x = int((bbox[1] + bbox[3]) / 2)\n y = int((bbox[0] + bbox[2]) / 2)\n self.thumbnail_center.append((x, y))\n cropped_img = original_image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n self.bird_images.append(cropped_img)\n self.confidence_arr.append(scores[idx[0][0]][0])\n\n self.generate_thumbnail(size=150)", "def distorted_bounding_box_crop(image,\n labels,\n bboxes,\n min_object_covered=0.05,\n aspect_ratio_range=(0.9, 1.1),\n area_range=(0.1, 1.0),\n max_attempts=200,\n scope=None):\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bboxes]):\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n bboxes = tf.minimum(bboxes, 1.0)\n bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.expand_dims(bboxes, 0),\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n distort_bbox)\n \n #tf_image.tf_summary_image(dst_image, bboxes, 'images_with_bounding_box')\n tf.summary.image('images_with_bounding_box', image_with_box)\n\n distort_bbox = distort_bbox[0, 0]\n\n # Crop the image to the specified bounding box.\n cropped_image = tf.slice(image, bbox_begin, bbox_size)\n cropped_image.set_shape([None, None, 3])\n # Update bounding boxes: resize and filter out.\n bboxes = tfe.bboxes_resize(distort_bbox, bboxes)\n labels, bboxes, num = tfe.bboxes_filter_overlap(labels, bboxes,\n BBOX_CROP_OVERLAP)\n return cropped_image, labels, bboxes, distort_bbox,num", "def 
preprocess_for_train(image, labels, bboxes,\n out_shape, data_format='NHWC',\n scope='textbox_process_train'):\n\n with tf.name_scope(scope, 'textbox_process_train', [image, labels, bboxes]):\n if image.get_shape().ndims != 3:\n raise ValueError('Input must be of size [height, width, C>0]')\n\n \n # Convert to float scaled [0, 1].\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n \n tf_image.tf_summary_image(image, bboxes, 'image_color_origin')\n\n # Distort image and bounding boxes.\n bboxes = tf.minimum(bboxes, 1.0)\n bboxes = tf.maximum(bboxes, 0.0)\n dst_image, labels, bboxes, distort_bbox ,num= \\\n distorted_bounding_box_crop(image, labels, bboxes,\n aspect_ratio_range=CROP_RATIO_RANGE)\n\n tf_image.tf_summary_image(dst_image, bboxes, 'image_color_distorted')\n\n #dst_image = tf_image.resize_image( dst_image,out_shape,\n # method=tf.image.ResizeMethod.BILINEAR,\n # align_corners=False\n # )\n\n \n # Resize image to output size.\n dst_image ,bboxes = \\\n tf_image.resize_image_bboxes_with_crop_or_pad(dst_image, bboxes,\n out_shape[0],out_shape[1])\n\n tf_image.tf_summary_image(dst_image, bboxes, 'image_color_resize')\n\n # Randomly flip the image horizontally.\n dst_image, bboxes = tf_image.random_flip_left_right(dst_image, bboxes)\n\n #dst_image = tf_image.resize_image(dst_image, out_shape,\n # method=tf.image.ResizeMethod.BILINEAR,\n # align_corners=False)\n\n tf_image.tf_summary_image(dst_image, bboxes, 'random_flip')\n #dst_image.set_shape([None, None, 3])\n #dst_image.set_shape([out_shape[0], out_shape[1], 3])\n # Rescale to normal range\n image = dst_image * 255.\n #dst_image = tf.cast(dst_image,tf.float32)\n return image, labels, bboxes,num", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.size > 0 and bg_inds.size > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.size < bg_rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)\n elif fg_inds.size > 0:\n to_replace = fg_inds.size < rois_per_image\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = rois_per_image\n elif bg_inds.size > 0:\n to_replace = bg_inds.size < rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp 
labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n rois = all_rois[keep_inds]\n roi_scores = all_scores[keep_inds]\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def reformat(x, y):\r\n # img_size, num_ch, num_class = int(np.sqrt(x.shape[1])), 1, len(np.unique(np.argmax(y, 1)))\r\n img_size, num_ch, num_class = 14, 1, 16\r\n dataset = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)\r\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32) # =[1 2 3 ... 
10]??\r\n return dataset, labels", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # foreground RoIs\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\n # Sample foreground regions without replacement\n if fg_inds.size > 0:\n fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)\n\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < BG_THRESH_HI) &\n (max_overlaps >= BG_THRESH_LO))[0]\n # Compute number of background RoIs to take from this image (guarding\n # against there being fewer than desired)\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\n # Sample background regions without replacement\n if bg_inds.size > 0:\n bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[fg_rois_per_this_image:] = 0\n rois = all_rois[keep_inds]\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, bbox_targets, bbox_inside_weights", "def predictor_output_with_fine_and_coarse_segm_to_mask(\n predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType\n) -> BitMasks:\n H, W = image_size_hw\n boxes_xyxy_abs = boxes.tensor.clone()\n boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)\n N = len(boxes_xywh_abs)\n masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)\n for i in range(len(boxes_xywh_abs)):\n box_xywh = make_int_box(boxes_xywh_abs[i])\n labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh)\n x, y, w, h = box_xywh\n masks[i, y : y + h, x : x + w] = labels_i > 0\n return BitMasks(masks)", "def predictor_output_with_fine_and_coarse_segm_to_mask(\n predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType\n) -> BitMasks:\n H, W = image_size_hw\n boxes_xyxy_abs = boxes.tensor.clone()\n boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)\n N = len(boxes_xywh_abs)\n masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)\n for i in range(len(boxes_xywh_abs)):\n box_xywh = make_int_box(boxes_xywh_abs[i])\n labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh)\n x, y, w, h = box_xywh\n masks[i, y : y + h, x : x + w] = labels_i > 0\n return BitMasks(masks)", "def nextBatch(self, TRAIN=True, d=False):\n while True:\n if TRAIN==True:\n idx=np.random.randint(self.split*self.total)\n else:\n idx=np.random.randint(self.split*self.total,high=self.total)\n \n if len(self.roidb[idx])!=0:\n break\n \n 
data=self.imdb[idx][np.newaxis,:]\n gt_boxes=np.array(self.roidb[idx])\n \n maskdb=self.maskdb[idx]\n mask_max_x=0\n mask_max_y=0\n for ins in maskdb:\n if ins.shape[0]>mask_max_y:\n mask_max_y=ins.shape[0]\n if ins.shape[1]>mask_max_x:\n mask_max_x=ins.shape[1]\n\n gt_masks=np.zeros((len(maskdb),mask_max_y,mask_max_x))\n mask_info=np.zeros((len(maskdb),2))\n for j in range(len(maskdb)):\n mask=maskdb[j]\n mask_x=mask.shape[1]\n mask_y=mask.shape[0]\n gt_masks[j,0:mask_y,0:mask_x]=mask\n mask_info[j,0]=mask_y\n mask_info[j,1]=mask_x\n\n blobs={\n 'data': data,\n 'gt_boxes': gt_boxes,\n 'im_info': np.array([[data.shape[2],data.shape[3],1]], dtype=np.float32),\n 'gt_masks':gt_masks,\n 'mask_info':mask_info\n }\n if d: \n # i is always 1, in ultrasound case\n for i in range(blobs['data'].shape[0]):\n print blobs['im_info']\n print blobs['mask_info']\n print blobs['gt_boxes']\n img=blobs['data'][0,0]\n print img.shape\n fig=plt.figure()\n ax=fig.add_subplot(111)\n plt.imshow(img)\n for j,bbox in enumerate(gt_boxes):\n blank=np.zeros_like(img)\n print blank.shape,maskdb[j].shape,bbox\n blank[bbox[1]:maskdb[j].shape[0]+bbox[1],bbox[0]:maskdb[j].shape[1]+bbox[0]]=maskdb[j]\n blank[blank>0]=1\n plt.imshow(blank,alpha=.9)\n ax.add_patch(patches.Rectangle((bbox[0],bbox[1]),bbox[2]-bbox[0],bbox[3]-bbox[1],fill=False))\n plt.text(bbox[0],bbox[1],bbox[-1],bbox=dict(facecolor='blue',alpha=0.5),fontsize=14, color='white')\n plt.show()\n for i in blobs:\n print i,blobs[i].shape\n print ''\n return blobs", "def make_sub_data_train(data, config):\n sub_input_sequence = []\n sub_label_sequence = []\n\n\tfor scale in range(2,5):\t \n\n\t for i in range(len(data)):\n\n\t\t#input_, label_, = preprocess(data[i], config.scale) # do bicbuic only one scale\n\t\tinput_, label_, = preprocess(data[i], scale) # do bicbuic turn around all scale\n\t\n\t\tif len(input_.shape) == 3: # is color\n\t\t h, w, c = input_.shape\n\t\telse:\n\t\t h, w = input_.shape # is grayscale\n\t\n\t\t#checkimage(input_)\t\t\n\n\t\tnx, ny = 0, 0\n\t\tfor x in range(0, h - config.image_size + 1, config.stride):\n\t\t nx += 1; ny = 0\n\t\t for y in range(0, w - config.image_size + 1, config.stride):\n\t\t\tny += 1\n\n\t\t\tsub_input = input_[x: x + config.image_size, y: y + config.image_size] # 41 * 41\n\t\t\tsub_label = label_[x: x + config.label_size, y: y + config.label_size] # 41 * 41\n\n\n\t\t\t# Reshape the subinput and sublabel\n\t\t\tsub_input = sub_input.reshape([config.image_size, config.image_size, config.c_dim])\n\t\t\tsub_label = sub_label.reshape([config.label_size, config.label_size, config.c_dim])\n\n\t\t\t# Normialize\n\t\t\tsub_input = sub_input / 255.0\n\t\t\tsub_label = sub_label / 255.0\n\t\t\t\n\t\t\t#cv2.imshow(\"im1\",sub_input)\n\t\t\t#cv2.imshow(\"im2\",sub_label)\n\t\t\t#cv2.imshow(\"residual\",sub_input - sub_label)\n\t\t\t#cv2.waitKey(0)\n\n\t\t\t# Rotate 90,180,270\n\t\t\tfor angle in range(0,360,90):\t\n\t\t\t\tsub_input = rotate(sub_input,angle)\t\n\t\t\t\tsub_label = rotate(sub_label,angle)\t\n\t\t\n\t\t\t\t# Add to sequence\n\t\t\t\tsub_input_sequence.append(sub_input)\n\t\t\t\tsub_label_sequence.append(sub_label)\n\n\t\t\t\tcv2.imshow(\"im1\",sub_input)\n\t\t\t\tcv2.imshow(\"im2\",sub_label)\n\t\t\t\tcv2.imshow(\"residual\",sub_input - sub_label)\n\t\t\t\tcv2.waitKey(1)\n\t\t\t\t\n\n \n # NOTE: The nx, ny can be ignore in train\n return sub_input_sequence, sub_label_sequence, nx, ny", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes, has_gt):\n\t# overlaps: (rois x 
gt_boxes)\n\toverlaps = bbox_overlaps(\n\t\tnp.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n\t\tnp.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n\tgt_assignment = overlaps.argmax(axis=1)\n\tmax_overlaps = overlaps.max(axis=1)\n\tlabels = gt_boxes[gt_assignment, 4]\n\n\t# Select foreground RoIs as those with >= FG_THRESH overlap\n\tfg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n\t# Guard against the case when an image has fewer than fg_rois_per_image\n\t# foreground RoIs\n\tfg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\n\t# Sample foreground regions without replacement\n\tif fg_inds.size > 0:\n\t\tfg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\n\n\t# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n\tbg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n\t\t\t\t\t (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\t# Compute number of background RoIs to take from this image (guarding\n\t# against there being fewer than desired)\n\tbg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\n\tbg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\n\t# Sample background regions without replacement\n\tif bg_inds.size > 0:\n\t\tbg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\n\n\t# The indices that we're selecting (both fg and bg)\n\tif has_gt:\n\t\tkeep_inds = np.append(fg_inds, bg_inds)\n\telse:\n\t\tkeep_inds = bg_inds\n\n\t# Select sampled values from various arrays:\n\tlabels = labels[keep_inds]\n\t# Clamp labels for the background RoIs to 0\n\tlabels[int(fg_rois_per_this_image):] = 0\n\trois = all_rois[keep_inds]\n\n\t# bbox_target_data: [cls tx ty tw th]\n\tbbox_target_data = _compute_targets(\n\t\trois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n\tbbox_targets, bbox_inside_weights = \\\n\t\t_get_bbox_regression_labels(bbox_target_data, num_classes)\n\n\treturn labels, rois, bbox_targets, bbox_inside_weights", "def nms_all_class(bound_corr_objs, nms_thresh):\n bboxs, scores, masks, labels = [], [], [], []\n for obj in bound_corr_objs:\n bboxs.append(obj['box'])\n scores.append(obj['score'])\n # masks.append(obj['mask'])\n # labels.append(obj['label'])\n bboxs = np.asarray(bboxs)\n scores = np.asarray(scores)\n # masks = np.asarray(masks)\n # labels = np.asarray(labels)\n x1 = bboxs[:, 0]\n y1 = bboxs[:, 1]\n x2 = bboxs[:, 2]\n y2 = bboxs[:, 3]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n # cfvalid_ids = np.where(scores >= cf_thresh)[0]\n # scores = scores[cfvalid_ids]\n\n order = scores.argsort()[::-1]\n # mask_sizes = np.sum(masks, axis=(1, 2))\n # order = mask_sizes.argsort()[::-1]\n keep = []\n suppress = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n iou = inter / (areas[i] + areas[order[1:]] - inter)\n\n # mask_other = masks[order[1:], :, :]\n # mask_cur = masks[i, :, :]\n # mask_inter = np.sum(mask_cur & mask_other, axis=(1, 2))\n # mask_union = np.sum(mask_cur | mask_other, axis=(1, 2))\n # mask_iou = mask_inter / mask_union\n\n # inds = np.where((iou <= nms_thresh) & (mask_iou <= nms_thresh))[0]\n inds = np.where(iou <= nms_thresh)[0]\n order = order[inds + 1]\n\n # masks = masks[keep]\n # ids = ids[keep]\n return keep", "def sample_wnet(data_list, rows=15, 
start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0,\n n_class=5, width=1):\n\n n_probmaps = data_list[0]['bound'].shape[0] # number of bounds\n cols = 5 + n_probmaps - 1\n n_batch = len(data_list)\n _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])\n\n for ind in range(n_batch):\n input = data_list[ind]['input']\n # print(\"input shape: {}\".format(input.shape))\n label = data_list[ind]['GT']\n pred = data_list[ind]['pred']\n bound_probmap = data_list[ind]['bound'] # predicted bound probmap\n\n # calculate average F1 score\n label_binary = label_binarize(label.flatten(), classes=range(n_class))\n pred_binary = label_binarize(pred.flatten(), classes=range(n_class))\n\n f_score = np.zeros(n_class, dtype=np.float32)\n slice_effect_class = 0\n for i in range(n_class):\n if np.sum(label_binary[:,i]) == 0:\n f_score[i] = 0.0\n else:\n slice_effect_class += 1\n f_score[i] = f1_score(label_binary[:,i], pred_binary[:,i])\n\n ave_f_score = np.sum(f_score)/slice_effect_class\n\n # calculate average HFD\n label_bound = mask2innerouterbound(label, width=width)\n pred_bound = mask2innerouterbound(pred, width=width)\n hdf = slicewise_hd95(pred_bound, label_bound, n_class)\n\n if (ind - start_with) % show_every == 0:\n i = (ind - start_with) // show_every\n if i < rows:\n ax[i, 0].imshow(input, cmap='gray')\n ax[i, 0].set_title(\"Slice {} : {}\".format(ind+start_inx, 'input'))\n ax[i, 0].axis('off')\n\n ax[i, 1].imshow(mask2rgb(label))\n ax[i, 1].set_title('Slice %d : %s' % (ind+start_inx, 'ground truth'))\n ax[i, 1].axis('off')\n\n ax[i, 2].imshow(mask2rgb(pred))\n ax[i, 2].set_title('Slice %d : %s' % (ind+start_inx, 'prediction'))\n ax[i, 2].axis('off')\n\n # plot overlapping between pred_bound and label_bound\n overlap = pred_bound.copy()\n overlap[label_bound != 0] = 4\n ax[i, 3].imshow(mask2rgb(overlap))\n ax[i, 3].set_title(\"Slice {:d} : bound hdf={:.4f}\".format(ind + start_inx, hdf))\n ax[i, 3].axis('off')\n\n # plot prob maps for intermediate bounds\n output_title = ['prob map (inner bound)', 'prob map (outer bound)'] if n_probmaps >= 3 else ['prob map']\n for c_inx in range(1, n_probmaps):\n ax[i, 3 + c_inx].imshow(bound_probmap[c_inx], cmap='seismic')\n ax[i, 3 + c_inx].set_title(\"Slice {:d} : {}\".format(ind + start_inx, output_title[c_inx - 1]))\n ax[i, 3 + c_inx].axis('off')\n\n ax[i, 3 + n_probmaps].scatter(range(0, n_class), f_score)\n ax[i, 3 + n_probmaps].set_title('Slice %d : Ave F-score = %0.2f' % (ind+start_inx, ave_f_score))\n ax[i, 3 + n_probmaps].set_ylabel('F score')\n ax[i, 3 + n_probmaps].set_ylim([-0.1, 1.1])\n\n if fig_name:\n plt.savefig(fig_name + '.pdf')\n plt.close()" ]
[ "0.68596137", "0.6554217", "0.6554217", "0.5882985", "0.585619", "0.5846074", "0.5843255", "0.58028907", "0.58009887", "0.57961977", "0.5764685", "0.57554805", "0.5748522", "0.57372177", "0.572652", "0.568638", "0.5678612", "0.56781596", "0.5647182", "0.5632262", "0.56277126", "0.5592061", "0.5587391", "0.55756044", "0.55756044", "0.5567086", "0.5565928", "0.5558076", "0.55498487", "0.5537128" ]
0.6877821
0
Convert predictor output with coarse and fine segmentation to a mask.
def predictor_output_with_fine_and_coarse_segm_to_mask( predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType ) -> BitMasks: H, W = image_size_hw boxes_xyxy_abs = boxes.tensor.clone() boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) N = len(boxes_xywh_abs) masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device) for i in range(len(boxes_xywh_abs)): box_xywh = make_int_box(boxes_xywh_abs[i]) labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh) x, y, w, h = box_xywh masks[i, y : y + h, x : x + w] = labels_i > 0 return BitMasks(masks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predictor_output_with_coarse_segm_to_mask(\n predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType\n) -> BitMasks:\n H, W = image_size_hw\n boxes_xyxy_abs = boxes.tensor.clone()\n boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)\n N = len(boxes_xywh_abs)\n masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)\n for i in range(len(boxes_xywh_abs)):\n box_xywh = make_int_box(boxes_xywh_abs[i])\n box_mask = resample_coarse_segm_tensor_to_bbox(predictor_output[i].coarse_segm, box_xywh)\n x, y, w, h = box_xywh\n masks[i, y : y + h, x : x + w] = box_mask\n\n return BitMasks(masks)", "def predictor_output_with_coarse_segm_to_mask(\n predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType\n) -> BitMasks:\n H, W = image_size_hw\n boxes_xyxy_abs = boxes.tensor.clone()\n boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)\n N = len(boxes_xywh_abs)\n masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)\n for i in range(len(boxes_xywh_abs)):\n box_xywh = make_int_box(boxes_xywh_abs[i])\n box_mask = resample_coarse_segm_tensor_to_bbox(predictor_output[i].coarse_segm, box_xywh)\n x, y, w, h = box_xywh\n masks[i, y : y + h, x : x + w] = box_mask\n\n return BitMasks(masks)", "def get_pred_mask(test_image, model):\n\n test_image= test_image=transforms.ToPILImage()(test_image)\n #test_image=Image.fromarray(test_image)\n new_mask = model(transforms.ToTensor()(test_image).unsqueeze(1).cuda())[1].transpose(1,2).transpose(2,3).cpu().detach().numpy().squeeze()\n return new_mask", "def reconstruct(mask, y_pred, pixels):\n\n # change mask so that cancer prediction is gone (ie prevent cheating)\n for pi in range(256):\n for pj in range(256):\n if int(round(mask[pi, pj])) == 2:\n mask[pi, pj] = 1\n\n # insert the actual prediction\n for p, pixel in enumerate(pixels):\n pixel_x = pixel[0]\n pixel_y = pixel[1]\n # set the new mask pixel to the predicted score, moving 0 -> 1 (prostate, not cancer)\n # and 1 -> 2 (prostate, cancer)\n mask[pixel_x, pixel_y] = y_pred[p] + 1\n\n return mask", "def get_regions_mask(self, input):", "def output_mask(self):\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", "def Get_Mask_Predictors(mask_in, Image_Features, feature_dict, \n convert_length = 0.2204315, radius = 50, verbose = False): \n \n ## Expand mask into one-hot mask if input is flat\n if len(mask_in.shape)==2:\n mask_in = Expand_Mask(mask_in, num_class = nfeatures)\n \n ## Define factor by which to convert pixel area to area in square meters\n convert_area = convert_length**2\n \n ## create mask and index list that shows which image_features are buildings\n mask_buildings = Image_Features.Type.isin(['mBuild', 'tBuild']) \n ind_buildings = list(mask_buildings[mask_buildings].index) \n nbuildings = len(ind_buildings)\n \n ## Create submasks that distinguish modern buildings from huts \n mask_mods = Image_Features.Type[mask_buildings].isin(['mBuild'])\n ind_mods = list(mask_mods[mask_mods].index)\n mask_huts = Image_Features.Type[mask_buildings].isin(['tBuild'])\n ind_huts = list(mask_huts[mask_huts].index)\n \n ## Calculate distances between all buildings\n distance_mat = dist(Image_Features.loc[ind_buildings,{'x','y'}])\n \n Image_Features.loc[:, 'Local_Buildings'] = None\n Image_Features.loc[:, 'Local_Moderns'] = None\n Image_Features.loc[:, 'Local_Traditionals'] = 
None\n Image_Features.loc[:, 'Local_Forest_Area'] = None\n Image_Features.loc[:, 'Local_Bare_Area'] = None\n Image_Features.loc[:, 'Local_Modern_Area'] = None\n Image_Features.loc[:, 'Local_Trads_Area'] = None\n Image_Features.loc[:, 'Local_Focal_Area'] = 3.14159*radius**2\n \n # Loop through each building and collect statistics\n for ii in ind_buildings:\n ind = ind_buildings[ii]\n building_type = Image_Features.Type[ind]\n close_buildings = (distance_mat[ii, :] < radius).sum() - 1.0\n close_mods = (distance_mat[ii, ind_mods] < radius).sum() - 1.0*(building_type=='mBuild')\n close_huts = (distance_mat[ii, ind_huts] < radius).sum() - 1.0*(building_type=='tBuild')\n ##print('b' + str(ii))\n Image_Features.loc[ind, 'Local_Buildings'] = close_buildings \n Image_Features.loc[ind, 'Local_Moderns'] = close_mods \n Image_Features.loc[ind, 'Local_Traditionals'] = close_huts \n ##print('c' + str(ii)) \n ## Define mask that will select a circle around the focal building. Note\n ## that 0 and 1 indices of mask / image correspond to rows (y) and cols (x)\n x = np.arange(0, mask_in.shape[1])\n y = np.arange(0, mask_in.shape[0])\n ##print('d' + str(ii))\n ## Convert distances back into pixels\n cx = round(Image_Features.loc[ind, 'x'] / convert_length)\n cy = round(Image_Features.loc[ind, 'y'] / convert_length)\n r = (radius / convert_length)\n ##print('e' + str(ii))\n ## Make indicator mask of all pixels less than distance r from focal building\n mask = (x[np.newaxis,:]-cx)**2 + (y[:,np.newaxis]-cy)**2 < r**2 \n ##print('f' + str(ii)) \n ##print('mask' + str(mask.shape) + 'mask_in' + str(mask_in.shape))\n Image_Features.loc[ind, 'Local_Modern_Area'] = mask_in[mask,0].sum()*convert_area \n ##print('g' + str(ii)) \n Image_Features.loc[ind, 'Local_Trads_Area'] = mask_in[mask,1].sum()*convert_area \n ##print('h' + str(ii)) \n Image_Features.loc[ind, 'Local_Forest_Area'] = mask_in[mask,2].sum()*convert_area \n ##print('i' + str(ii)) \n Image_Features.loc[ind, 'Local_Bare_Area'] = mask_in[mask,3].sum()*convert_area \n if verbose:\n print(str(ii) + ' / ' + str(nbuildings))\n return Image_Features", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def preprocess_mask(y):\n y[y <= 255./2] = 0 # Needs to be in this order, otherwise 1 gets overwritten\n y[y > 255./2] = 1\n binary_mask = y.astype(np.uint8)\n\n return binary_mask", "def _preprocessing(image) -> np.ndarray:\n # TODO: Turn mapping into generic function.\n processed_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n processed_image[~mask] = 255\n return processed_image", "def model_masks(self, prunable=None):\n # TODO Also accept a dataloader\n pass\n # return masks", "def preprocess_mask(mask):\n # Project values interval on [0.0; 1.0]\n if mask.max() > 1:\n mask[mask <= 127.5] = 0.\n mask[mask > 127.5] = 1.\n else:\n mask[mask <= .5] = 0.\n mask[mask > .5] = 1.\n return mask", "def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], 
img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)", "def create_mask(predictions_2d, sizeX, sizeY, chip_shape):\n\n # reshape predictions_2d\n predictions_2d_res = np.array(predictions_2d)\n predictions_2d_res = predictions_2d_res.reshape(sizeX, sizeY)\n\n # create new mask of area of interest\n new_mask = np.zeros((chip_shape[1], chip_shape[2]))\n for x in range(0, chip_shape[1], 256):\n for y in range(0, chip_shape[2], 256):\n new_mask[x:x + 256, y:y + 256] = predictions_2d_res[x / 256][y / 256]\n\n return new_mask", "def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")", "def predict_mask(logit, EMPTY_THRESHOLD, MASK_THRESHOLD):\n #pred mask 0-1 pixel-wise\n #n = logit.shape[0]\n IMG_SIZE = logit.shape[-1] #256\n #EMPTY_THRESHOLD = 100.0*(IMG_SIZE/128.0)**2 #count of predicted mask pixles<threshold, predict as empty mask image\n #MASK_THRESHOLD = 0.22\n #logit = torch.sigmoid(torch.from_numpy(logit)).view(n, -1)\n #pred = (logit>MASK_THRESHOLD).long()\n #pred[pred.sum(dim=1) < EMPTY_THRESHOLD, ] = 0 #bug here, found it, the bug is input shape is (256, 256) not (16,256,256)\n logit = sigmoid(logit)#.reshape(n, -1)\n pred = (logit>MASK_THRESHOLD).astype(np.int)\n if pred.sum() < EMPTY_THRESHOLD:\n return np.zeros(pred.shape).astype(np.int)\n else:\n return pred", "def _preprocessing(mask, mapping, image) -> np.ndarray:\n # TODO: Turn mapping into generic function.\n processed_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n processed_image = cv.remap(processed_image, *mapping, cv.INTER_LINEAR)\n processed_image[~mask] = 255\n return processed_image", "def _populate_mask_data(self, filename: str) -> None:\n if self.seg_images.get(filename) is None:\n return None\n\n mask = cv2.imread(self.seg_targets[filename])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n\n # convert pixel masks to multidimentional\n height, width = mask.shape[:2]\n segmentation_mask = np.zeros((height, width, len(VOC_COLORMAP)), dtype=np.float32)\n for label_index, label in enumerate(VOC_COLORMAP):\n segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(float)\n\n return segmentation_mask", "def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5", "def postprocess_masks(mask_predictions, frcnn_detection_dict):\n # [batch_size, max_num_proposals] e.g. 
1, 300\n detection_classes = frcnn_detection_dict['classes']\n # e.g 300, 90, 33, 33 \n _, num_classes, mask_height, mask_width = mask_predictions.shape.as_list()\n batch_size, max_detection = detection_classes.get_shape().as_list()\n\n if num_classes > 1:\n # e.g. 300, 1, 33, 33\n mask_detections = tf.batch_gather(mask_predictions,\n tf.reshape(tf.to_int32(detection_classes) - 1, [-1, 1]))\n\n # e.g. 1, 300, 33, 33\n mask_detections = tf.reshape(tf.squeeze(mask_detections, axis=1),\n [batch_size, max_detection, mask_height, mask_width])\n\n mask_detections = tf.nn.sigmoid(mask_detections)\n return mask_detections", "def loadRes(self, detection_results, include_mask, is_image_mask=False):\n res = MaskCOCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n logging.info('Loading and preparing results...')\n predictions = self.load_predictions(\n detection_results,\n include_mask=include_mask,\n is_image_mask=is_image_mask)\n assert isinstance(predictions, list), 'results in not an array of objects'\n if predictions:\n image_ids = [pred['image_id'] for pred in predictions]\n assert set(image_ids) == (set(image_ids) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n\n if (predictions and 'bbox' in predictions[0] and predictions[0]['bbox']):\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for idx, pred in enumerate(predictions):\n bb = pred['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]\n if 'segmentation' not in pred:\n pred['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n pred['area'] = bb[2] * bb[3]\n pred['id'] = idx + 1\n pred['iscrowd'] = 0\n elif 'segmentation' in predictions[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for idx, pred in enumerate(predictions):\n # now only support compressed RLE format as segmentation results\n pred['area'] = maskUtils.area(pred['segmentation'])\n if 'bbox' not in pred:\n pred['bbox'] = maskUtils.toBbox(pred['segmentation'])\n pred['id'] = idx + 1\n pred['iscrowd'] = 0\n\n res.dataset['annotations'] = predictions\n\n res.createIndex()\n return res", "def preprocess(self):\n snr_mask = self._snr_preprocessing()\n flux_mask = self._flux_preprocessing()\n masking_mask = self._mask_preprocessing()\n return snr_mask & flux_mask & masking_mask", "def masking(X_train, X_test, y_train, y_test):\n # create mask to exclude NaN-values from train data\n mask_train = np.zeros(X_train.shape[0], dtype=np.bool)\n\n for i, subfeat in enumerate(X_train):\n if True in np.isnan(subfeat):\n mask_train[i] = True\n else:\n mask_train[i] = False\n\n # create mask to exclude NaN-values from test data\n mask_test = np.zeros(X_test.shape[0], dtype=np.bool)\n\n for i, subfeat in enumerate(X_test):\n if True in np.isnan(subfeat):\n mask_test[i] = True\n else:\n mask_test[i] = False\n\n # masking\n X_train = X_train[~mask_train]\n y_train = y_train[~mask_train]\n\n X_test = X_test[~mask_test]\n y_test = y_test[~mask_test]\n\n y_train = y_train.astype(\"int64\")\n y_test = y_test.astype(\"int64\")\n\n # exclude classes that are not included in both, test and train data\n difflist1 = list(set(np.unique(y_train)) - set(np.unique(y_test)))\n\n for i in difflist1:\n mask_train = y_train == i\n X_train = X_train[~mask_train]\n y_train = y_train[~mask_train]\n\n difflist2 = list(set(np.unique(y_test)) - set(np.unique(y_train)))\n\n for i in difflist2:\n mask_test = y_test == i\n X_test = X_test[~mask_test]\n y_test = y_test[~mask_test]\n\n 
return(X_train, X_test, y_train, y_test)", "def get_output_mask_at(self, node_index):\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", "def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def _preprocess_input(self, dataset):\n masker = self.masker or dataset.masker\n\n mask_img = masker.mask_img or masker.labels_img\n if isinstance(mask_img, str):\n mask_img = nib.load(mask_img)\n\n # Ensure that protected values are not included among _required_inputs\n assert \"aggressive_mask\" not in self._required_inputs.keys(), \"This is a protected name.\"\n\n if \"aggressive_mask\" in self.inputs_.keys():\n LGR.warning(\"Removing existing 'aggressive_mask' from Estimator.\")\n self.inputs_.pop(\"aggressive_mask\")\n\n # A dictionary to collect masked image data, to be further reduced by the aggressive mask.\n temp_image_inputs = {}\n\n for name, (type_, _) in self._required_inputs.items():\n if type_ == \"image\":\n # If no resampling is requested, check if resampling is required\n if not self.resample:\n check_imgs = {img: nib.load(img) for img in self.inputs_[name]}\n _check_same_fov(**check_imgs, reference_masker=mask_img, raise_error=True)\n imgs = list(check_imgs.values())\n else:\n # resampling will only occur if shape/affines are different\n # making this harmless if all img shapes/affines are the same as the reference\n imgs = [\n resample_to_img(nib.load(img), mask_img, **self._resample_kwargs)\n for img in self.inputs_[name]\n ]\n\n # input to NiFtiLabelsMasker must be 4d\n img4d = concat_imgs(imgs, ensure_ndim=4)\n\n # Mask required input images using either the dataset's mask or the estimator's.\n temp_arr = 
masker.transform(img4d)\n\n # An intermediate step to mask out bad voxels.\n # Can be dropped once PyMARE is able to handle masked arrays or missing data.\n nonzero_voxels_bool = np.all(temp_arr != 0, axis=0)\n nonnan_voxels_bool = np.all(~np.isnan(temp_arr), axis=0)\n good_voxels_bool = np.logical_and(nonzero_voxels_bool, nonnan_voxels_bool)\n\n data = masker.transform(img4d)\n\n temp_image_inputs[name] = data\n if \"aggressive_mask\" not in self.inputs_.keys():\n self.inputs_[\"aggressive_mask\"] = good_voxels_bool\n else:\n # Remove any voxels that are bad in any image-based inputs\n self.inputs_[\"aggressive_mask\"] = np.logical_or(\n self.inputs_[\"aggressive_mask\"],\n good_voxels_bool,\n )\n\n # Further reduce image-based inputs to remove \"bad\" voxels\n # (voxels with zeros or NaNs in any studies)\n if \"aggressive_mask\" in self.inputs_.keys():\n n_bad_voxels = (\n self.inputs_[\"aggressive_mask\"].size - self.inputs_[\"aggressive_mask\"].sum()\n )\n if n_bad_voxels:\n LGR.warning(\n f\"Masking out {n_bad_voxels} additional voxels. \"\n \"The updated masker is available in the Estimator.masker attribute.\"\n )\n\n for name, raw_masked_data in temp_image_inputs.items():\n self.inputs_[name] = raw_masked_data[:, self.inputs_[\"aggressive_mask\"]]", "def convert_tcia_labels(mask, keep_all_label=False):\n \n mask[np.isin(mask, [14])] = 0 # Remove duodenum\n label = [1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1] # no right kidney\n\n if keep_all_label:\n label += [0,0]\n\n return mask, label", "def postprocessing(self, prediction, prob_thresh=0.5):\n prob_map = self._np_sigmoid(prediction)\n prob_map = self._np_merge_prediction(prob_map)\n if self.resize :\n prob_map = self._np_resize_image(prob_map,\n self.orig_size,\n dtype='float')\n mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)\n return mask", "def mask2trimap(self, mask):\n fg_mask = (mask > 0).float()\n bg_mask = (mask < 0).float()\n trimap_width = getattr(self.opt, 'trimap_width', 20)\n trimap_width *= bg_mask.shape[-1] / self.opt.width\n trimap_width = int(trimap_width)\n bg_mask = cv2.erode(bg_mask.numpy(), kernel=np.ones((trimap_width, trimap_width)), iterations=1)\n bg_mask = torch.from_numpy(bg_mask)\n mask = fg_mask - bg_mask\n return mask" ]
[ "0.64958364", "0.64958364", "0.6187619", "0.6150233", "0.60465384", "0.592084", "0.59030396", "0.58995676", "0.5862994", "0.5852873", "0.5835467", "0.5810776", "0.5743541", "0.5702469", "0.56725556", "0.56671077", "0.56321526", "0.56278396", "0.5619452", "0.5614545", "0.5605009", "0.5578067", "0.55685806", "0.5558347", "0.5555303", "0.5533128", "0.55209404", "0.55182904", "0.55147487", "0.5512501" ]
0.6891425
1
method that plays morse code sounds depending on the input
def play_morse(tr_marks: list): play_morse_code = silence for mark in tr_marks: if mark == DOT: play_morse_code += sound play_morse_code += silence elif mark == COMMA: play_morse_code += sound play_morse_code += sound play_morse_code += sound play_morse_code += silence elif mark == PAUSE: play_morse_code += silence play_morse_code += silence play(play_morse_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_morse_code_audio(self, text):\n # The Morse-sender-dictionary letter keys are lower-case letters.\n lctext = text.lower()\n # Replace any newline characters with a space character.\n lctext = lctext.replace('\\n', ' ')\n # Loop and convert characters to Morse code audio.\n # All characters that are not in the Morse-sender-dictionary\n # and are not either a space or a tab character are discarded.\n silence_count = 0\n for c in lctext:\n if c in MorseCodeSender.MORSE_SENDER_DICT:\n code = MorseCodeSender.MORSE_SENDER_DICT[c]\n for dotdash in code:\n if dotdash == '.':\n # The symbol is a dot.\n self.sample_buffer.extend(self.dot_sample_buffer)\n else:\n # The symbol is a dash.\n self.sample_buffer.extend(self.dash_sample_buffer)\n # After each dot or dash, add one dot-duration of silence.\n self.sample_buffer.extend(self.silence_1_sample_buffer)\n # After each character, add 2 more dot-durations of silence\n # resulting in three dot-durations of silence after a letter.\n self.sample_buffer.extend(self.silence_2_sample_buffer)\n silence_count = 3\n else:\n # The letter is not in the Morse code dictionary. If the\n # letter is a space character or tab character, then make\n # sure there are 7 dot-durations of silence to create the\n # proper separation between words.\n if c == ' ' or c == '\\t':\n silence_length = 7 - silence_count\n if silence_length > 3:\n self.sample_buffer.extend(self.silence_4_sample_buffer)\n silence_length -= 4\n if silence_length > 1:\n self.sample_buffer.extend(self.silence_2_sample_buffer)\n silence_length -= 2\n if silence_length > 0:\n self.sample_buffer.extend(self.silence_1_sample_buffer)\n silence_length -= 1\n silence_count = 0", "def sound_callback(self, data):\n # print \"heard a loud noise!\"\n # print data.data\n sound = data.data.split(\" \")\n print sound[0]\n if float(sound[0]) > .8:\n if self.idling:\n # self.behav_pub.publish(\"greet\")\n self.ok = True\n self.control_pub.publish(\"idle stop; ed stop\")\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n # elif self.start_game != None:\n # self.ok = True\n # self.control_pub.publish(\"ed stop\")\n\n # self.behav_pub.publish(\"sleep\")\n # self.emotion_pub.publish(\"STARTLE\")", "def morseCodeTest():\r\n\r\n\thello = ['....','.','.-..','.-..','---']\r\n\tprint(morseDecode(hello))", "def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n 
dash.close()", "def text_morse(self, code):\n self.output.clear()\n text = list(code)\n for item in text:\n for data in morse:\n if item == data:\n self.output.append(morse[data])\n return \" \".join(self.output)", "def English2Morse ():\r\n \r\n morse_word = ''\r\n morse_list = []\r\n why = ''\r\n excp2 = True\r\n excp3 = True\r\n global dictionary\r\n global Morse_dict\r\n \r\n ## This part is building the Table, which is an dictionary,\r\n # that shows for each letter (the key), the equivalent dot-dash string (the value)\r\n # if newM is True, that means that the user has changed the symbols of the morse code, so, we are going to use the new table\r\n # if it is False, that means the uer did not change the symbols, so we can use the table wiht dash and dots\r\n # also, if the variable dictionary is false, it means that the user have not used the English2Morse function yet, so,\r\n ## we need to create the dictionary, otherwise, if it is true, the user have already created the dictionary,\r\n ## so it is not necessary to create it again\r\n # but, if the user changed the morse symbol, it is going to create the new dictionary\r\n if newM:\r\n Morse_Table = open('new_MorseTable.txt','r')\r\n else:\r\n if not dictionary:\r\n try:\r\n Morse_Table = open('MorseTable.txt','r')\r\n except:\r\n shell_connect.write('MorseTable.txt not found. Download it to continue.\\n','COMMENT')\r\n excp3 = False\r\n if excp3:\r\n if not dictionary:\r\n for line in Morse_Table:\r\n Morse_dict.update({line[0]:line[2:-1]})\r\n Morse_Table.close()\r\n dictionary = True\r\n ## take the name of the file which has english letters to translate the text into morse code\r\n #try until the user enter a file that exists\r\n while True:\r\n try:\r\n filename = input(\"Please, write the name of the file (with extension) which has letters: \\n\")\r\n E2M_file = open(filename,'r')\r\n break\r\n except:\r\n shell_connect.write('File does not exist, try again.\\n','COMMENT')\r\n # creates the file which is going to have the message in morse code\r\n morse_hidden_file = open(filename[:-4]+'_hidden.txt','w')\r\n # for each line in the file, take each word. 
For each word, transform the letters into morse code\r\n # Invariant: line is always less than or equal to the number of lines in the english to morse file\r\n for line in E2M_file:\r\n # If the lines begins with a space, then write the spaces in the file being created and remove them from the line being read\r\n while line[0] == ' ':\r\n morse_hidden_file.write(' ')\r\n line = line[1:]\r\n words_list = line.split()\r\n if words_list != []:\r\n # if the user made a mistake and chose a file which is already in morse code, an error message is shown\r\n if words_list[0][0] == var1 or words_list[0][0] == var2 or words_list[0][0] == '*':\r\n excp = False\r\n excpwhy = 'The file is not a letter file'\r\n shell_connect.write('\\nThis is not a letter file!\\n','COMMENT')\r\n break\r\n else:\r\n # for each letter in each word, print the correspondent morse code in the new file\r\n # Invariant: word is always less than or equal to number of words in words_list\r\n for word in words_list:\r\n # Invariant: letter is always less than or equal to the number of letters in word\r\n for letter in word:\r\n # if the letter is uppercase, add a '*' symbol in front of the letter and transform the lowercase of that letter into morse code\r\n try:\r\n if not letter.isupper():\r\n #transform the letter into morse code\r\n morse_word = morse_word + Morse_dict[letter] + ' '\r\n else:\r\n letter = letter.lower()\r\n morse_word = morse_word +'*'+ Morse_dict[letter] + ' '\r\n except:\r\n ##letter not found\r\n excp2 = False\r\n why = letter + ', '\r\n excpwhy = 'warning: {} not found and not printed'.format(why)\r\n morse_hidden_file.write(morse_word)\r\n morse_word = ''\r\n morse_hidden_file.write(' ')\r\n excp = True\r\n morse_hidden_file.write('\\n')\r\n else:\r\n morse_hidden_file.write('\\n')\r\n E2M_file.close()\r\n morse_hidden_file.close()\r\n # if excp is True, that means all went right, if it is False, that means that something went wrong\r\n if excp:\r\n if excp2:\r\n shell_connect.write('\\nEnglish to Morse sucessful!','STRING')\r\n else:\r\n shell_connect.write('\\nEnglish to Morse partially sucessful, some symbols not found were ignored.','KEYWORD')\r\n print('\\nThe file {} was created.'.format(filename[:-4]+'_hidden.txt'))\r\n else:\r\n shell_connect.write('\\nEnglish to Morse failed. 
'+excpwhy+'.\\n','COMMENT')", "def _do_send(self, text):\n self.sample_buffer = bytearray()\n # Fill self.sample_buffer with audio samples.\n self._create_morse_code_audio(text)\n # Write self.sample_buffer data to a wave audio file.\n self._create_wave_file()\n self.sample_buffer = None\n # Play the Morse code audio file.\n self.audio_finished_event.clear()\n self.player = sound.Player(self.audio_file_name)\n self.player.finished_handler = self._audio_finished_handler\n self.player.play()", "def mcc():\n morse = {\"A\": \".-\",\n \"B\": \"-...\",\n \"C\": \"-.-.\",\n \"D\": \"-..\",\n \"E\": \".\",\n \"F\": \"..-.\",\n \"G\": \"--.\",\n \"H\": \"....\",\n \"I\": \"..\",\n \"J\": \".---\",\n \"K\": \"-.-\",\n \"L\": \".-..\",\n \"M\": \"--\",\n \"N\": \"-.\",\n \"O\": \"---\",\n \"P\": \".--.\",\n \"Q\": \"--.-\",\n \"R\": \".-.\",\n \"S\": \"...\",\n \"T\": \"-\",\n \"U\": \"..-\",\n \"V\": \"...-\",\n \"W\": \".--\",\n \"X\": \"-..-\",\n \"Y\": \"-.--\",\n \"Z\": \"--..\",\n \"0\": \"-----\",\n \"1\": \".----\",\n \"2\": \"..---\",\n \"3\": \"...--\",\n \"4\": \"....-\",\n \"5\": \".....\",\n \"6\": \"-....\",\n \"7\": '--...',\n \"8\": \"---..\",\n \"9\": \"----.\",\n \".\": \".-.-.-\",\n ',': \"--..--\"}\n\n print(morse[input('enter character to be converted').upper()])\n\n print(\n f'{morse[input(\"1:\").upper()]} '\n f'{morse[input(\"2:\").upper()]} '\n f'{morse[input(\"3:\").upper()]} '\n f'{morse[input(\"4:\").upper()]} '\n f'{morse[input(\"5:\").upper()]} '\n f'{morse[input(\"6:\").upper()]}')", "def morse_text(self, code):\n self.output.clear()\n text = code.split()\n for item in text:\n for keys, values in morse.items():\n if item == values:\n self.output.append(keys)\n return \"\".join(self.output)", "def play_game(self):\n \n# self.display_letter_prompt()\n\n if self.input_letter != None:\n if self.input_letter == self.current_prompt:\n self.correct_response()\n else:\n self.incorrect_response()\n\n self.frames_passed += 1\n\n if self.prompt_vibrated == False:\n self.vibrate_buttons()\n self.prompt_vibrated = True\n\n if self.frames_passed > (self.delay * self.fps * 0.07):\n self.vibrate_buttons()\n self.frames_passed = 0", "def play(self, player, game):\n super().play(player, game)\n game.set_action(\"STEAL_CODER\")", "def game_play(self):", "def play_game():\n pass", "def speak():\n sentences = ['DESTROY ALL HU- I MEAN GREETINGS MEAT BAG',\n 'She sells sea shells by the sea shore', 'Other sentence']\n while True:\n AUDIO.speak(sentences[randint(0, 2)])\n sleep(15)", "def english_to_morse(\n input_file: str = \"lorem.txt\",\n output_file: str = \"lorem_morse.txt\"\n):", "def play_prog(self):\r\n\r\n serial_number = range(47845, 47869)\r\n chord_number = range(1, 25)\r\n for i in self.cnv:\r\n # Look for matching audio files and play them.\r\n try:\r\n filename = \"audio files/{}__{}.wav\".format(serial_number[i-1], chord_number[i-1])\r\n playsound.playsound(filename)\r\n except FileNotFoundError:\r\n print('Error: audio files not found.')", "def play_again(self):\n \n toplay_orig = [\n labyrinth_text.left_play_again,\n labyrinth_text.right_play_same_again,\n labyrinth_text.escape_terminate\n ]\n toplay = toplay_orig[:]\n \n while True:\n self.audio.do_play()\n\n if len(toplay) > 0:\n self.audio.synthesize_and_play(toplay.pop(0))\n\n # get next pygame event in queue\n event = pygame.event.poll()\n\n if event.type == pygame.locals.QUIT:\n return 'terminate'\n \n if event.type == pygame.locals.KEYDOWN:\n # log key press\n keyname = pygame.key.name(event.key)\n liblog.log('key 
pressed: {}'.format(keyname))\n \n # any key press stops audio\n self.audio.stop_playback()\n self.audio.clear_queue()\n \n # arrow keys to select new game, same game or quit\n if event.key == pygame.locals.K_LEFT:\n return 'new'\n if event.key == pygame.locals.K_RIGHT:\n liblog.log('restarting same labyrinth')\n return 'same'\n\n # space bar to repeat options\n if event.key == pygame.locals.K_SPACE:\n toplay = toplay_orig[:]\n continue\n \n # escape to terminate\n if event.key == pygame.locals.K_ESCAPE:\n return 'terminate'\n\n # F2 and F3 to change speaking rate\n if event.key == pygame.locals.K_F2:\n self.audio.change_speed('slower', labyrinth_text.speed_changed)\n continue\n if event.key == pygame.locals.K_F3:\n self.audio.change_speed('faster', labyrinth_text.speed_changed)\n continue\n\n # F1 for help\n if event.key == pygame.locals.K_F1:\n self.help()\n continue\n \n # unknown key \n self.audio.play_sound_file('data/audio/effects/blop.wav')", "def _control_play(self, entities: List[str]):\n if entities:\n self.player.play(entities)\n else:\n self.player.respond(\"I'm sorry, I couldn't find that for you.\")", "def play(self, player, game): \n super().play(player, game)\n game.set_action(\"SLEEP_CODER\")", "def morse_input(s):\n buf = []\n while True:\n b = ser.read().decode('utf-8')\n buf.append(b)\n if b == RESET_PAUSE:\n buf = []\n if b == LONG_PAUSE or b == SHORT_PAUSE:\n yield ''.join(buf)\n buf = []", "def sound_effects(sound):\n global effect # Making effect global so it can be used outside this function\n effect = pygame.mixer.Sound(sound) # Loading sound files\n effect.play(0) # Playing sound files", "def partialMorseCodeTest():\r\n\r\n\t# This is a partial representation of the word TEST, amongst other possible combinations\r\n\ttest = ['x','x','x..','x']\r\n\tprint(morsePartialDecode(test))\r\n\r\n\t# This is a partial representation of the word DANCE, amongst other possible combinations\r\n\tdance = ['x..','x-','x.','x.-.','x']\r\n\tprint(morsePartialDecode(dance))", "async def prog(ctx, note:str,amount=3):\n answer = Tempo.getNoteProg(note,amount)\n solution = 'Generated Key Progression '+str(answer)\n await ctx.send(solution)\n if ctx.author.voice is not None:\n vc = await ctx.author.voice.channel.connect()\n for i in range(len(answer)):\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio('sounds/'+str(answer[i])+'.mp3'))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n time.sleep(1)\n await vc.disconnect()", "def main() -> None:\n message = \"Morse code here!\"\n print(message)\n message = encrypt(message)\n print(message)\n message = decrypt(message)\n print(message)", "def play_game():\n pass", "def speak(audio):\n engine.say(audio)\n engine.runAndWait()", "def speak(audio):\n engine.say(audio)\n engine.runAndWait()", "def play(code):\n print('Playing ' + code)\n subprocess.call(['mpc', 'clear'])\n subprocess.call(['mpc', 'add', code])\n subprocess.call(['mpc', 'play'])", "def play(self):\n\n try:\n if self.source is None:\n # If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... 
and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')", "def configure_with_sound_control(self):\n\t\tfor q in self.questions:\n\t\t\tq[\"question\"] = sc(q[\"question\"]) #reconfiguring the question to a sound control object\n\t\t\tif not q.get(\"on_wrong\") == None: #making sure that the on_wrong option is not set to None befor setting it be a sound control object\n\t\t\t\tq[\"on_wrong\"] = sc(q[\"on_wrong\"])\n\t\t\tif not q.get(\"on_correct\") == None: #making sure that the on_correct option is not set to None befor setting it to be a sound control object\n\t\t\t\tq[\"on_correct\"] = sc(q[\"on_correct\"])\n\n\t\tself.result_sayer = sc(\"audio_files/QUIZ MODE.wav\")# specifying the result sayer" ]
[ "0.7356725", "0.6545013", "0.65217036", "0.64552045", "0.6409863", "0.6388827", "0.63310593", "0.62088245", "0.61421", "0.60523164", "0.604278", "0.6028385", "0.6028272", "0.6024971", "0.5971374", "0.59319043", "0.5907226", "0.5903722", "0.5879051", "0.5868905", "0.5835449", "0.5818619", "0.5791489", "0.57840943", "0.5783373", "0.5739102", "0.5739102", "0.57364935", "0.5735924", "0.57276267" ]
0.73421204
1
Call a tf_hub.Module using the standard blundell signature. This expects that `module` has a signature named `signature` which conforms to ('sequence', 'sequence_length') -> output. To use an existing SavedModel file you may want to create a module_spec with `tensorflow_hub.saved_model_module.create_module_spec_from_saved_model`.
def call_module(module, one_hots, row_lengths, signature): if signature not in module.get_signature_names(): raise ValueError('signature not in ' + six.ensure_str(str(module.get_signature_names())) + '. Was ' + six.ensure_str(signature) + '.') inputs = module.get_input_info_dict(signature=signature) expected_inputs = [ 'sequence', 'sequence_length', ] if set(inputs.keys()) != set(expected_inputs): raise ValueError( 'The signature_def does not have the expected inputs. Please ' 'reconfigure your saved model to only export signatures ' 'with sequence and length inputs. (Inputs were %s, expected %s)' % (str(inputs), str(expected_inputs))) outputs = module.get_output_info_dict(signature=signature) if len(outputs) > 1: raise ValueError('The signature_def given has more than one output. Please ' 'reconfigure your saved model to only export signatures ' 'with one output. (Outputs were %s)' % str(outputs)) return list( module({ 'sequence': one_hots, 'sequence_length': row_lengths, }, signature=signature, as_dict=True).values())[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modules_in_function_signature_unwrapped(dependency_testing_model) -> None:\n func: Callable = dependency_testing_model.unwrapped_predict\n expected_modules = {\n 'json',\n 'collections',\n 'sklearn',\n 'cloudpickle',\n 'requests',\n }\n extracted_modules: Set[str] = md.modules_in_function_signature(func)\n assert extracted_modules == expected_modules", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)", "def test_modules_in_function_signature_wrapped(dependency_testing_model) -> None:\n func: Callable = dependency_testing_model.predict\n expected_modules = {\n 'calendar',\n 'datetime',\n 'numpy',\n 'google',\n 'pandas',\n }\n extracted_modules: Set[str] = md.modules_in_function_signature(func)\n assert extracted_modules == expected_modules", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def in_graph_inferrer(sequences,\n savedmodel_dir_path,\n signature,\n name_scope='inferrer'):\n # Add variable to make it easier to refactor with multiple tags in future.\n tags = [tf.saved_model.tag_constants.SERVING]\n\n # Tokenization\n residues = tf.strings.unicode_split(sequences, 'UTF-8')\n # Convert to one-hots and pad.\n one_hots, row_lengths = utils.in_graph_residues_to_onehot(residues)\n module_spec = hub.saved_model_module.create_module_spec_from_saved_model(\n savedmodel_dir_path)\n module = hub.Module(module_spec, trainable=False, tags=tags, name=name_scope)\n return call_module(module, one_hots, row_lengths, signature)", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def test_signature(self):\n with open(\"{}/{}\".format(self.APP_PATH, self.TARGET_PY_FILE),\n 'r', encoding=\"utf-8\", errors='ignore') as 
f:\n read_data = f.read()\n # Check [def predict()] section\n with self.subTest(name=\"[def handle()] in main.py\"):\n self.assertIsNotNone(\n re.search(r'def\\s+handle\\(\\w+\\)', read_data),\n msg=\"[def handle()] signature is missing or incorrect\")", "def halp(module_text):\n input_lines = module_text.splitlines()\n input, old_outputs = strip_old_outputs(input_lines)\n env = set_up_globals(Halp(old_outputs))\n output = format_part(eval_module(input, env))\n return diff(output.splitlines(), input_lines)", "def dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=False):\n if os.path.exists(hdf5_path) and (not redownload):\n print('Loading BigGAN hdf5 file from:', hdf5_path)\n return h5py.File(hdf5_path, 'r')\n\n print('Loading BigGAN module from:', module_path)\n tf.reset_default_graph()\n hub.Module(module_path)\n print('Loaded BigGAN module from:', module_path)\n\n initializer = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(initializer)\n\n print('Saving BigGAN weights to :', hdf5_path)\n h5f = h5py.File(hdf5_path, 'w')\n for var in tf.global_variables():\n val = sess.run(var)\n h5f.create_dataset(var.name, data=val)\n print(f'Saving {var.name} with shape {val.shape}')\n h5f.close()\n return h5py.File(hdf5_path, 'r')", "def log_model(\n tf_saved_model_dir,\n tf_meta_graph_tags,\n tf_signature_def_key,\n artifact_path,\n conda_env=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n registered_model_name=None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n pip_requirements=None,\n extra_pip_requirements=None,\n):\n return Model.log(\n artifact_path=artifact_path,\n flavor=mlflow.tensorflow,\n tf_saved_model_dir=tf_saved_model_dir,\n tf_meta_graph_tags=tf_meta_graph_tags,\n tf_signature_def_key=tf_signature_def_key,\n conda_env=conda_env,\n registered_model_name=registered_model_name,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n )", "def add_module(self, *args, **kwargs):\n# if 'path' in kwargs:\n# path = kwargs['path']\n# else:\n# path = os.getcwd()\n#\n# if len(args) > 0:\n# module = args[0]\n# elif 'module' in kwargs:\n# module = kwargs['module']\n#\n# if 'path' not in kwargs:\n# path = os.getcwd()\n# kwargs['path'] = path\n\n if 'module' not in kwargs:\n if len(args) > 0:\n module = args[0]\n kwargs['module'] = module\n\n# if 'module' in kwargs:\n if len(kwargs) > 0:\n self._data.add_detector(self._name, **kwargs)", "def test_signature_for_pyfunc_predict_quantiles(\n auto_arima_model, model_path, data_airline, use_signature\n):\n model_path_primary = model_path.joinpath(\"primary\")\n model_path_secondary = model_path.joinpath(\"secondary\")\n flavor.save_model(sktime_model=auto_arima_model, path=model_path_primary)\n loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path_primary)\n predict_conf = pd.DataFrame(\n [\n {\n \"fh\": FH,\n \"predict_method\": \"predict_quantiles\",\n \"alpha\": ALPHA,\n }\n ]\n )\n forecast = loaded_pyfunc.predict(predict_conf)\n signature = infer_signature(data_airline, forecast) if use_signature else None\n flavor.save_model(auto_arima_model, path=model_path_secondary, signature=signature)\n mlflow_model = Model.load(model_path_secondary)\n assert signature == mlflow_model.signature", "def _make_model_v2():\n class CustomModule(tf.Module):\n\n def __init__(self):\n super().__init__()\n self.m = tf.Variable([1.0, 1.0, 1.0], 
name='slope')\n\n @tf.function\n def __call__(self, x):\n y = self.m * x + 1\n return y\n\n @tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])\n def length(self, x):\n return tf.reduce_sum(self(x) - x, name='length')\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32),\n tf.TensorSpec((None, 3), tf.float32)])\n def scalar_multiply(self, z, x):\n return tf.multiply(z, x, name='scale_mult')\n\n module = CustomModule()\n\n # Make a concrete version of __call__\n call = module.__call__.get_concrete_function(tf.TensorSpec((None, 3)))\n\n tf.saved_model.save(\n module, tf_export_path, signatures={\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: call,\n 'length': module.length,\n 'scalar_multiply': module.scalar_multiply\n }\n )", "def create_tokenizer_from_hub_module(bert_path, sess):\n bert_module = hub.Module(bert_path)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n vocab_file, do_lower_case = tf.print(\n [tokenization_info[\"vocab_file\"], tokenization_info[\"do_lower_case\"]]\n )\n\n return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)", "def setup_module(module):\n print(\"Start rishabhSetupModule of Program\")", "def create_tokenizer_from_hub_module(bert_path):\n bert_module = hub.Module(bert_path)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n sess = tf.Session()\n vocab_file, do_lower_case = sess.run(\n [tokenization_info[\"vocab_file\"], tokenization_info[\"do_lower_case\"]]\n )\n sess.close()\n return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)", "def create_tokenizer_from_hub_module(bert_model_hub):\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_model_hub)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n \n return bert.tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)", "def _ensure_module_name_in_signature(\n signode: sphinx.addnodes.desc_signature) -> None:\n for node in signode.traverse(condition=sphinx.addnodes.desc_addname):\n modname = signode.get('module')\n if modname and not node.astext().startswith(modname + '.'):\n node.insert(0, docutils.nodes.Text(modname + '.'))\n break", "def example_function_in_example_module():\n pass", "def call_module(session, module, args, libgmt):\n c_call_module = libgmt.GMT_Call_Module\n c_call_module.argtypes = [ctypes.c_void_p, ctypes.c_char_p,\n ctypes.c_int, ctypes.c_void_p]\n c_call_module.restype = ctypes.c_int\n\n mode = get_constant('GMT_MODULE_CMD', libgmt)\n status = c_call_module(session, module.encode(), mode,\n args.encode())\n check_status_code(status, 'GMT_Call_Module')", "def load(handle, tags=None, options=None):\n if not isinstance(handle, str):\n raise ValueError(\"Expected a string, got %s\" % handle)\n module_path = resolve(handle)\n is_hub_module_v1 = tf.io.gfile.exists(\n native_module.get_module_proto_path(module_path))\n if tags is None and is_hub_module_v1:\n tags = []\n\n if options:\n if not hasattr(getattr(tf, \"saved_model\", None), \"LoadOptions\"):\n raise NotImplementedError(\"options are not supported for TF < 2.3.x,\"\n \" Current version: %s\" % tf.__version__)\n # tf.compat.v1.saved_model.load_v2() is TF2 tf.saved_model.load() before TF2\n obj = tf.compat.v1.saved_model.load_v2(\n module_path, tags=tags, options=options)\n else:\n obj = 
tf.compat.v1.saved_model.load_v2(module_path, tags=tags)\n obj._is_hub_module_v1 = is_hub_module_v1 # pylint: disable=protected-access\n return obj", "def __init__(self,\n module_path='https://tfhub.dev/deepmind/bigbigan-resnet50/1',\n allow_growth=True):\n self._module = hub.Module(module_path)\n\n # encode graph\n self.enc_ph = self.make_encoder_ph()\n self.z_sample = self.encode_graph(self.enc_ph)\n self.z_mean = self.encode_graph(self.enc_ph, return_all_features=True)['z_mean']\n\n # decode graph\n self.gen_ph = self.make_generator_ph()\n self.gen_samples = self.generate_graph(self.gen_ph, upsample=True)\n\n # session\n init = tf.global_variables_initializer()\n gpu_options = tf.GPUOptions(allow_growth=allow_growth)\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n self.sess.run(init)", "def signature(function):\n pass", "def test_modules_in_function_body_unwrapped(dependency_testing_model) -> None:\n func: Callable = dependency_testing_model.unwrapped_predict\n expected_modules = {'verta', 'click'}\n extracted_modules: Set[str] = md.modules_in_function_body(func)\n assert extracted_modules == expected_modules", "def create_tokenizer_from_hub_module(bert_model_hub):\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_model_hub)\n tokenization_info = bert_module(signature=\"tokenization_info\",\n as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([\n tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]\n ])\n\n return bert.tokenization.FullTokenizer(vocab_file=vocab_file,\n do_lower_case=do_lower_case)" ]
[ "0.5419747", "0.5326547", "0.5326547", "0.52860445", "0.527043", "0.5221019", "0.5221019", "0.5221019", "0.5197618", "0.5174877", "0.5174877", "0.51694185", "0.51559496", "0.49624866", "0.4918613", "0.4902309", "0.48774195", "0.48640463", "0.48298866", "0.48239872", "0.48081285", "0.47581813", "0.47432384", "0.47426462", "0.47133026", "0.47118852", "0.4707307", "0.47034264", "0.46954957", "0.46903676" ]
0.6811007
0
Alternative constructor for Inferrer that is memoized.
def memoized_inferrer(
    savedmodel_dir_path,
    activation_type=tf.saved_model.signature_constants
    .DEFAULT_SERVING_SIGNATURE_DEF_KEY,
    batch_size=16,
    use_tqdm=False,
    session_config=None,
    memoize_inference_results=False,
    use_latest_savedmodel=False,
):
  return Inferrer(
      savedmodel_dir_path=savedmodel_dir_path,
      activation_type=activation_type,
      batch_size=batch_size,
      use_tqdm=use_tqdm,
      session_config=session_config,
      memoize_inference_results=memoize_inference_results,
      use_latest_savedmodel=use_latest_savedmodel,
  )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, f):\n self.f = f\n self.memo = {}\n print('Calling __init__()')", "def get_inferrer_for(__call__, self, fn):\n tracking = getattr(fn, 'tracking_id', None)\n if tracking is None:\n return __call__(self, fn)\n if fn not in self.constructors:\n fn_generic = dc_replace(fn, tracking_id=None)\n inf = __call__(self, fn_generic)\n self.constructors[fn] = TrackedInferrer(inf)\n return self.constructors[fn]", "def dp_memoize_instance(f):\n \n memoize = memoize_limited(max_size=None, max_mem_MB=25)\n return memoize(f)", "def memoize(f):\n class MemoDict(dict):\n def __init__(self, func):\n self.func = func\n\n def __call__(self, *args):\n return self[args]\n\n def __missing__(self, key):\n result = self[key] = self.func(*key)\n return result\n\n return MemoDict(f)", "def memoize(f):\n\n class memodict(dict):\n @wraps(f)\n def __getitem__(self, *args):\n return super(memodict, self).__getitem__(*args)\n\n def __missing__(self, key):\n self[key] = ret = f(key)\n return ret\n\n return memodict().__getitem__", "def memoize(f):\n\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n self.__name__ = f.__name__\n\n def __call__(self, *args):\n global memoized_return_values\n try:\n return memoized_return_values[self.__name__]\n except KeyError:\n ret = memoized_return_values[self.__name__] = self.f(*args)\n return ret\n\n return memodict(f)", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoizex(f):\n # http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/\n class MemoDict(dict):\n def __missing__(self, key):\n result = self[key] = f(key)\n return result\n return MemoDict().__getitem__", "def __init__(self, method=None):\n\n super().__init__(method)\n self.__cache_name__ = self._get_cache_key()", "def memo(qty):\n def decorator(f):\n decoratee = Memo(qty,f)\n return functools.wraps(f)(decoratee)\n return decorator", "def memoized(f):\n GlobalCache._caches[f] = {}\n GlobalCache._locks[f] = RLock()\n\n return decorator(GlobalCache.memoize, f)", "def memoize(func):\n memo = None\n\n @wraps(func)\n def wrapper(self):\n if memo is not None:\n return memo\n\n return func(self)\n\n return wrapper", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoize(func):\n @wraps(func)\n def memoizer(self):\n if not hasattr(self, '_cache'):\n self._cache = {}\n if func.__name__ not in self._cache:\n self._cache[func.__name__] = func(self)\n return self._cache[func.__name__]\n return memoizer", "def memoize(f):\n\n class memodict(dict):\n def __getitem__(self, *key):\n return dict.__getitem__(self, key)\n\n def __missing__(self, key):\n self[key] = ret = f(*key)\n return ret\n\n return memodict().__getitem__", "def destantiate(self, memo):\n raise NotImplementedError()", "def memoize(obj):\r\n cache = obj.cache = {}\r\n\r\n @functools.wraps(obj)\r\n def memoizer(*args, **kwargs):\r\n key = str(args) + str(kwargs)\r\n if key not in cache:\r\n cache[key] = obj(*args, 
**kwargs)\r\n # only keep the most recent 100 entries\r\n if len(cache) > 100:\r\n cache.popitem(last=False)\r\n return cache[key]\r\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n\n return memoizer", "def __init__(self):\n self.freq = collections.defaultdict(set)\n self.cache = collections.defaultdict()\n self.max_freq = 0\n self.min_freq = 0", "def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self,entries=None,memoized=False):\n if entries is not None:\n self.entries = entries[:]\n else:\n self.entries = []\n self.memoized = memoized", "def memoized(fget):\n attr_name = \"_{0}\".format(fget.__name__)\n\n @wraps(fget)\n def fget_memoized(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fget(self))\n return getattr(self, attr_name)\n\n return property(fget_memoized)", "def __init__(self, *args, **kwargs):\n self._cachedict = {}" ]
[ "0.57097083", "0.565469", "0.54636115", "0.5359008", "0.53518945", "0.52622575", "0.5254688", "0.5254688", "0.5246884", "0.52231497", "0.52179325", "0.52097183", "0.52092415", "0.52090156", "0.51751125", "0.5166283", "0.5138301", "0.5130604", "0.51206917", "0.51206917", "0.51206917", "0.51126635", "0.5087249", "0.5055388", "0.5054541", "0.5054541", "0.5054541", "0.50516015", "0.5045267", "0.5037069" ]
0.6149901
0
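The wrapper above only forwards its arguments to Inferrer; the caching itself presumably comes from a memoization decorator applied elsewhere, which is not shown in this record. A minimal, self-contained sketch of that pattern using functools.lru_cache, with a hypothetical stand-in class (FakeInferrer is not part of the record above):

import functools


class FakeInferrer:
    # Hypothetical stand-in for the real Inferrer class, which is not shown here.
    def __init__(self, savedmodel_dir_path, batch_size=16):
        self.savedmodel_dir_path = savedmodel_dir_path
        self.batch_size = batch_size


@functools.lru_cache(maxsize=None)
def memoized_fake_inferrer(savedmodel_dir_path, batch_size=16):
    # Identical argument tuples return the same cached instance instead of
    # constructing (and loading a model for) a new one on every call.
    return FakeInferrer(savedmodel_dir_path, batch_size=batch_size)


a = memoized_fake_inferrer("/tmp/model", batch_size=16)
b = memoized_fake_inferrer("/tmp/model", batch_size=16)
assert a is b  # The second call is served from the cache.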
Gets the value of a variable from the graph.
def get_variable(self, variable_name):
  with self._graph.as_default():
    return self._sess.run(self._get_tensor_by_name(variable_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_Variable(self, node):\n var_name = node.value\n val = self.VARIABLES.get(var_name)\n if val is None:\n raise NameError(repr(var_name))\n else:\n return val", "def get_value(self, var_name, tf_session):\n\n if var_name in self.learning_parameters:\n\n value = tf_session.run(self.learning_parameters[var_name])\n\n elif var_name in self.layers:\n\n value = tf_session.run(self.layers[var_name])\n\n else:\n print(\"Unknown DQN variable: \" + var_name)\n assert(0) # <3\n\n return(value)", "def get_variable_value(self, name):\n return self._design.GetVariableValue(name)", "def get_variable(self, name):\n if self._scalamagic:\n intp = self.scala_interpreter\n intp.interpret(name)\n return intp.last_result()", "def getVariable(self, gradientCoordinate):\n return self.variables[gradientCoordinate]", "def getVariable(self):\n return _libsbml.Rule_getVariable(self)", "def getVar(self, id):\n if id in self.variables:\n return self.variables[id]", "def get_sparql_value(sresult, variable_id):\n val = ''\n if variable_id in sresult:\n val = sresult[variable_id]['value']\n return val", "def get(self, var):\n s = self.eval('{0}'.format(var))\n return self.strip_answer(s)", "def getVariable(self, varName):\n return self[varName]", "def get(self, name, **valuefilter):\n if not valuefilter:\n valuefilter = self.valuefilter\n varobj = Variable(name, **valuefilter)\n value = varobj.get(gid=self.gid)\n return value", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def get_variable(self, svc, var):\n action = \"variableget\"\n path = \"data_request?id=%s&DeviceNum=%d&serviceId=%s&Variable=%s\" \\\n % (action, self.id, svc, var)\n return self.vera.get(path)", "def get_airflow_variable(key: str) -> str:\n return models.Variable.get(key)", "def var(self) -> float:\n return self._data.var()", "def get_node_with_name(self, name):\n\t return self.variables[name]", "def get_variable(x):\n return x.cuda() #if use_cuda else x", "def get_variable(self, name):\n return self._properties[name]", "def getVariable(self):\n return _libsbml.EventAssignment_getVariable(self)", "def get_assignment(self, var):\n return self.variable_to_value.get(var)", "def get_variable(x, volatile=False):\n tensor = torch.cuda.LongTensor(x) if CUDA else torch.LongTensor(x)\n return autograd.Variable(tensor, volatile=volatile)", "def get_test_value(v):\r\n if not isinstance(v, graph.Variable):\r\n v_var = theano.tensor.as_tensor_variable(v)\r\n else:\r\n v_var = v\r\n return PureOp._get_test_value(v_var)", "def get_node_value(self, n):\n node = self.get_node(n)\n if node:\n return node.value", "def get_variable(self, name, visual=None):\n # get the variables list\n if visual is None:\n variables = self.variables.values()\n else:\n variables = self.get_visual(visual)['variables']\n variables = [v for v in variables if v.get('name', '') == name]\n if not variables:\n return None\n return variables[0]", "def __getitem__(self, key):\n return self.variables[key]", "def get_value_var(self, var, data):\n \n #special case if the operand is boolean return it\n if isinstance(var, bool):\n return var\n \n try:\n #find the value for the key \n for key in str(var).split('.'):\n data = data[key]\n \n except (KeyError):\n # if key doesnt exist rather than returning None return the key as it is. 
This would be helpful for operands as strings\n return var\n else:\n return data", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def _get_value(self, node):\n val = None\n if isinstance(node, ast.Str):\n val = node.s\n elif isinstance(node, ast.BinOp):\n if pairwise_isinstance(\n (node.op, ast.Mod), (node.left, ast.Str),\n (node.right, ast.Name)):\n val = node.left.s % self.globals_[node.right.id]\n elif pairwise_isinstance(\n (node.op, ast.Add), (node.left, ast.Name),\n (node.right, ast.Str)):\n val = self.globals_[node.left.id] + node.right.s\n elif isinstance(node, ast.Name):\n val = self.globals_[node.id]\n\n if val is None:\n raise ValueError(\n \"Unable to find value in %s, only the following are parsed: \"\n \"GLOBAL, 'pkg.foobar', '%%s.foobar' %% GLOBAL or 'GLOBAL + \"\n \"'.foobar'\"\n % ast.dump(node))\n\n return val" ]
[ "0.6932", "0.68510634", "0.684766", "0.6606306", "0.6586858", "0.64930975", "0.6473543", "0.6453199", "0.64312464", "0.6427162", "0.64167476", "0.6407993", "0.6407993", "0.6407993", "0.64065707", "0.6314743", "0.63059956", "0.63046336", "0.62994", "0.62777895", "0.6276041", "0.6244449", "0.6243489", "0.61669654", "0.61606234", "0.6155648", "0.6063526", "0.6045447", "0.6034831", "0.6029872" ]
0.7520454
0
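A minimal sketch of the TF1-style graph/session pattern that get_variable relies on, assuming TensorFlow is installed; the variable name "w" and the tensor name "w_value" are illustrative, and tf.identity stands in for whatever tensor the helper _get_tensor_by_name would resolve in a loaded graph:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    w = tf.compat.v1.get_variable("w", initializer=tf.constant([1.0, 2.0, 3.0]))
    # A named tensor derived from the variable; this is the kind of tensor a
    # helper like _get_tensor_by_name would look up by name.
    tf.identity(w, name="w_value")
    init = tf.compat.v1.global_variables_initializer()

with tf.compat.v1.Session(graph=graph) as sess:
    sess.run(init)
    fetched = sess.run(graph.get_tensor_by_name("w_value:0"))
    print(fetched)  # [1. 2. 3.]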
Get the most recent SavedModel from a base directory path.
def latest_savedmodel_path_from_base_path(base_path):
  protein_export_base_path = os.path.join(base_path, 'export/protein_exporter')

  suffixes = [
      x for x in tf.io.gfile.listdir(protein_export_base_path)
      if 'temp-' not in x
  ]

  if not suffixes:
    raise ValueError('No SavedModels found in %s' % protein_export_base_path)

  # Sort by suffix to take the model corresponding to the most
  # recent training step.
  return os.path.join(protein_export_base_path, sorted(suffixes)[-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_latest_saved_model(model_dir):\n saved_models = os.path.join(model_dir, 'best_models')\n saved_chkp = sorted([int(mdl) for mdl in os.listdir(saved_models)])\n latest = saved_chkp[-1]\n path = os.path.join(saved_models, '%d' % latest)\n\n # Next, find the full path to the saved model\n mdl_time = os.listdir(path)\n\n # Return the final path\n return os.path.join(path, mdl_time[-1])", "def get_last_saved_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='step', desc=True)", "def fetch_last_model_file(self):\n try:\n filename = self.model_files[-1]\n return self.make_path(filename)\n except IndexError:\n return None", "def get_latest_model():\n return get_models()[-1]", "def load_most_recent_model(\n model_dir: str,\n) -> Tuple[Pipeline, Dict[str, str], Dict[str, str]]:\n\n try:\n version = max(os.listdir(model_dir))\n logger.info(\"Load model\")\n model_dir = os.path.join(model_dir, version)\n\n return (\n joblib.load(os.path.join(model_dir, \"model.joblib\")),\n load_json(os.path.join(model_dir, \"schema.json\")),\n load_json(os.path.join(model_dir, \"metadata.json\")),\n )\n\n except Exception as e:\n logger.error(e)\n logger.info(\"No model found, let's train one!\")\n train(model_dir=model_dir)\n return load_most_recent_model(model_dir)", "def _find_model(model_chkp_dir, mode='last'):\n\n if mode == 'last':\n file_name = sorted(os.listdir(model_chkp_dir))[-1]\n model_path = os.path.join(model_chkp_dir, file_name)\n\n elif mode == 'best':\n raise NotImplementedError\n\n return model_path", "def get_best_known_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='total_score', desc=False)", "def load_latest_save(self, device=None):\n return torch.load(str(self.previous_saves()[-1].absolute()), map_location=device)", "def find_last(self):\n # Get directory names. 
Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n f\"Could not find model directory under {self.model_dir}\")\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"OOD\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, f\"Could not find weight files in {dir_name}\")\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint", "def get_best_model_file_save_path(self):\n \n if self.best_model_file_saved_at_least_once:\n \n return self.absolute_model_file_save_path\n \n # create the base name path if not exists\n \n absolute_dirname = os.path.dirname(self.absolute_model_file_save_path)\n\n if not os.path.exists(absolute_dirname):\n\n os.makedirs(absolute_dirname)\n \n # update the model with respective path\n \n self.sql_model_instance.model_path = self.relative_model_file_save_path\n \n self.db.session.add(self.sql_model_instance)\n self.db.session.commit()\n \n # change the variable state to True\n \n self.best_model_file_saved_at_least_once = True\n \n return self.absolute_model_file_save_path", "def get_model_name(models_folder: os.path,\n model_base_name: str = 'sklearn_GMM_Model',\n model_name_end=None, ) -> Path:\n if not model_name_end:\n model_name_end = datetime.now().strftime('%H%M_%d%m')\n model_fname = os.path.join(models_folder, f\"{model_base_name}_{model_name_end}.pkl\")\n print(f\"Model name (get or create): {model_fname}\")\n return model_fname", "def get_most_recent_checkpoint(model_folder):\n checkpoints = [a.stem for a in model_folder.glob(\"*.index\")]\n latest_checkpoint = sorted(checkpoints, key=lambda x: -int(x.split('-')[-1]))[0]\n return latest_checkpoint", "def find_last(self):\n # Get directory names. 
Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n return None, None\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"FCN_DenseNet\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n return dir_name, None\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return dir_name, checkpoint", "def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))", "def find_latest_model_name(self, interval_seconds=600):\n current_model_name = None\n if os.path.exists(self.CURRENT_MODEL_FILE):\n with FileLock(file_name=self.CURRENT_MODEL_FILE) as lock:\n with open(lock.file_name, 'r') as f:\n current_model_name = f.read()\n current_model_name = current_model_name.strip()\n file_paths = get_dir_list(self._model_dir)\n latest_model_name = get_file_name(file_paths[0])\n if latest_model_name in self._validated_models:\n # ignore valiated model\n return None\n if current_model_name and current_model_name != latest_model_name:\n # new model found\n return latest_model_name\n return None", "def get_saved_model_path(training_ckpt_base):\n ckpt_dir = os.path.dirname(training_ckpt_base)\n # If using a checkpoint from the best_exporter, return its saved_model.\n if os.path.basename(ckpt_dir) == 'variables':\n return os.path.join(\n os.path.dirname(ckpt_dir),\n tf.saved_model.constants.SAVED_MODEL_FILENAME_PB)\n # If using a training checkpoint, still return the eval saved_model.\n else:\n saved_models_dir = os.path.join(ckpt_dir, 'export', 'best_exporter')\n saved_model_paths = tf.gfile.Glob(os.path.join(saved_models_dir, '*'))\n if saved_model_paths:\n return os.path.join(saved_model_paths[0],\n tf.saved_model.constants.SAVED_MODEL_FILENAME_PB)\n # Otherwise, there is not eval saved_model.\n else:\n return None", "def get_saved_model_path(self, saved_model_dir):\n\n models_data = self.load_models_data(saved_model_dir)\n if models_data is None:\n return None\n\n if self.stock_code not in models_data[\"models\"]:\n return None\n\n model_type_hash = self.get_model_type_hash()\n\n if model_type_hash not in models_data[\"models\"]:\n return None\n\n return models_data[\"models\"][self.stock_code][model_type_hash][-1][\"model_path\"]", "def best_model_from_dir(basename):\n models = glob.glob(basename + '*.index')\n best_model = None\n # get best model, if exists\n models_out = []\n for m in models:\n match = re.match(re.escape(basename) + '(1?[0-9]{4}).index', m)\n if match:\n models_out.append(int(match.groups()[0]))\n\n if models_out:\n acc = max(models_out)\n best_model = basename + str(acc)\n\n return best_model", "def get_latest_checkpoint(cls, experiment_path):\n checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)\n all_times = sorted(os.listdir(checkpoints_path), reverse=True)\n return os.path.join(checkpoints_path, all_times[0])", "def loadModel(self, savePath=\"DataStore/SavedModels/Forecasters/\",\n date=False):\n savePath = self.getProjectRoot() + savePath\n modelName = self.__class__.__name__\n savePath += modelName\n if not date:\n if len(os.listdir(savePath)) == 0:\n message = \"no saved models present\"\n raise Exception(message)\n 
latestSave = sorted(os.listdir(savePath),\n key=lambda x: self.getDatetime(x))[-1]\n savePath = \"{}/{}\".format(savePath, latestSave)\n self.loadAll(savePath)\n elif isinstance(date, str):\n allSaves = os.listdir(savePath)\n if date not in allSaves:\n # Raising exception as directory not present in savePath\n message = \"{} not in specified location {}\".format(date,\n savePath)\n raise Exception(message)\n else:\n self.loadAll(\"{}/{}\".format(savePath, date))", "def get_latest_train_data():\n\n data_file = os.path.join(\"models\",'latest-train.pickle')\n\n if not os.path.exists(data_file):\n raise Exception(\"cannot find {}-- did you train the model?\".format(data_file))\n\n with open(data_file,'rb') as tmp:\n data = pickle.load(tmp)\n\n return(data)", "def base_dir():\n return os.path.join(TrainFile.base_dir(), 'model')", "def load_historical_model(model_path):\n assert(osp.isdir(model_path))\n model_list = gb.glob(osp.join(model_path,'*.solverstate'))\n if model_list.__len__() == 0:\n return 0, ''\n\n iter_num = 0\n returned_model_path = ''\n for model_name in model_list:\n assert ('iter_' in model_name)\n tmp_iter = int(model_name.split('_')[-1].split('.')[0])\n if tmp_iter > iter_num:\n iter_num = tmp_iter\n returned_model_path = model_name\n\n assert (iter_num > 0) and (returned_model_path != '')\n return iter_num, returned_model_path", "def _get_model():\n with open('models/catapp_gp_model.pickle', 'rb') as modelfile:\n model = pickle.load(modelfile)\n return model", "def getModel(self):\n # Check if the dir is empty\n if os.path.exists(self.savedModelsPath) and os.listdir(self.savedModelsPath):\n log.info(\"Loading model from checkpoint %s\" % self.savedModelsPath)\n model = ModelFactory.loadFromCheckpoint(self.savedModelsPath)\n else:\n log.info(\"Creating model from %s...\" % self.modelParamsPath)\n model = self.createModelFromParams(self.getModelParams())\n\n return model", "def get_model_path(directory):\n\n path = directory + \"/model-0.h5\"\n\n # Model name\n models = [f for f in os.listdir(directory) if f.endswith(\"h5\")]\n\n if len(models) > 0:\n # get greater version\n max_v = max([m.split(\"-\")[1] for m in models])\n m = [model for model in models if model.endswith(max_v)][0]\n path = directory + \"/\" + m\n\n return path", "def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)", "def getLastFile(self):\n lastFile = None if len(self.recentFiles) == 0 else self.recentFiles[0]\n self.setLastPath(lastFile)\n return lastFile", "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)", "def get_latest_file(path):\n try:\n latest_iteration = get_latest_iteration(path)\n return os.path.join(path, '{}_{}'.format(FILE_PREFIX, latest_iteration))\n except ValueError:\n return None" ]
[ "0.80567336", "0.7754427", "0.76267993", "0.71230805", "0.6797189", "0.67275167", "0.6658544", "0.65875036", "0.6571269", "0.65707", "0.6433725", "0.63868684", "0.63734627", "0.6345704", "0.6340036", "0.6317695", "0.62807816", "0.62697744", "0.6192471", "0.61862606", "0.6143271", "0.6111571", "0.60937333", "0.60904455", "0.6083318", "0.60711914", "0.6050823", "0.6043377", "0.60336185", "0.5992257" ]
0.7921742
1
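The selection logic above is simply "drop in-progress temp- exports, sort, take the last". A small stdlib-only sketch that rebuilds a fake export layout and applies the same rule, with os.listdir standing in for tf.io.gfile.listdir and made-up timestamp directory names:

import os
import tempfile

# Fake layout: <base>/export/protein_exporter/<global_step>/
base_path = tempfile.mkdtemp()
exporter_dir = os.path.join(base_path, "export", "protein_exporter")
for suffix in ("1600000000", "1600000500", "temp-1600000999"):
    os.makedirs(os.path.join(exporter_dir, suffix))

# Same rule as above: filter out "temp-" exports, sort, take the last suffix.
suffixes = [s for s in os.listdir(exporter_dir) if "temp-" not in s]
latest = os.path.join(exporter_dir, sorted(suffixes)[-1])
print(latest)  # Ends with export/protein_exporter/1600000500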
Serializes an inference result. This function is the opposite of deserialize_inference_result. The full format returned is a base64-encoded, compressed NumPy (.npz) archive containing a single entry whose key is the sequence name and whose value is the activations array.
def serialize_inference_result(sequence_name, activations):
  with io.BytesIO() as bytes_io:
    np.savez_compressed(bytes_io, **{sequence_name: activations})
    return base64.b64encode(bytes_io.getvalue())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize_result(result: Any) -> Union[str, bytes]:\n if isinstance(result, Node):\n return result.serialize(how='default' if RESULT_FILE_EXTENSION != '.xml' else 'xml')\n else:\n return repr(result)", "def deserialize_inference_result(results_b64):\n bytes_io = io.BytesIO(base64.b64decode(results_b64))\n single_pred_dict = dict(np.load(bytes_io))\n if len(single_pred_dict) != 1:\n raise ValueError('Expected exactly one object in the structured np array. '\n f'Saw {len(single_pred_dict)}')\n sequence_name = list(single_pred_dict.keys())[0]\n activations = list(single_pred_dict.values())[0]\n return sequence_name, activations", "def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json", "def postprocess(self, inference_output):\n logger.info(inference_output)\n return inference_output", "def __repr__(self):\n return (f'rsatoolbox.inference.Result\\n'\n f'containing evaluations for {self.n_model} models\\n'\n f'evaluated using {self.cv_method} of {self.method}'\n )", "def print_inference_result(self):\n if (\n self.params.model_str == 'optfixedsig'\n or self.params.model_str == 'opt'\n or self.params.model_str == 'fixedparam'\n ):\n print('*ls pt est = ' + str(self.sample_list[0].ls) + '.')\n print('*alpha pt est = ' + str(self.sample_list[0].alpha) + '.')\n print('*sigma pt est = ' + str(self.sample_list[0].sigma) + '.')\n elif self.params.model_str == 'samp' or self.params.model_str == 'sampfixedsig':\n ls_arr = np.array([ns.ls for ns in self.sample_list])\n alpha_arr = np.array([ns.alpha for ns in self.sample_list])\n sigma_arr = np.array([ns.sigma for ns in self.sample_list])\n print('*ls mean = ' + str(ls_arr.mean()) + '.')\n print('*ls std = ' + str(ls_arr.std()) + '.')\n print('*alpha mean = ' + str(alpha_arr.mean()) + '.')\n print('*alpha std = ' + str(alpha_arr.std()) + '.')\n print('*sigma mean = ' + str(sigma_arr.mean()) + '.')\n print('*sigma std = ' + str(sigma_arr.std()) + '.')\n print('-----')", "def inference(self):\n raise NotImplementedError", "def serialize_result(service_resource_map, result_or_resulttuple):\n if is_model(type(result_or_resulttuple)):\n # Note: `result` is a plain sqla result,\n # returned by sqla query execution eg: query.one() or query.all()\n result = result_or_resulttuple\n return serialize_model(result)\n\n else:\n # Note: `resulttuple` is the combinatory sqla result of all the joins with appiled filters,\n # returned by sqla query execution eg: query.one() or query.all()\n resulttuple = result_or_resulttuple\n return serialize_tuple(resulttuple)\n\n return serialized", "def inference():\n data = request.get_json(force = True)\n\n with torch.no_grad():\n torch.cuda.empty_cache()\n image = ToTensor(Image.open(BytesIO(b64decode(data['image'])))).half().cuda().unsqueeze_(0)\n inputs = test_transform(image)\n model_id = model_usage.get(True)\n outputs = model[model_id](inputs)[0]\n model_usage.put(model_id, False)\n prediction = classes[outputs.argmax(0)]\n del inputs, outputs, image\n \n image_storage.put((data['esun_uuid'], data['image'], prediction), False)\n\n t = datetime.datetime.now()\n ts = str(int(t.utcnow().timestamp()))\n s = sha256()\n s.update((CAPTAIN_EMAIL + ts + SALT).encode(\"utf-8\"))\n 
server_uuid = s.hexdigest()\n\n return jsonify({'esun_uuid': data['esun_uuid'],\n 'server_uuid': server_uuid,\n 'answer': prediction,\n 'server_timestamp': time()})", "def to_representation(self) -> str:\n raise NotImplementedError()", "def inference_as_raw_output(self, image_path, to_file = False):\n\n image_np = self.load_image_into_numpy_array(image_path)\n input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)\n\n detections_raw = self.detect_fn(input_tensor)\n # checking how many detections we got\n detections = self.process_detections(detections_raw)\n\n detections_api = {key: value.tolist() for key, value in detections.items()} # convert array to list for whole dict for API\n\n if to_file: # if saving to txt file was requested\n\n image_h, image_w, _ = image_np.shape\n\n dir_path=os.path.join(os.path.dirname(image_path),'prediction_txt_raw_output')\n\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n\n title , _ = os.path.splitext(os.path.basename(image_path))\n\n file_name = os.path.join(dir_path, f'{title}.txt')\n \n line2write = list()\n line2write.append(os.path.basename(image_path))\n \n with open(file_name, 'w') as text_file:\n # iterating over boxes\n for b, s, c in zip(\n detections['detection_boxes'], \n detections['detection_scores'], \n detections['detection_classes']\n ):\n \n y1abs, x1abs = b[0] * image_h, b[1] * image_w\n y2abs, x2abs = b[2] * image_h, b[3] * image_w\n \n list2append = [x1abs, y1abs, x2abs, y2abs, s, c]\n line2append = ','.join([str(item) for item in list2append])\n \n line2write.append(line2append)\n \n line2write = '\\n'.join(line2write)\n text_file.write(line2write + os.linesep)\n # print(f'save prediction result to {file_name} -> Done')\n \n return detections_api", "def to_representation(self, data): # lint-amnesty, pylint: disable=arguments-differ\n return str(data)", "def to_representation(self, data): # lint-amnesty, pylint: disable=arguments-differ\n return str(data)", "def __repr__(self):\n result = json.dumps({'processed': self._processed,\n 'failed': self._failed,\n 'total': self._total,\n 'time': str(self._time),\n 'chunk': self._chunk})\n return result", "def encode_result(value: object) -> bytes:\n raise NotImplementedError()", "def _to_json(self, output):\n out_dict = {\"predictions\": output}\n return json.dumps(out_dict)", "def formatResult(self, result):\r\n return str(result)", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def successMetricsAsJson(cls, spark_session: SparkSession, verificationResult, forAnalyzers: list = None):\n if forAnalyzers:\n raise NotImplementedError(\"forAnalyzers have not been implemented yet.\")\n forAnalyzers = getattr(\n spark_session._jvm.com.amazon.deequ.VerificationResult, \"successMetricsAsJson$default$2\"\n )()\n df = spark_session._jvm.com.amazon.deequ.VerificationResult.successMetricsAsJson(\n verificationResult.verificationRun, forAnalyzers\n )\n return json.loads(df)", "def result(self):\n if self.__json:\n return self.__json[\"result\"]\n else:\n return {}", "def _to_string(self):\n self.results.print_results()\n self.results.print_comparison()", "def inference():\n\n sents = request.get_json(force=True)['sents']\n\n vecs = tokenize_inputs(sents)\n results = model(vecs)\n\n result = dict()\n result['pred'] = [str(sample.numpy()[0]) for sample in results]\n \n response = flask.Response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n\n print(result)\n\n return result", "def _get_rendered_result_as_string(self, 
rendered_result) -> str:\n\n result: str = \"\"\n\n if type(rendered_result) == str:\n result = rendered_result\n\n elif type(rendered_result) == list:\n sub_result_list = []\n for sub_result in rendered_result:\n res = self._get_rendered_result_as_string(sub_result)\n if res is not None:\n sub_result_list.append(res)\n\n result = \"\\n\".join(sub_result_list)\n\n elif isinstance(rendered_result, RenderedStringTemplateContent):\n result = rendered_result.__str__()\n\n elif isinstance(rendered_result, CollapseContent):\n result = rendered_result.__str__()\n\n elif isinstance(rendered_result, RenderedAtomicContent):\n result = f\"(RenderedAtomicContent) {repr(rendered_result.to_json_dict())}\"\n\n elif isinstance(rendered_result, RenderedContentBlockContainer):\n result = \"(RenderedContentBlockContainer) \" + repr(\n rendered_result.to_json_dict()\n )\n\n elif isinstance(rendered_result, RenderedTableContent):\n result = f\"(RenderedTableContent) {repr(rendered_result.to_json_dict())}\"\n\n elif isinstance(rendered_result, RenderedGraphContent):\n result = f\"(RenderedGraphContent) {repr(rendered_result.to_json_dict())}\"\n\n elif isinstance(rendered_result, ValueListContent):\n result = f\"(ValueListContent) {repr(rendered_result.to_json_dict())}\"\n\n elif isinstance(rendered_result, dict):\n result = f\"(dict) {repr(rendered_result)}\"\n\n elif isinstance(rendered_result, int):\n result = repr(rendered_result)\n\n elif rendered_result == None:\n result = \"\"\n\n else:\n raise TypeError(\n f\"Expectation._get_rendered_result_as_string can't render type {type(rendered_result)} as a string.\"\n )\n\n if \"inf\" in result:\n result = \"\"\n return result", "def output(self):\n return serialize(self.seq,\n self.mutations,\n self.scores,\n k=self.k)", "def dovetail(inference_results):\n assert inference_results\n code_tree = inference_results[0].code_tree\n code_sequence = inference_results[0].code_sequence\n assert all(res.info.keys() == {'trees_checked', 'candidates'} for res in inference_results)\n candidates = []\n for i in count():\n done = True\n for res in inference_results:\n if i < len(res.info['candidates']):\n candidates.append(res.info['candidates'][i])\n done = False\n if done:\n break\n trees_checked = sum(res.info['trees_checked'] for res in inference_results)\n return InferenceResult(code_tree=code_tree, code_sequence=code_sequence, info=dict(trees_checked=trees_checked, candidates=candidates))", "def compute_result_repr(self, result):\n try:\n if self.shell.pprint:\n try:\n result_repr = pformat(result)\n except:\n # Work around possible bugs in pformat\n result_repr = repr(result)\n if '\\n' in result_repr:\n # So that multi-line strings line up with the left column of\n # the screen, instead of having the output prompt mess up\n # their first line.\n result_repr = '\\n' + result_repr\n else:\n result_repr = repr(result)\n except TypeError:\n # This happens when result.__repr__ doesn't return a string,\n # such as when it returns None.\n result_repr = '\\n'\n return result, result_repr", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.result))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def save_result(self, results: Dict[str, Dict[str, Any]]) -> None:\n if self.out_dir:\n os.makedirs(self.out_dir, exist_ok=True)\n with open(self.eval_result_file, 'w') as f:\n json.dump(results, f, indent=2)\n else:\n raise ValueError(f'Invalid output dir: {self.out_dir}')\n\n if self.verbose:\n 
print(f\"======\\nPanoptic nuScenes {self.task} evaluation for {self.eval_set}\")\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")", "def _result_wrapper(self, result, _pymatgen) -> List[ase.atoms.Atoms]:\n if _pymatgen:\n _, fname = tempfile.mkstemp(text=True)\n with open(fname, \"w\") as f:\n f.write(result.to(\"poscar\"))\n return ase.io.read(fname, index=':', format=\"vasp\")\n else:\n import json\n result = json.load(result)\n if result[\"valid_response\"]:\n _, fname = tempfile.mkstemp(text=True)\n with open(fname, \"w\") as f:\n f.write(result[\"response\"][0][\"cif\"])\n return ase.io.read(fname, index=':', format=\"cif\")\n elif not result[\"api_key_valid\"]:\n raise ValueError(f\"Invalid API_KEY: {self.API_KEY}. Check your API_KEY at https://www.materialsproject.org/open!\")", "def serialize(self) -> dict:\n return {\n \"parameters\": self.parameters,\n \"results\": self.results,\n }" ]
[ "0.68274057", "0.59581023", "0.5844812", "0.57521456", "0.56516314", "0.56136394", "0.558927", "0.5458501", "0.54018223", "0.5302885", "0.5245653", "0.52354074", "0.52354074", "0.52112603", "0.5208084", "0.5108439", "0.5067872", "0.50613916", "0.5043716", "0.5019683", "0.50195116", "0.50165826", "0.49905464", "0.4983865", "0.49753603", "0.49549598", "0.49514836", "0.49306914", "0.49156526", "0.49103716" ]
0.6606261
1
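A short round-trip check of the payload format, assuming serialize_inference_result as defined above is in scope; it decodes the base64 text and opens the compressed NumPy archive directly:

import base64
import io

import numpy as np

activations = np.array([0.1, 0.9, 0.0], dtype=np.float32)
encoded = serialize_inference_result("seq_001", activations)

# The payload is base64 text wrapping a compressed .npz archive with one entry.
raw = base64.b64decode(encoded)
with np.load(io.BytesIO(raw)) as archive:
    assert archive.files == ["seq_001"]
    np.testing.assert_array_equal(archive["seq_001"], activations)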
Deserializes an inference result. This function is the opposite of serialize_inference_result. The full format expected is a base64-encoded, compressed NumPy (.npz) archive containing a single entry whose key is the sequence name and whose value is the activations array.
def deserialize_inference_result(results_b64):
  bytes_io = io.BytesIO(base64.b64decode(results_b64))
  single_pred_dict = dict(np.load(bytes_io))
  if len(single_pred_dict) != 1:
    raise ValueError('Expected exactly one object in the structured np array. '
                     f'Saw {len(single_pred_dict)}')
  sequence_name = list(single_pred_dict.keys())[0]
  activations = list(single_pred_dict.values())[0]
  return sequence_name, activations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_result(self, result):\n if isinstance(result, list):\n return [self._decode_result(r) for r in result]\n elif isinstance(result, SimpleString):\n return result.value\n elif isinstance(result, SimpleError):\n return self._decode_error(result)\n else:\n return result", "def getDeserializer():", "def postprocess(self, inference_output):\n logger.info(inference_output)\n return inference_output", "def serialize_result(result: Any) -> Union[str, bytes]:\n if isinstance(result, Node):\n return result.serialize(how='default' if RESULT_FILE_EXTENSION != '.xml' else 'xml')\n else:\n return repr(result)", "def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json", "def inference(self):\n raise NotImplementedError", "def serialize_inference_result(sequence_name,\n activations):\n with io.BytesIO() as bytes_io:\n np.savez_compressed(bytes_io, **{sequence_name: activations})\n return base64.b64encode(bytes_io.getvalue())", "def deserializer():\n return bytes.decode", "def decode_when_needed(result):\n return result.decode('utf-8') if isinstance(result, bytes) else result", "def inference():\n data = request.get_json(force = True)\n\n with torch.no_grad():\n torch.cuda.empty_cache()\n image = ToTensor(Image.open(BytesIO(b64decode(data['image'])))).half().cuda().unsqueeze_(0)\n inputs = test_transform(image)\n model_id = model_usage.get(True)\n outputs = model[model_id](inputs)[0]\n model_usage.put(model_id, False)\n prediction = classes[outputs.argmax(0)]\n del inputs, outputs, image\n \n image_storage.put((data['esun_uuid'], data['image'], prediction), False)\n\n t = datetime.datetime.now()\n ts = str(int(t.utcnow().timestamp()))\n s = sha256()\n s.update((CAPTAIN_EMAIL + ts + SALT).encode(\"utf-8\"))\n server_uuid = s.hexdigest()\n\n return jsonify({'esun_uuid': data['esun_uuid'],\n 'server_uuid': server_uuid,\n 'answer': prediction,\n 'server_timestamp': time()})", "def deserialize(self, data):\r\n self.res = data.split()\r\n return self.search_deserialize()", "def decode_results(self, outputs):\n ...", "def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])", "def decode_result(as_bytes: typing.List[int]):\n raise NotImplementedError()", "def deserialize(self, obj):\n raise NotImplementedError", "def inference(self, inputs):\n # NOTE: This makes the assumption that your model expects text to be tokenized\n # with \"input_ids\" and \"token_type_ids\" - which is true for some popular transformer models, e.g. 
bert.\n # If your transformer model expects different tokenization, adapt this code to suit\n # its expected input format.\n input_ids = inputs[\"input_ids\"]\n input_ids = input_ids.to(self.device)\n\n coarse_result = self.model.generate(input_ids = input_ids, )\n coarse_result = coarse_result.to(\"cpu\")\n fined_result = self.tokenizer.decode(coarse_result[0].tolist()[inputs[\"original_length\"]+1:],\n skip_special_tokens = True)\n #logger.info(\"Model predicted: '%s'\", fined_result)\n\n return [fined_result]", "def deserialize(self, value):\n raise NotImplementedError", "def deserialize_response(self, serialized_response):\n raise NotImplementedError()", "def _do_infer(stream_manager_api, data_input):\n stream_name = b'segmentation'\n unique_id = stream_manager_api.SendData(\n stream_name, 0, data_input)\n if unique_id < 0:\n raise RuntimeError(\"Failed to send data to stream.\")\n\n keys = [b\"mxpi_tensorinfer0\"]\n keyVec = StringVector()\n for key in keys:\n keyVec.push_back(key)\n infer_result = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)\n print(infer_result)\n if infer_result.size() == 0:\n print(\"infer_result is null\")\n exit()\n\n TensorList = MxpiDataType.MxpiTensorPackageList()\n TensorList.ParseFromString(infer_result[0].messageBuf)\n data = np.frombuffer(\n TensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)\n data = data.reshape(1, 19, 1024, 2048)\n return data", "def dovetail(inference_results):\n assert inference_results\n code_tree = inference_results[0].code_tree\n code_sequence = inference_results[0].code_sequence\n assert all(res.info.keys() == {'trees_checked', 'candidates'} for res in inference_results)\n candidates = []\n for i in count():\n done = True\n for res in inference_results:\n if i < len(res.info['candidates']):\n candidates.append(res.info['candidates'][i])\n done = False\n if done:\n break\n trees_checked = sum(res.info['trees_checked'] for res in inference_results)\n return InferenceResult(code_tree=code_tree, code_sequence=code_sequence, info=dict(trees_checked=trees_checked, candidates=candidates))", "def normalize_transfer_result(cls, result: JSON) -> JSON:\n ...", "def from_dict(cls, dikt) -> 'AnalysisResultResults':\n return util.deserialize_model(dikt, cls)", "def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)", "def _parse_result(self, result, *, verbose=False, **kwargs):\n return get_fermilat_datafile(result)", "def deserialize(self, data):", "def normalize_transaction_result(cls, result: JSON) -> JSON:\n ...", "def serialize_result(service_resource_map, result_or_resulttuple):\n if is_model(type(result_or_resulttuple)):\n # Note: `result` is a plain sqla result,\n # returned by sqla query execution eg: query.one() or query.all()\n result = result_or_resulttuple\n return serialize_model(result)\n\n else:\n # Note: `resulttuple` is the combinatory sqla result of all the joins with appiled filters,\n # returned by sqla query execution eg: query.one() or query.all()\n resulttuple = result_or_resulttuple\n return serialize_tuple(resulttuple)\n\n return serialized", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def inference(model, data, diagnostics, seed, extra_fitting_args):\n pass", "async def transformer_infer(query: dict, response: Response) -> dict:\n logger.debug(\"TRANSFORMER - predicting query: \" + str(query))\n results = {}\n try:\n results = MODELS.sparse_reader.predict(query)\n 
logger.info(results)\n except Exception:\n logger.error(f\"Unable to get results from transformer for {query}\")\n response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n raise\n return results" ]
[ "0.5954675", "0.5719973", "0.5659938", "0.56532973", "0.56231964", "0.55732983", "0.55553776", "0.5442849", "0.54132456", "0.537006", "0.53153294", "0.52145404", "0.5211858", "0.51910484", "0.51749104", "0.51612943", "0.51597965", "0.51500016", "0.508528", "0.50621617", "0.5057725", "0.50444996", "0.50411934", "0.5032085", "0.49750987", "0.49431252", "0.48903507", "0.48880532", "0.48721308", "0.48697558" ]
0.71581525
0
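A round trip through both helpers, assuming serialize_inference_result and deserialize_inference_result from the records above are in scope:

import numpy as np

name = "P12345"  # Illustrative sequence name.
activations = np.random.rand(4, 8).astype(np.float32)

encoded = serialize_inference_result(name, activations)
decoded_name, decoded_activations = deserialize_inference_result(encoded)

assert decoded_name == name
np.testing.assert_allclose(decoded_activations, activations)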
Parses a file of gzipped, newline-separated inference results. The contents of each line are expected to be serialized as in `serialize_inference_result` above.
def parse_shard(shard_path):
  with tf.io.gfile.GFile(shard_path, 'rb') as f:
    with gzip.GzipFile(fileobj=f, mode='rb') as f_gz:
      for line in f_gz:  # Line-by-line.
        yield deserialize_inference_result(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(path):\n data = gzip.open(path, 'rb')\n for byte_line in data:\n yield eval(byte_line) # return generator instance to save memory", "def parse_external_result(self, file):\n raise NotImplementedError", "def parse(fcontents, utf16=False): # TODO where does this conversion take place??\n if utf16:\n # Handle a bunch of Unicode nonsense; files appear to be in UTF-16LE\n quant_results = fcontents.split(BLOCK_DIVIDER_UTF16)[3]\\\n .decode(AWKWARD_ENCODING).encode('ascii', 'ignore').split(\"\\r\\n\")\n else:\n quant_results = fcontents.split(BLOCK_DIVIDER)[3].split(\"\\r\\n\")\n\n for res in quant_results:\n items = res.split(\"\\t\")\n if re.search(\"\\d+\", items[0]): # ignore non-digit rows\n amt = items[9]\n if amt == OUTSIDE_LADDER:\n amt = 100\n yield (int(items[0]), items[1], float(amt))", "def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)", "def parse(self, f):\n \n for line in f:\n self.parse_line(line)", "def parse_file(path):\n if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # Assume Linux has GNU objdump. This has the options:\n # -t (list symbols), -C (de-mangle symbol names)\n objdump_args = ['objdump', '-t', '-C']\n elif sys.platform.startswith('darwin'):\n # Assume OSX has LLVM objdump. This has the options:\n # -t (list symbols)\n objdump_args = ['objdump', '-t']\n objdump_args.append(path)\n with StreamingProcess(objdump_args) as proc:\n # Find the first non-blank line.\n first_line = proc.peek()\n while not first_line:\n try:\n proc.next()\n first_line = proc.peek()\n except StopIteration:\n return []\n # Is this an archive?\n match = re.match(r'^.*[Aa]rchive\\s+(.+):$', first_line)\n if match:\n # In this format we have to skip this descriptive line.\n proc.next()\n return parse_archive(match.group(1), proc)\n # Some objdumps format archives differently.\n match = re.match(r'^(.+)\\((.+)\\):\\s+file format', first_line)\n if match:\n return parse_archive(match.group(1), proc)\n # Otherwise maybe it's an object file?\n match = re.match(r'^(.+):\\s+file format', first_line)\n if match:\n return [parse_object_file(match.group(1), proc)]\n # Otherwise it's not an archive or object file.\n return []", "def parse_results(stdout):\n for line in stdout.split(b\"\\n\"):\n log.debug(\"processing line %s\", line)\n fields = line.strip().split()\n if len(fields) != 9:\n continue\n metric = fields[1].decode(\"ascii\")\n info = lm_function_map[metric]\n dtype = info['dtype']\n yield {\n \"metric\": metric,\n \"n_compart\": int(fields[3]),\n \"n_exclude\": int(fields[4].strip(b\"()\")),\n \"total\": dtype(fields[2]),\n \"min\": dtype(fields[5]),\n \"avg\": float(fields[6]),\n \"max\": dtype(fields[7]),\n \"units\": info[\"units\"],\n }", "def parse_result_file(self, filepath: str):\n\n raise NotImplementedError", "def parse_output(result):\n output = result['output']\n parsed = output.split('\\n')\n output = []\n for _line in parsed:\n output.append(_line.strip())\n log.debug(_line)\n return output", "def try7():\n path = '/Users/mayankkejriwal/datasets/memex-evaluation-november/nyu-text/'\n total = 1\n count = 1\n with gzip.open(path + 'output1.gz', 'rb') as f:\n for line in f:\n print 'line : ',\n print line\n count += 1\n if count > total:\n break", "def load_inferred(inference_path, extractors, whitelist):\n with gzip.GzipFile(inference_path) as f:\n # with 
open(inference_path) as f:\n with nlj.open(f, json_lib='ujson') as src:\n for row in src:\n if whitelist is not None and row['mmsi'] not in whitelist:\n continue\n # Parsing dates is expensive and all extractors use dates, so parse them\n # once up front\n row['start_time'] = _parse(row['start_time'])\n #dateutil.parser.parse(row['start_time'])\n for ext in extractors:\n ext.extract(row)\n for ext in extractors:\n ext.finalize()", "def parse_file(file_name: str, delims='\\n', encoding='utf-8', zip_type=None):\n try:\n decoder = codecs.getincrementaldecoder(encoding)()\n except LookupError as e:\n print(str(e))\n return\n\n read_buffer_size = 10*1024\n max_line_size = 100*1024\n\n if not zip_type:\n open_f = open\n elif zip_type == 'gz':\n open_f = gzip.open\n else:\n raise NotImplementedError()\n\n with open_f(file_name, 'rb') as fp:\n buffered_str = ''\n for bs in iter(partial(fp.read, read_buffer_size), b''):\n decoded_str = decoder.decode(bs)\n # FIXME: i don't think current implementation is efficient enough...\n for c in decoded_str:\n if c in delims:\n if len(buffered_str) > 0:\n yield buffered_str\n buffered_str = ''\n else:\n buffered_str += c\n if len(buffered_str) >= max_line_size:\n yield buffered_str\n buffered_str = ''\n if len(buffered_str) > 0:\n yield buffered_str", "def readArff(filename):\n \n data = []\n labels = []\n\n def parseLine(line): # csv.reader could not do this.\n isopen = False\n current = ''\n for c in line:\n if c == \"'\":\n if isopen:\n yield current\n current = ''\n isopen = not isopen\n elif isopen:\n current += c\n\n #with filename.open() as f:\n with bz2.open(str(filename)+'.bz2', 'r') as f:\n \n line = ''\n while line != '@data':\n line = f.readline().decode().strip()\n if line.startswith(\"@attribute 'classification'\"):\n line = line[line.find('{') + 1:line.find('}')]\n classes = {i:n for n,i in enumerate(parseLine(line))}\n\n for line in f.read().decode().splitlines():\n record = list(parseLine(line))\n labels.append(classes[record[-1]])\n data.append([int(x) for x in record[:-1]])\n return numpy.array(data, dtype=float), numpy.array(labels), classes", "def parse_data(fp):\n pass", "def test_parse_file(self, tmpdir):\n filename = tmpdir.join(\"test.xbb\")\n\n with open(filename, \"w\") as f:\n f.write(test_file)\n\n bb = parse(antlr4.FileStream(filename))\n\n assert bb._var == {\"alpha\": 0.3423}\n\n expected = {\"name\": \"fock\", \"options\": {\"num_subsystems\": 1, \"cutoff_dim\": 7, \"shots\": 10}}\n assert bb.target == expected\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [0]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [0]},\n ]\n\n assert bb.operations == expected", "def _parse_result(self, result, *, verbose=False, **kwargs):\n return get_fermilat_datafile(result)", "def parse_results_from_file(fname):\n for l in open(fname,\"r\"):\n fields=l.split()\n query_name=fields[0]\n ranks=[int(rank) for rank in fields[1::2]]\n yield (query_name, list(zip(ranks,fields[2::2])) )", "def _process(self, file: bytes) -> Sequence[List[Tuple[str]]]:\n train_data = file[: -2 * self.num_eval_symbols]\n val_data = file[-2 * self.num_eval_symbols: -self.num_eval_symbols]\n test_data = file[-self.num_eval_symbols:]\n\n symbol = '' if self.remove_end_of_line else str(ord('\\n'))\n train = ' '.join([str(c) if c != ord('\\n') else symbol for c in train_data])\n val = ' '.join([str(c) if c != ord('\\n') else symbol for c in val_data])\n test = ' '.join([str(c) if c != ord('\\n') 
else symbol for c in test_data])\n\n return [(train,)], [(val,)], [(test,)]", "def mk_fparse(filename, pserver):\n parses = []\n \n try:\n with open(filename) as f:\n vprint('OPEN: %s' % filename)\n xml = f.read()\n except IOError:\n print strerror(EIO)\n print(\"ERROR: Could not open %s\" % filename)\n return (parses, get_tagged_corefs(''), get_synsets({}))\n\n # remove unwanted characters from xml\n vprint('\\tPARSE: Parsing file: %s' % filename)\n # parse_tries = 0\n # while parse_tries < 5:\n # try:\n # t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))\n # parse_tries = 0\n # break\n # except jsonrpc.RPCTimeoutError:\n # vprint('\\tERROR: RPCTimeoutError - retrying')\n # parse_tries += 3\n # except jsonrpc.RPCTransportError:\n # vprint('\\tERROR: RPCTransportError - retrying')\n # data = _normalize_sentence(_remove_tags(xml))\n # sentences = [sent for part in data.split('\\n\\n')\n # for sent in sent_tokenize(part)]\n # try:\n # xml1 = data[:data.find(sentences[len(sentences)/3])]\n # xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]\n # xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]\n # t1 = loads(pserver.parse(xml1))\n # t2 = loads(pserver.parse(xml2))\n # t3 = loads(pserver.parse(xml3))\n # t = dict(t1.items() + t2.items() + t3.items())\n # parse_tries = 0\n # break\n # except Exception:\n # parse_tries = -1\n # break\n # parse_tries += 1\n # if parse_tries != 0:\n # vprint('\\tFATAL: RPCTransportError - skipping')\n \n sentences = [sent for part in xml.split('\\n\\n')\n for sent in sent_tokenize(part)]\n vprint('\\tPARSE: Parsing sentences: %s' % filename)\n for sent in sentences:\n sent_corefs = get_tagged_corefs(sent, ordered=True)\n # remove unwanted characters from xml\n sent = _normalize_sentence(_remove_tags(sent))\n parse_tries = 0\n while parse_tries < 5:\n try:\n sparse = loads(pserver.parse(sent))\n parse_tries = 0\n break\n except jsonrpc.RPCTransportError:\n vprint('\\tERROR: RPCTransportError - retrying')\n parse_tries += 1\n if parse_tries != 0:\n vprint('\\tFATAL: RPCTransportError - skipping')\n \n pparse = _process_parse(sparse, sent_corefs)\n if pparse:\n parses.append(pparse)\n\n pos_tags = {}\n for parse in parses:\n for word, attr in parse[1]:\n tags = pos_tags.get(word, set())\n tags.add(attr['PartOfSpeech'])\n pos_tags[word] = tags\n \n return parses, get_tagged_corefs(xml), get_synsets(pos_tags)", "def test_ip_extraction_gz(self):\n self.parser.parse_file(self.test_data_dir + \"/txt_ips.txt.gz\")\n self.assertEqual(self.test_data_ips, self.parser.ips)", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def parse(self, infile):\r\n raise NotImplementedError()", "def parse(self):\n for line in self.lines:\n self.read_line(line)\n return self.assembler_lines", "def read_krun_results_file(results_file):\n results = None\n with bz2.BZ2File(results_file, 'rb') as file_:\n results = json.loads(file_.read())\n return results\n return None", "def parse_lines(filename):\n line_counter = 0\n with open(filename, 'r') as rf:\n for line_txt in rf:\n try:\n d = json.loads(line_txt)\n tup = (\n d['attributed_to'],\n int(d['date_time'][8:10]),\n 
d.get('used_first_time_today', False),\n d.get('first_utm_source', 'unknown') \n )\n except:\n print('Error parsing line_txt:', line_txt)\n line_counter += 1\n if line_counter % 10 ** 6 == 0:\n print('read %dM lines' % (line_counter // 10 ** 6))\n yield tup # yield: https://stackoverflow.com/a/231855", "def _transform_results(self) -> List[BenchmarkResult]:\n with open(self.result_file, \"r\") as f:\n raw_results = json.load(f)\n\n parsed_results = []\n for suite in raw_results[\"suites\"]:\n parsed_results += self._parse_suite(\n results=suite,\n extra_tags={\"suite\": suite[\"name\"], \"source\": \"cpp-micro\"},\n )\n\n return parsed_results", "def parse_single_result(filePath):\r\n numThreads, queue, affinity = parse_parameters(filePath)\r\n\r\n # parse results\r\n for line in open(filePath):\r\n if \"reported_time\" in line:\r\n s = line.split(\" \")[0]\r\n \r\n bench = s.split(\".\")[3]\r\n runtime = float(get_last_column_number(line))\r\n\r\n model[bench][affinity][numThreads].append(runtime)\r\n \r\n #print(\"threads:\" + str(numThreads) + \" affinity:\" + str(affinity) + \" queue:\" + str(queue))\r", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def parse_chunks(self):\n logger.info('parse_chunks()')\n\n while (self.replay.pos < len(self.replay)):\n chunk_type = self.replay.read_uint32()\n chunk_size = self.replay.read_int32()\n offset = self.replay.bytepos\n\n if chunk_type == ChunkTypes.CHECKPOINT.value:\n self.parse_checkpoint()\n\n elif chunk_type == ChunkTypes.EVENT.value:\n self.parse_event()\n\n elif chunk_type == ChunkTypes.REPLAYDATA.value:\n self.parse_replaydata()\n\n elif chunk_type == ChunkTypes.HEADER.value:\n self.parse_header(chunk_size)\n\n self.replay.bytepos = offset + chunk_size" ]
[ "0.61069715", "0.5438141", "0.54298097", "0.5386857", "0.53416765", "0.52968967", "0.5296664", "0.525452", "0.51831484", "0.51509607", "0.51455534", "0.51425", "0.513542", "0.5126798", "0.51113844", "0.51109505", "0.50837934", "0.508252", "0.50731415", "0.5058527", "0.50574726", "0.50527096", "0.5042906", "0.5039642", "0.50318867", "0.5029778", "0.50241715", "0.49863842", "0.49842858", "0.49821797" ]
0.6268908
0
Returns whether connected and ACKed
def is_connected(self):
    if self.connected and self.connack_rec:
        return 1
    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isConnected():", "def isconnected(self) -> bool:", "def is_connected(self):\n if self._socket:\n return True\n else:\n return False", "def is_connected(self) -> bool:", "def connected(self):\n return self.port.is_open", "def isconnected(self) -> bool:\n ...", "def is_connected(self):\r\n return self.__socket is not None", "def getIsConnected(self):\n if self._socket == None:\n return False\n\n # Assume we are still connected. TODO: Do a test receive?\n return True", "def is_connected(self):\n return self.connected_channel is not None", "def is_connected(self):\n return True", "def connected(self):\n return bool(self.serial)", "def connected(self):\n return self._connection_event.is_set()", "def connected(self) -> bool:\n return self.state == STATE_CONNECTED", "def is_connected(self):\n return self.connected", "def is_connected(self):\n return self.hw_connected", "def is_connected(self):\n return self.is_connected", "def is_connected(self) -> bool:\n pass", "def is_connected(self):\n return self._socket is not None", "def is_connected(self):\n return self.serial_connection.isOpen()", "def is_connected(self):\n return self._current_protocol is not None", "def is_connected(self):\n return self.connector and self.connector.state == \"connected\"", "def is_connected(self):\n return self.connector and self.connector.state == 'connected'", "def is_connected(self) -> bool:\n try:\n # When MSG_PEEK is used the data is treated as unread\n # and the next recv shall still return this data\n data = self.socket.recv(self.BUFFER_SIZE, socket.MSG_PEEK)\n if len(data) == 0:\n return False\n return True\n except ConnectionResetError:\n return False", "def Connected(self):\r\n return self.Port.is_open", "def is_connected(self):\n return self._port.is_connected()", "def connected(self) -> bool:\n\t\treturn self._raw_result['data']['connected']", "def isConnected(self):\n return self.connected", "def connected(self):\n\n if self._connection:\n if self._connection.is_closed == True:\n return False\n else:\n return True\n else:\n return False", "def is_connected(self):\n return self._connected", "def is_connected(self):\n return self._connected" ]
[ "0.7547859", "0.7478661", "0.7429876", "0.7380853", "0.73499054", "0.730056", "0.728276", "0.7265231", "0.72598755", "0.7242413", "0.7239942", "0.72248036", "0.7222459", "0.721962", "0.72177106", "0.7213608", "0.7208526", "0.72019684", "0.71913517", "0.71713716", "0.717108", "0.71681434", "0.7155569", "0.7138056", "0.7135867", "0.7132474", "0.71314436", "0.7126795", "0.71193683", "0.71193683" ]
0.8259857
0
Renders a list of historic batches
def view_batches(request):
    template = 'batch_list.html'
    context = {
        'invalid_due_date': request.GET.get('invalid_due_date')
    }
    try:
        get_batches(request, context)
    except Exception as e:
        context['error'] = '{} {}'.format(e, traceback.format_exc())
    # TODO: GO PAF - Start
    context['go_funder1'] = go_funder.objects.get(funder_code='1')
    context['go_funder2'] = go_funder.objects.get(funder_code='2')
    context['go_funder3'] = go_funder.objects.get(funder_code='3')
    context['go_funder4'] = go_funder.objects.get(funder_code='4')
    context['go_funder5'] = go_funder.objects.get(funder_code='5')
    # TODO: GO PAF - End
    return render(request, template, context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_batches() -> str:\r\n tank_possibilities = [\"Albert\", \"Brigadier\", \"Camilla\", \"Dylon\", \"Emily\",\r\n \"Florence\", \"Gertrude\", \"Harry\", \"R2D2\",\r\n \"No Tank Needed\"]\r\n return render_template(\"view_batches.html\",\r\n batch_output=current_brewings,\r\n tank_options=tank_possibilities)", "def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)", "def history():\n rows = db.execute(\"SELECT * FROM histories WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", rows=rows)", "def history():\n histories = db.execute(\"SELECT * from purchases WHERE user_id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", histories=histories)", "def list_history(request):\n history = History.objects\n\n if not is_admin(request.user):\n history = history.filter(submitter=request.user)\n history = history.order_by('-submission_date')\n\n return render('editor/list_history.mako', request, {\n 'history': history,\n })", "def history():\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)", "def history():\n user_id = session[\"user_id\"]\n\n history_list = db.execute(\"SELECT symbol, price, amount, timestamp FROM stocks WHERE user_id = :user_id\", user_id = user_id)\n\n rows = len(history_list)\n\n history = []\n\n for row in range(rows-1, -1, -1):\n history.append([history_list[row][\"symbol\"], history_list[row][\"amount\"], history_list[row][\"price\"], history_list[row][\"timestamp\"]])\n\n return render_template(\"history.html\", history = history, rows = rows)", "def history():\n files = os.listdir(app.config['SEGMENTS_FOLDER'])\n if len(files) <= 3:\n flash('There is no history yet', 'warning')\n return redirect(url_for('home'))\n\n range_list, segments_list, full_track_dict_list = generate_track_and_segments_data(app, files)\n\n return render_template(\"history.html\", segments_list=segments_list,\n full_track_dict_list=full_track_dict_list,\n range_list=range_list,\n title=\"history\")", "def history():\n\n rows = db.execute('SELECT operation, symbol, shares, price, date FROM transactions WHERE id = :id',\n id=session['user_id'])\n\n return render_template('history.html', stocks=rows[::-1])", "def history():\n\n data = db.execute(\"select * from history\")\n return render_template(\"history.html\", data=data)", "def history():\n query = Records.query.filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"history.html\", rows=query)", "def history():\n\n symbols = []\n shares = []\n prices = []\n times = []\n\n purchases = db.execute(\"SELECT * FROM purchase WHERE id = :username\", username=session[\"user_id\"])\n length = len(purchases)\n\n for item in purchases:\n symbols.append(item[\"symbol\"])\n shares.append(item[\"shares\"])\n prices.append(item[\"price\"])\n times.append(item[\"created_at\"])\n\n return render_template(\"history.html\", symbols = symbols, shares = shares, prices = prices, times = times, length = length)", "def history():\n hist = db.execute(\"SELECT * FROM shares WHERE userid = :uid ORDER BY date DESC\", uid=session[\"user_id\"])\n for h in hist:\n h[\"total\"] = round(h[\"value\"]*h[\"quantity\"],2)\n return render_template(\"history.html\", context=hist)", "def history():\n \n # selection of name, symbol, shares and cash of user stocks\n hist = db.execute(\"SELECT * FROM history WHERE id=:id\", id = session[\"user_id\"])\n 
return render_template(\"history.html\", hist=hist)", "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def list_of_stories():\n return render_template(\"list_of_stories.html\", stories = stories.values())", "def history():\n transactions_list = db.execute(\"SELECT stock, units, price, time, type FROM transactions WHERE id = :current_id\",\n current_id=session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions_list)", "def history(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/history.html',\r\n context_instance=RequestContext(request,\r\n {\r\n 'title': 'Work History',\r\n 'contact': Contact.objects.get(pk=1),\r\n 'work_histories': WorkHistory.objects.all().order_by('-start_date'),\r\n 'current_application': Application.objects.get(pk=1),\r\n\r\n })\r\n )", "def history(request):\n\treturn render(request,'history.html',None)", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", history = history)", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", history = history)", "def render(self,screen):\n for boids in self.boid_list:\n boids.render(screen)", "def history():\n # Select stock info for every single stock transaction for the respective user\n rows = db.execute(\"SELECT symbol, shares, price, transacted FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Return template with the list that has each stock transaction info\n return render_template(\"history.html\", rows=rows)", "def history():\n # extract history of operation for a particular user\n historical_data = db.execute(\"SELECT Symbol, Company, Shares, Price, Total, Timestamp FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", historical=historical_data)", "def view_command():\n listing.delete(0, END)\n for row in backend.view():\n listing.insert(END, row)", "def btn_display_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n del image_fio\n del img_hist_fio\n self.show_as_waiting(False)", "def history():\n rows = db.execute(\"SELECT stock_id, stocks.symbol, price, shares, date FROM history JOIN stocks ON history.stock_id=stocks.id WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def retag_all_batches(apps, schema_editor):\n pass", "def history():\n\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user ORDER BY date\",\n user=user)\n\n return render_template(\"history.html\", stocks = owned)", "def _draw_stream_histories(self, painter):\n\t\tfor stream in sorted(self._history_boundaries.keys()):\n\t\t\tself._draw_stream_history(painter, 
stream)" ]
[ "0.66533065", "0.62283266", "0.60942686", "0.60652864", "0.59861034", "0.5948222", "0.58719194", "0.5858274", "0.5838414", "0.58291125", "0.5793077", "0.57760614", "0.5725792", "0.5713223", "0.5686191", "0.56724244", "0.56462044", "0.5618192", "0.56146944", "0.5612472", "0.5587022", "0.55756986", "0.5551178", "0.5548425", "0.55289096", "0.5527737", "0.552388", "0.5509087", "0.550629", "0.54883593" ]
0.6571801
1
Computes the number of timesteps needed to get the simulation past tmax
def num_sim_steps(self, dt, tmax):
    return int(np.ceil(tmax / dt))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_time_steps (self):\n return self.degreedays.thawing.num_timesteps", "def n_timesteps(self) -> int:\n return len(self.time)", "def num_timesteps(self):\n return self._num_timesteps", "def runtime(self):\n return self.tmax_epochs - self.tmin_epochs", "def max_steps(self) -> int:\n return pulumi.get(self, \"max_steps\")", "def get_max_steps(self):\n return float(self.trainer_parameters[\"max_steps\"])", "def get_max_steps(self):\n return float(self.trainer_parameters['max_steps'])", "def get_max_steps(self) -> float:\n return float(self.trainer_parameters[\"max_steps\"])", "def get_num_timesteps(self) -> int:\n return len(self._indices)", "def get_max_iters():\n return 2000", "def total_steps(self) -> int:\n if self.hparams.max_steps:\n return self.hparams.max_steps\n else:\n assert self.hparams.max_epochs is not None\n num_devices = max(1, self.hparams.gpus * self.hparams.num_nodes) # TODO: consider num_tpu_cores\n effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices\n dataset_size = len(self.train_loader.dataset)\n return (dataset_size / effective_batch_size) * self.hparams.max_epochs", "def total_steps(self) -> int:\n if self.hparams.max_steps:\n return self.hparams.max_steps\n else:\n assert self.hparams.max_epochs is not None\n num_devices = max(1, self.hparams.gpus * self.hparams.num_nodes) # TODO: consider num_tpu_cores\n effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices\n dataset_size = len(self.train_loader.dataset)\n return (dataset_size / effective_batch_size) * self.hparams.max_epochs", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def n_steps(self) -> int:\n return len(self) - 1 # subtract the base metric", "def num_steps(self):\n return self.torsoStepCount() + 1", "def calc_stepsize(self):\n # Calculate step size\n step = 1.0/((self.n+self.d)*np.max(np.sum(self.p, axis=0)))\n return step", "def get_steps_num():\n return 0", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def ntimestep(self):\n if self._ntimestep is None:\n self._ntimestep = self.get_data_ntimestep()\n\n return self._ntimestep", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def get_timesteps(self):\n return len(self.measurement_history)", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def max_trials(self) -> int:\n return self._max_trials", "def num_steps(self) -> int:\n return self._num_steps", "def number_of_steps(self) -> int:\n return len(self.step_points)", "def max_temp(self):\n return 30", "def n_t(t, mean, sd):\n\n lmbd = np.random.normal(mean, sd, 1)\n vec_time = np.arange(t)\n nt_iter = np.zeros(t)\n\n while lmbd < 0:\n lmbd = np.random.normal(mean, sd, 1)\n\n for j in vec_time:\n nt_iter[j] = np.ceil(lmbd*j)\n return nt_iter", "def timeScale(self) -> int:\n return int(1 / (1 - self.momentum))", "def n(self):\n return self._time_axis.size", "def _get_wall_clock_step_time_threshold(self):\n if self.constants.physical:\n sim = self.mujoco_simulation.mj_sim\n return float(sim.nsubsteps) * sim.model.opt.timestep\n else:\n # No minimum threshold for simulation.\n return 0" ]
[ "0.7784215", "0.75834614", "0.7177998", "0.7147208", "0.70487255", "0.68899256", "0.68801206", "0.6862652", "0.68206733", "0.6729553", "0.6663248", "0.6663248", "0.6661061", "0.6585944", "0.65505177", "0.65240073", "0.65111583", "0.65044254", "0.6500881", "0.6465097", "0.6460338", "0.64459807", "0.64159787", "0.63452697", "0.63212013", "0.630823", "0.6292617", "0.62813705", "0.6249347", "0.62375945" ]
0.8402275
0
Wrapper around fast_pad_shift() in fastshift.c. Works out the optimum number of indices by which y2 needs to be shifted to have the minimum least squares error between the two
def fast_pad_shift(self, y1, y2):
    if len(y1) != len(y2):
        raise ValueError("Input sizes must be the same")
    y1_contig = np.ascontiguousarray(y1, dtype=np.float64)
    y2_contig = np.ascontiguousarray(y2, dtype=np.float64)
    y1_ptr = y1_contig.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    y2_ptr = y2_contig.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    return self.libcalcsim.fast_pad_shift(y1_ptr, y2_ptr, len(y1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perfect_shift(y):\n return np.append([y[-1]],y[0:-1])", "def lrshift(val, n) -> np.int64:\n return (val % (1 << 64)) >> n", "def find_best_shift(l_x_as, l_y_as, l_yp_as, r_x_orig_as, r_y_as, r_yp_as, x_stride):\n logg = logging.getLogger(f\"c.{__name__}.find_best_shift\")\n # logg.debug(f\"Start find_best_shift\")\n\n shift_start = timer()\n\n # find how much the right segment can shift\n shift_11 = l_x_as[-1] - r_x_orig_as[-1] - (l_x_as[-1] - l_x_as[0]) / 2\n shift_10 = l_x_as[-1] - r_x_orig_as[0]\n # align the shift on the stride grid: now if you sum the shift to l_x_as\n # the points are still aligned.\n shift_a_11 = math.floor(shift_11 / x_stride) * x_stride\n shift_a_10 = math.ceil(shift_10 / x_stride) * x_stride\n shift_range = np.arange(shift_a_11, shift_a_10 + x_stride / 2, x_stride)\n # recap = f\"shift_11: {shift_11} shift_10: {shift_10}\"\n # recap += f\" shift_a_11: {shift_a_11} shift_a_10: {shift_a_10}\"\n # logg.debug(recap)\n\n best_dist_x_touch = float(\"inf\")\n best_shift = None\n best_r_x_as = None\n best_l_tang_y_as = None\n\n tangent_times = []\n\n for shift in shift_range:\n r_x_as = r_x_orig_as + shift\n # logg.debug(f\"\\nNew shift r_x_as[0]: {r_x_as[0]} r_x_as[-1]: {r_x_as[-1]}\")\n\n # ax.plot(r_x_as, r_y_as, color=\"y\", ls=\"-\", marker=\"\")\n # ax.plot(r_x_as, r_y_as, color=\"y\", ls=\"\", marker=\".\")\n\n # find the indexes where the tangent touches the curves\n l_xid, r_xid, l_tang_y_as, tangent_time = find_lower_tangent(\n l_x_as, l_y_as, r_x_as, r_y_as, r_yp_as\n )\n\n tangent_times.append(tangent_time)\n\n if l_xid == -1:\n # logg.debug(f\"Tangent not found\")\n continue\n\n # find where the tangent touches the segments\n l_x_touch = l_x_as[l_xid]\n r_x_touch = r_x_as[r_xid]\n\n if r_x_touch < l_x_touch:\n # logg.debug(f\"Tangent goes the wrong way\")\n continue\n\n # compute how far are the two contacts\n dist_x_touch = r_x_touch - l_x_touch\n\n # if this shift does not improve the distance, go to the next\n if dist_x_touch >= best_dist_x_touch:\n continue\n\n # save info about the current shift\n best_dist_x_touch = dist_x_touch\n best_shift = shift\n best_r_x_as = r_x_as\n best_l_tang_y_as = l_tang_y_as\n\n # extend the points of contact\n best_l_x_ext = l_x_touch - dist_x_touch / 2\n best_r_x_ext = r_x_touch + dist_x_touch / 2\n # recap = f\"l_x_touch: {l_x_touch:.4f} r_x_touch {r_x_touch:.4f}\"\n # recap += f\" dist_x_touch: {dist_x_touch:.4f}\"\n # recap += f\" best_l_x_ext: {best_l_x_ext:.4f} best_r_x_ext {best_r_x_ext:.4f}\"\n # logg.debug(recap)\n\n tangent_time_mean = sum(tangent_times) / len(tangent_times)\n logg.debug(f\"Mean tangent time: {tangent_time_mean:.6f}\")\n\n # extract the best value as current (r_x_as = r_x_orig_as + best_shift)\n r_x_as = best_r_x_as\n\n # find the index of the touch point on the left segment\n l_lower_x = l_x_as < best_l_x_ext\n # argmin returns the *first* occurrence of the min value\n l_id_e_x = np.argmin(l_lower_x)\n # for symmetry, if we can, we keep the previous index (the last of the True)\n if l_id_e_x > 0:\n l_id_e_x -= 1\n\n # find the index of the touch point on the right segment\n r_lower_x = r_x_as < best_r_x_ext\n r_id_e_x = np.argmin(r_lower_x)\n\n # recap = f\"l_id_e_x: {l_id_e_x}\"\n # recap += f\" l_x_as[l_id_e_x]: {l_x_as[l_id_e_x]:.4f}\"\n # recap += f\" r_id_e_x: {r_id_e_x}\"\n # recap += f\" r_x_as[r_id_e_x]: {r_x_as[r_id_e_x]:.4f}\"\n # logg.debug(recap)\n\n # find the extended contact point\n l_p_ext = OrientedPoint(\n l_x_as[l_id_e_x], l_y_as[l_id_e_x], 
slope2deg(l_yp_as[l_id_e_x])\n )\n r_p_ext = OrientedPoint(\n r_x_as[r_id_e_x], r_y_as[r_id_e_x], slope2deg(r_yp_as[r_id_e_x])\n )\n _, ext_x_as, ext_y_as, _ = compute_aligned_cubic_segment(\n l_p_ext,\n r_p_ext,\n x_stride,\n )\n\n # recap = f\"l_id_e_x: {l_id_e_x}\"\n # recap += f\" l_x_as[l_id_e_x]: {l_x_as[l_id_e_x]:.4f}\"\n # recap += f\" ext_x_as[0]: {ext_x_as[0]:.4f}\"\n # recap += f\" ext_x_as[-1]: {ext_x_as[-1]:.4f}\"\n # recap += f\" r_id_e_x: {r_id_e_x}\"\n # recap += f\" r_x_as[r_id_e_x]: {r_x_as[r_id_e_x]:.4f}\"\n # logg.debug(recap)\n\n # show id to use when plotting\n l_id_s_x = l_id_e_x\n r_id_s_x = r_id_e_x\n\n # fix the ext ids, there is a gap of 1 (one) stride missing on one side\n if not math.isclose(l_x_as[l_id_e_x], ext_x_as[0]):\n logg.debug(f\"Left not close\")\n # check that is not the last\n if l_id_e_x < l_x_as.shape[0] - 1:\n l_id_s_x = l_id_e_x + 1\n\n if not math.isclose(r_x_as[r_id_e_x], ext_x_as[-1]):\n logg.debug(f\"Right not close\")\n # check that is not the first\n if r_id_e_x > 0:\n r_id_s_x = r_id_e_x - 1\n\n shift_end = timer()\n logg.debug(f\"Time to find optimal shift: {shift_end - shift_start:.6f}\")\n\n return (\n best_shift,\n best_r_x_as,\n best_l_tang_y_as,\n l_id_s_x,\n r_id_s_x,\n l_p_ext,\n r_p_ext,\n ext_x_as,\n ext_y_as,\n )", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)", "def _rel_shift(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(0, 3, 2, 1)\n idx = torch.arange(klen, device=xs.device)\n k_idx, q_idx = idx.unsqueeze(0), idx.unsqueeze(1)\n rel_pos_idx = torch.abs(k_idx - q_idx)\n if klen != qlen:\n rel_pos_idx = rel_pos_idx[:, :qlen]\n mask = xs.new_ones(qlen, klen, dtype=torch.bool if torch_12_plus else torch.uint8)\n mask = torch.tril(mask, diagonal=0).transpose(1, 0)\n rel_pos_idx[mask] *= -1\n rel_pos_idx = klen - qlen - rel_pos_idx\n rel_pos_idx[rel_pos_idx < 0] *= -1\n if self.clamp_len > 0:\n rel_pos_idx.clamp_(max=self.clamp_len)\n rel_pos_idx = rel_pos_idx.expand_as(xs)\n x_shift = torch.gather(xs, dim=2, index=rel_pos_idx)\n x_shift = x_shift.permute(0, 3, 2, 1)\n return x_shift", "def _rel_shift(x, klen=-1):\n\n x = tf.transpose(x, perm=[2, 3, 0, 1])\n x_size = tf.shape(x)\n\n x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])\n x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])\n x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])\n x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1])\n\n x = tf.transpose(x, perm=[2, 3, 0, 1])\n\n return x", "def resampz(x, m_type, shift=1):\n sx = np.array(x.shape)\n\n if m_type == 0 or m_type == 1:\n y = np.zeros((sx[0] + np.abs(shift * (sx[1] - 1)), sx[1]))\n\n if m_type == 0:\n shift1 = np.arange(0, sx[1]) 
* (- shift)\n else:\n shift1 = np.arange(0, sx[1]) * shift\n\n if shift1[-1] < 0:\n shift1 = shift1 - shift1[-1]\n\n for n in range(sx[1]):\n y[shift1[n] + np.arange(0, sx[0]), n] = x[:, n]\n\n # Remove extra rows\n start = 0\n finish = y.shape[0]\n\n while np.linalg.norm(y[start, :], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[finish-1, :], 2) == 0:\n finish -= 1\n\n y = y[start:finish, :]\n\n elif m_type == 2 or m_type == 3:\n y = np.zeros((sx[0], sx[1] + np.abs(shift * (sx[0] - 1))))\n\n if m_type == 2:\n shift2 = np.arange(0, sx[0]) * (- shift)\n else:\n shift2 = np.arange(0, sx[0]) * shift\n\n if shift2[-1] < 0:\n shift2 = shift2 - shift2[-1]\n\n for m in range(sx[0]):\n y[m, shift2[m] + np.arange(0, sx[1])] = x[m, :]\n\n # Remove extra rows\n start = 0\n finish = y.shape[1]\n\n while np.linalg.norm(y[:, start], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[:, finish-1], 2) == 0:\n finish -= 1\n\n y = y[:, start:finish]\n\n else:\n print('Error: type not valid.')\n y = 0\n\n return y", "def __rlshift__(self, other):\r\n return NotImplemented", "def __rlshift__(self, other):\r\n return NotImplemented", "def shift_augmentation():\n shift = np.random.randint(-200, 201, size=2)\n return lambda image: shift_with_extension(image, shift)", "def pad_shorter(a, b, ii=0):\n # global data_orig\n # global data_new\n diff = a[ii].size - b[ii].size\n # print(\"diff\", diff)\n if (diff > 0):\n b = np.concatenate((np.zeros(\n (b.shape[0], diff//2)), b, np.zeros((b.shape[0], diff - diff//2))), axis=1)\n elif (diff < 0):\n diff = -diff\n a = np.concatenate((np.zeros(\n (a.shape[0], diff//2)), a, np.zeros((a.shape[0], diff - diff//2))), axis=1)\n\n return (a, b)", "def fftshift(X):\r\n # return scipy.fftpack.fftshift(X)\r\n return np.fft.fftshift(X)", "def iquadshift(a):\r\n if len(a.shape) == 1 :\r\n b = np.roll(a, +(a.shape[-1]/2-1), -1)\r\n else :\r\n b = np.roll(a, +(a.shape[-2]/2-1), -2)\r\n b = np.roll(b, +(b.shape[-1]/2-1), -1)\r\n return b", "def __lshift__(self, other):\r\n return NotImplemented", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n\tfactor = 2**(1.0 * n / 12.0)\n\tstretched = stretch(snd_array, 1.0/factor, window_size, h)\n\treturn speedx(stretched[window_size:], factor)", "def _width_shift_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random sign for the shifting direction\n sign = np.random.randint(0, 2)\n shift_pix = np.random.randint(0, self.shift)\n x = shift(x, [0, sign*shift_pix])\n m = shift(m, [0, sign*shift_pix, 0], mode='nearest')\n return x,m", "def quadshift(a):\r\n if len(a.shape) == 1 :\r\n b = np.roll(a, -(a.shape[-1]/2-1), -1)\r\n else :\r\n b = np.roll(a, -(a.shape[-2]/2-1), -2)\r\n b = np.roll(b, -(b.shape[-1]/2-1), -1)\r\n return b", "def min_error(a: np.ndarray, b: np.ndarray, shift: int,\n l: Optional[int] = None, w: int = 0) -> int:\n if l is None:\n l = len(a)\n shifts = np.arange(-shift, shift)\n errors = np.array([sq_error(a[w:l-w], b[w+n:l-w+n]) for n in shifts])\n if not len(errors):\n return 0\n return shifts[errors.argmin()]", "def get_global_shift(im1, im2, params):\n if im2 is None:\n return None\n shift = fft_flowvectors(im1, im2, global_shift=True)\n return shift", "def roll(arrayin, shift = (0, 0), silent = True):\r\n arrayout = arrayin.copy()\r\n # if shift is integer valued then use np.roll\r\n if (type(shift[0]) == int) or (type(shift[0]) == np.int) or (type(shift[0]) == np.int32) or (type(shift[0]) == np.int64):\r\n if shift[-1] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, 
shift[-1], -1)'\r\n arrayout = np.roll(arrayout, shift[-1], -1)\r\n # if shift is 1d then don't roll the other dim (if it even exists)\r\n if len(arrayout.shape) >= 2 :\r\n if shift[-2] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-2], -2)'\r\n arrayout = np.roll(arrayout, shift[-2], -2)\r\n # if shift is float valued then use the Fourier shift theorem\r\n elif (type(shift[0]) == float) or (type(shift[0]) == np.float32) or (type(shift[0]) == np.float64):\r\n # if shift is 1d\r\n if len(shift) == 1 :\r\n if silent == False :\r\n print 'arrayout = fftn_1d(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn_1d(arrayout)'\r\n arrayout = fftn_1d(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn_1d(arrayout)\r\n elif len(shift) == 2 :\r\n if silent == False :\r\n print 'arrayout = fftn(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn(arrayout)'\r\n arrayout = fftn(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn(arrayout)\r\n return arrayout", "def _sbd(x, y):\r\n ncc = _ncc_c(x, y)\r\n idx = ncc.argmax()\r\n dist = 1 - ncc[idx]\r\n yshift = roll_zeropad(y, (idx + 1) - max(len(x), len(y)))\r\n\r\n return dist, yshift", "def _rel_shift_legacy(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(1, 2, 0, 3).contiguous().view(qlen, klen, bs * n_heads)\n zero_pad = xs.new_zeros((qlen, 1, bs * n_heads))\n xs_shifted = torch.cat([zero_pad, xs], dim=1).view(klen + 1, qlen, bs * n_heads)[1:].view_as(xs)\n return xs_shifted.view(qlen, klen, bs, n_heads).permute(2, 0, 1, 3)", "def shift(shape, stride, anchors):\n shift_x = (keras.backend.arange(0, shape[1], dtype=keras.backend.floatx()) + keras.backend.constant(0.5,\n dtype=keras.backend.floatx())) * stride\n shift_y = (keras.backend.arange(0, shape[0], dtype=keras.backend.floatx()) + keras.backend.constant(0.5,\n dtype=keras.backend.floatx())) * stride\n\n shift_x, shift_y = meshgrid(shift_x, shift_y)\n shift_x = keras.backend.reshape(shift_x, [-1])\n shift_y = keras.backend.reshape(shift_y, [-1])\n\n shifts = keras.backend.stack([\n shift_x,\n shift_y,\n shift_x,\n shift_y\n ], axis=0)\n\n shifts = keras.backend.transpose(shifts)\n number_of_anchors = keras.backend.shape(anchors)[0]\n\n k = keras.backend.shape(shifts)[0] # number of base points = feat_h * feat_w\n\n shifted_anchors = keras.backend.reshape(anchors, [1, number_of_anchors, 4]) + keras.backend.cast(\n keras.backend.reshape(shifts, [k, 1, 4]), keras.backend.floatx())\n shifted_anchors = keras.backend.reshape(shifted_anchors, [k * number_of_anchors, 4])\n\n return shifted_anchors", "def find_best_shift(\n y, y_template, shift_polarity=False, skip_freq=1):\n if not np.any(y) or not np.any(y_template):\n warnings.warn('y or y_template is 0. 
Returning 0 shift')\n return 0, 1.\n n = len(y_template)\n n_shift = len(y) - n\n assert n_shift % skip_freq == 0\n n_shift = int(n_shift / skip_freq)\n corrs = np.zeros(n_shift)\n for i in range(n_shift):\n y_shift = y[i*skip_freq:i*skip_freq+n]\n corrs[i] = np.corrcoef(y_shift, y_template)[0, 1]\n if not shift_polarity:\n best_shift = np.argmax(corrs) * skip_freq\n polarity = 1.\n else:\n if corrs.max() < -corrs.min():\n best_shift = np.argmin(corrs) * skip_freq\n polarity = -1.\n return best_shift, polarity", "def womyshift(hop):\n import matplotlib.pyplot as plt\n import logging\n from tmath.wombat.inputter import inputter\n from tmath.wombat.wshow import wshow\n plt.cla()\n plt.plot(hop[0].wave,hop[0].flux,drawstyle='steps-mid')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.title(hop[0].obname)\n wshow()\n\n print('Routine to linearly shift flux scale\\n')\n\n shift=inputter('Enter flux shift: ','float',False)\n\n hop[0].flux=hop[0].flux+shift\n\n plt.plot(hop[0].wave,hop[0].flux,drawstyle='steps-mid')\n\n logging.debug('File {} flux scale shifted by {} A'.format\\\n (hop[0].obname,shift))\n\n #FIX header\n return hop", "def _power2d_driver(\n dx_lon, dx_time, y1, y2, /,\n nperseg=None,\n wintype='boxcar',\n center=np.pi,\n axis_lon=-1,\n axis_time=0,\n detrend='constant',\n):\n # Checks\n dx_lon = quack._as_step(dx_lon)\n dx_time = quack._as_step(dx_time)\n copower = y1 is not y2\n if len(y1.shape) < 2:\n raise ValueError('Need at least rank 2 array.')\n if y1.shape != y2.shape:\n raise ValueError(f'Shapes of y1 {y1.shape} and y2 {y2.shape} must match.')\n\n # Permute and flatten\n with quack._ArrayContext(y1, y2, push_right=(axis_time, axis_lon)) as context:\n # Get window and flattened, trimmed data\n y1, y2 = context.data\n win, winloc, y1, y2 = _window_data(y1, y2, nperseg=nperseg, wintype=wintype)\n pm = win.size // 2\n nwindows = winloc.size\n nextra, ntime, ncyclic = y1.shape\n\n # Setup output arrays\n Py1 = np.nan * np.empty((nextra, nwindows, pm * 2, ncyclic // 2))\n if copower:\n Py2 = Py1.copy()\n C = Py1.copy()\n Q = Py1.copy()\n\n # 2D transform for each window on non-cyclic dimension\n # Note since we got the rfft (not fft) in one direction, only have half the\n # coefficients (they are symmetric); means for correct variance, have to\n # double th power. These are analagous to Libby's notes for complex space\n for k in range(nextra):\n if (\n np.any(~np.isfinite(y1[k, :, :]))\n or np.any(~np.isfinite(y2[k, :, :]))\n ):\n warnings._warn_climopy('Skipping array with missing values.')\n continue\n for i, idx in enumerate(winloc):\n Fy1 = _fft2d(pm, win, y1[k, idx - pm:idx + pm, :], detrend)\n Py1[k, i, :, :] = np.abs(Fy1) ** 2\n Py1[k, i, :, :-1] *= 2\n if copower:\n Fy2 = _fft2d(pm, win, y2[k, idx - pm:idx + pm, :], detrend)\n Py2[k, i, :, :] = np.abs(Fy2) ** 2\n Phi1 = np.arctan2(Fy1.imag, Fy1.real)\n Phi2 = np.arctan2(Fy2.imag, Fy2.real)\n C[k, i, :, :] = np.abs(Fy1) * np.abs(Fy2) * np.cos(Phi1 - Phi2)\n Q[k, i, :, :] = np.abs(Fy1) * np.abs(Fy2) * np.sin(Phi1 - Phi2)\n Py2[k, i, :, :-1] *= 2\n C[k, i, :, :-1] *= 2\n Q[k, i, :, :-1] *= 2\n\n # Get output arrays\n # TODO: Why remove mean power?\n # NOTE: This Phi relationship is still valid. Check Libby notes. Divide\n # here Q by C and the Ws cancel out, end up with average phase diff.\n # NOTE: Default order is to go 0 1 ... N/2 -N/2 ... -1. We reorder so\n # frequencies are from -N/2 ... -1 1 ... 
N/2.\n fx_time = np.fft.fftfreq(2 * pm)\n fq = np.abs(fx_time[pm:pm + 1]) # Nyquist frequency singleton array\n fx_time = np.concatenate((-fq, fx_time[pm + 1:], fx_time[1:pm], fq), axis=0)\n fx_lon = np.fft.rfftfreq(ncyclic)[1:]\n Py1 = Py1.mean(axis=1)\n arrays = (Py1,)\n if copower:\n Py2 = Py2.mean(axis=1)\n C = C.mean(axis=1)\n Q = Q.mean(axis=1)\n Coh = (C ** 2 + Q ** 2) / (Py1 * Py2)\n Phi = np.arctan2(Q, C) # phase\n Phi[Phi >= center + np.pi] -= 2 * np.pi\n Phi[Phi < center - np.pi] += 2 * np.pi\n arrays = (C, Q, Py1, Py2, Coh, Phi)\n\n # Replace context data\n context.replace_data(*arrays)\n\n # Return unflattened data\n if copower:\n return (fx_lon / dx_lon, fx_time / dx_time, *context.data)\n else:\n return (fx_lon / dx_lon, fx_time / dx_time, context.data)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def find_shift(ref, img):\n im0 = prepare(ref)\n im1 = prepare(img)\n shift, error, diffphase = register_translation(im0, im1, 100)\n\n return shift" ]
[ "0.6466211", "0.60334533", "0.5912409", "0.58153456", "0.5814866", "0.5708873", "0.56890917", "0.56569624", "0.56388104", "0.56388104", "0.55942255", "0.55764323", "0.5565782", "0.55486435", "0.5502997", "0.5485006", "0.5467454", "0.54600763", "0.5451744", "0.54203475", "0.5412962", "0.54117405", "0.5397123", "0.53926456", "0.5390179", "0.538645", "0.5365647", "0.5330404", "0.5330404", "0.53262824" ]
0.714775
0
Counts the number of times a pattern is in text.
def pattern_count(text, pattern):
    return len([i for i in range(0, len(text) - len(pattern) + 1) if text[i:i + len(pattern)] == pattern])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count(text):\n return len(text)", "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def text_count(self, text):\n res = 0\n for intv in self:\n if intv._text == text:\n res += 1\n return res", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))", "def __count_text(text, limit=None):\n\n count = 0\n is_text = True\n for i, c in enumerate(text):\n if is_text and c == '\\33':\n is_text = False\n\n if is_text:\n count += 1\n if limit is not None and count == limit:\n return i + 1\n\n if not is_text and c == 'm':\n is_text = True\n\n if limit is not None:\n return len(text)\n else:\n return count", "def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting position should be between 0 and the size \" + \\\n \"of the DNA\")\n\n k = len(pattern)\n count = 0\n end = len(DNA) - k + 1 if end == 0 else end\n\n for i in range(0, end):\n if hamming_distance(DNA[i:i+k], pattern) <= mutation_thresh:\n count += 1\n\n return count", "def text_cond_count(self, condition):\n res = 0\n for intv in self:\n if 
condition(intv._text):\n res += 1\n return res", "def get_count(self):\n\n return len(self._pattern)", "def count_exclamations(txt):\n count = 0\n for c in txt:\n if c == '!':\n count += 1\n return count", "def find_all_occurrences_brute_force(pattern, text):\n\n result = []\n\n if len(text) < len(pattern):\n return result\n\n for i in range(0, len(text) - len(pattern) + 1):\n matched = True\n\n k = 0\n for j in range(i, i + len(pattern)):\n if pattern[k] != text[j]:\n matched = False\n break\n k += 1\n\n if matched:\n result.append(i)\n\n return result", "def duplicate_count(text):\n n = 0\n for c in set(text.lower()):\n if text.lower().count(c) > 1:\n n += 1\n return n", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def count_token(text):\n count=0\n if isinstance(text, list):\n for ayah in text:\n count=count+ayah.count(' ')+1\n else:\n count=text.count(' ')+1\n\n return count", "def count_sentences(text):\n count = 0\n terminals = '.;?!'\n for character in text:\n \n if character in terminals:\n count += 1\n\n return count", "def count(self, word):\n pass", "def find_all_occurrences_knuth_morris_pratt(pattern, text):\n\n if '$' in pattern:\n raise ValueError('The pattern contains $.')\n if '$' in text:\n raise ValueError('The text contains $.')\n\n if pattern == '':\n return list(range(0, 1 + len(text)))\n\n work_text = pattern + '$' + text\n prefix_function = Util._compute_prefix_function(work_text)\n\n result = []\n for i in range(len(pattern) + 1, len(work_text)):\n if prefix_function[i] == len(pattern):\n result.append(i - 2 * len(pattern))\n\n return result", "def find_all_occurrences_knuth_morris_pratt(pattern, text):\n\n if '$' in pattern:\n raise ValueError('The pattern contains $.')\n if '$' in text:\n raise ValueError('The text contains $.')\n\n if pattern == '':\n return list(range(0, 1 + len(text)))\n\n work_text = pattern + '$' + text\n prefix_function = Util._compute_prefix_function(work_text)\n\n result = []\n for i in range(len(pattern) + 1, len(work_text)):\n if prefix_function[i] == len(pattern):\n result.append(i - 2 * len(pattern))\n\n return result" ]
[ "0.85784805", "0.8479635", "0.80442816", "0.76850486", "0.75937515", "0.756862", "0.75528634", "0.73120034", "0.7220975", "0.70781434", "0.70781434", "0.6994858", "0.6943327", "0.691691", "0.6849857", "0.6743599", "0.6734056", "0.6717872", "0.66522515", "0.66400516", "0.66149616", "0.65665126", "0.64970523", "0.63445914", "0.63093525", "0.62569785", "0.6188041", "0.6170467", "0.6164275", "0.6164275" ]
0.8523106
1
Returns the patterns (kmers) whose frequency of occurrence (count) is greater than t.
def frequent_words_t(text, k, t):
    frequent_patterns = []
    count = {}
    for i in range(0, len(text)-k+1):
        pattern = text[i:i+k]
        count[i] = pattern_count(text, pattern)
        if count[i] >= t and pattern not in frequent_patterns:
            frequent_patterns.append(text[i:i+k])
    return frequent_patterns
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_clumps(text, k, len_win, t):\n\n patterns = []\n len_text = len(text)\n for i in range(len_text - len_win + 1):\n window = text[i:i + len_win]\n freq_map = frequency_table(window, k)\n for key in freq_map.keys():\n if freq_map[key] >= t and key not in patterns:\n patterns.append(key)\n return patterns", "def chunkedClumpFinder(sequence, k, L, t):\n\n frequentPatterns = set([])\n for i in range(len(sequence)):\n window = sequence[i:i + L]\n frequencies = {}\n\n for j in range(len(window)):\n pattern = window[j:j + k]\n if pattern not in frequencies:\n frequencies[pattern] = 1\n else:\n frequencies[pattern] += 1\n for p in frequencies:\n if frequencies[p] >= t:\n frequentPatterns.add(p)\n return frequentPatterns", "def clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n for i in range(0, len(text) - L + 1):\n subtext = text[i:i + L]\n freq_array = compute_freq(subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def faster_frequent_words(text, k):\n frequent_patterns = []\n freq_array = compute_freq(text, k)\n max_count = max(freq_array)\n for i in range(0, len(text)-k+1):\n if freq_array[i] == max_count:\n pattern = number_to_pattern(i, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def filter_patterns(self,threshold):\n if threshold is not None:\n pass #learn threshold\n return filter(lambda pattern: pattern.score > threshold, self.patterns)", "def frequent_words(text, k):\n\n frequent_patterns = []\n freq_map = frequency_table(text, k)\n max_val = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == max_val:\n frequent_patterns.append(key)\n return frequent_patterns", "def frequent_words(text, k):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n max_count = max(count.values()) if count else 0\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n if count[i] == max_count and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def find_clumps(text, k, L, t):\n clumps = []\n k_mers = frequent_words_t(text, k, t)\n for k_mer in k_mers:\n positions = find_position(k_mer, text)\n for position in positions:\n subtext = text[position:position + L]\n count = pattern_count(subtext, k_mer)\n if count >= t and k_mer not in clumps:\n clumps.append(k_mer)\n return clumps", "def catsAtFreqThresh(self, stopPoint):\n freqDist = self.freqDist(self.cats)\n mostFreq = max(freqDist.keys())\n rows = []\n catsSeen 
= 0\n totalCats = len(self.cats)\n for threshold in xrange(0, mostFreq):\n totalCats -= freqDist.get(threshold, 0)\n if threshold >= stopPoint:\n break\n rows.append(totalCats)\n return rows", "def frequency_array(text, k):\r\n freq_list=[]\r\n p_list=pattern_list(k)\r\n for i in p_list:\r\n freq_list.append(PatternCount(i,text))\r\n return freq_list", "def most_frequent_kmers(DNA, k, mutation_thresh=0, reverse=False):\n freq_dict = dictionaries.FrequencyDict(DNA, k, mutation_thresh)\n\n kmers_found = set()\n current_highest_freq = 0\n\n for kmer, frequency in freq_dict.items():\n\n rev = reverse_complement(kmer, as_string=True)\n if reverse and rev in freq_dict:\n frequency += freq_dict[rev]\n if frequency > current_highest_freq:\n current_highest_freq = frequency\n kmers_found = set([kmer])\n elif frequency == current_highest_freq:\n kmers_found.add(kmer)\n\n return kmers_found", "def filter_terms_by_cnt(self, min_count):\n filtered_terms = [term for term in self.term2id if self.term_frequent[term] >= min_count]\n # rebuild the term x id map\n self.term2id = {}\n self.id2term = {}\n for term in self.initial_terms:\n self.add(term, count=0)\n for term in filtered_terms:\n self.add(term, count=0)", "def generateFrequentPatterns(self, tidList):\n tidList1 = {}\n if len(tidList) == 0:\n print(\"There are no more candidate sets\")\n else:\n key = list(tidList.keys())\n for i in range(0, len(key)):\n nighbousItems=self.getNighboirItems(key[i])\n for j in range(i + 1, len(key)):\n if not key[j] in nighbousItems:\n \tcontinue\n intersectionList = list(set(tidList[key[i]]).intersection(set(tidList[key[j]])))\n itemList = []\n if len(intersectionList) >= self.minSup:\n itemList += key[i], key[j]\n itemList.sort()\n tidList1[tuple(itemList)] = intersectionList\n\n return tidList1", "def _analyse_topics(frequencies):\n freq = frequencies[0]\n freq_ref = frequencies[1]\n the_dict = weight_term_frequencies_one(freq,freq_ref)\n sorted_toks = sorted(the_dict.iteritems(),\n key=operator.itemgetter(1))\n \n sorted_toks.reverse()\n sorted_toks = sorted_toks[:400]\n final_toks = []\n for (k,v) in sorted_toks:\n best = True\n for (k1,v1) in sorted_toks:\n if k != k1:\n if (abs(v1-v)) < 0.2:\n if k in k1:\n best = False\n #print \"dropped\", k\n if best:\n final_toks.append((k,v))\n \n very_final_toks = {}\n for (k,v) in final_toks:\n close_val = [(k2,v2) for k2,v2 in final_toks[:50] if abs(v-v2) < 0.2]\n if len(close_val) < 1:\n very_final_toks[k] = v\n else:\n similar = [(k3,v3,len(k3)) for k3,v3 in close_val if difflib.SequenceMatcher(None,k,k3).quick_ratio() > 0.89]\n if len(similar) > 1:\n a,b,c = sorted(similar,key=operator.itemgetter(2))[0]\n very_final_toks[a] = b\n else:\n very_final_toks[k] = v\n \n very_final_toks = sorted(very_final_toks.iteritems(),\n key=operator.itemgetter(1))\n very_final_toks.reverse()\n return very_final_toks", "def __restrict_features_freq(self, min_count=1):\n col_idx = self.X.tocsc().nonzero()[1]\n counter = np.bincount(col_idx)\n print(\"Counter:\", len(counter))\n include_cols = np.where(counter > min_count)[0]\n return include_cols", "def add_clump_forming_kmers(counts, clumpFormingKmers):\n for kmer in counts:\n if counts[kmer] >= t:\n clumpFormingKmers.add(kmer)\n\n return clumpFormingKmers", "def select_strong_subjective_patterns(self):\n self.ss_patterns = {}\n for pattern in self.learned_patterns.keys():\n freq = self.learned_patterns[pattern]['freq']\n prob = self.learned_patterns[pattern]['prob']\n if freq >= self.t1_threshold and prob >= self.t2_threshold: \n 
self.ss_patterns[pattern] = self.learned_patterns[pattern]\n # delete some patterns with low frequency and probability for efficiency\n elif freq > 5 and freq < ((self.t1_threshold*3) / 4):\n \tdel(self.learned_patterns[pattern])\n \n sorted_ss = sorted(self.ss_patterns.iteritems(),key=lambda x: x[1]['prob'], reverse=True)\n self.sorted_ss_patterns = sorted_ss \n for (s,v) in sorted_ss:\n title = (Tcolors.OKGREEN+s+Tcolors.ENDC+\" \").ljust(70,'-') \n pbs = (str)(v['freq'])+\"/\" + Tcolors.CYAN + (str)(v['prob']) + Tcolors.ENDC\n if self.debug: print title + \"------------> \" + pbs\n if self.debug: print\n if len(sorted_ss) > self.pl_threshold:\n \tself.t1_threshold += 1", "def find_frequent_patterns(transactions, support_threshold, possible_class_values):\n tree = FPTree(transactions, support_threshold, possible_class_values, None, None)\n # print(tree.to_string())\n return tree.mine_patterns(support_threshold)", "def sorted_frequent_words(text: str, k: int) -> Set[str]:\n frequent_patterns = set()\n index = []\n count = []\n for i in range(len(text) - k + 1):\n pattern = text[i:i+k]\n index.append(pattern_to_number(pattern))\n count.append(1)\n\n sorted_index = sorted(index)\n\n for i in range(1, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i-1] + 1\n\n max_count = max(count)\n\n for i in range(len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.add(pattern)\n\n return frequent_patterns", "def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.", "def filter_min(counter: Counter, min_freq: int):\n return Counter({t: c for t, c in counter.items() if c >= min_freq})", "def filterCnts(cnts, threshold = 5):\n\tc = []\n\tfor item in cnts:\n\t\tif threshold < len(item):\n\t\t\tc.append(item)\n\treturn c", "def get_all_kmers(pattern, k, ordered=False):\n ordered_kmers = [pattern[i:i + k] for i in range(len(pattern) - k + 1)]\n if ordered:\n return ordered_kmers\n return set(ordered_kmers)", "def frequent_words_with_mismatches(text, k, d):\n\n patterns = []\n freq_map = {}\n n = len(text)\n for i in range(n - k + 1):\n pattern = text[i:i + k]\n pattern_rc = reverse_complement(pattern)\n neighborhood = neighbors(pattern, d) + neighbors(pattern_rc, d)\n for j in range(len(neighborhood)):\n neighbor = neighborhood[j]\n if neighbor not in freq_map.keys():\n freq_map[neighbor] = 1\n else:\n freq_map[neighbor] = freq_map[neighbor] + 1\n m = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == m:\n patterns.append(key)\n return patterns", "def divide_and_count(L_windows, k, t):\n\n results = set()\n\n for L_mer in L_windows:\n k_windows = divide_genome(L_mer, k) # We extract in a list all the possible k-mers\n\n # Generate a set of unique elements to avoid multicounts...\n k_windows_set = set(k_windows)\n\n for k_window in k_windows_set:\n if k_windows.count(k_window) == t:\n results.add(k_window)\n\n\n print(\"\\t\".join(results))", "def words_len_greater_than(n):\n\treturn {w for w in word_set if len(w) > n}", "def frequent_words_by_sorting(text, k):\n frequent_patterns = []\n index = []\n count = []\n for i in range(0, len(text) - k + 1):\n 
pattern = text[i:i + k]\n index[i] = pattern_to_number(pattern)\n count[i] = 1\n sorted_index = sorted(index)\n for i in range(0, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i -1] + 1\n max_count = max(count)\n for i in range(0, len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def recall(gt, pred, k):\n k = min(len(pred), k)\n return sum([int(pred[i] in gt) for i in range(k)]) / len(gt)", "def cpgram(ts):\n spectrum = np.fft.fft(ts)\n n = len(ts)\n y = (np.sqrt(spectrum.real**2 + spectrum.imag**2)) ** 2 / n\n if n % 2 == 0:\n n -= 1\n y = y[:n]\n\n freq = np.linspace(0, 0.5, n, endpoint=True)\n crit = 1.358 / (np.sqrt(n) + 0.12 + 0.11 / np.sqrt(n))\n\n return y, freq, crit" ]
[ "0.6151683", "0.6078879", "0.58491397", "0.57430816", "0.5682803", "0.5476455", "0.54654175", "0.5314422", "0.52966666", "0.5261653", "0.51758975", "0.5156601", "0.5104174", "0.50993943", "0.5099003", "0.50974345", "0.50800914", "0.5068089", "0.5063559", "0.50433385", "0.5015172", "0.49939704", "0.49848256", "0.49810144", "0.49250373", "0.4905814", "0.49016276", "0.48983485", "0.48862153", "0.4843312" ]
0.67880565
0
Returns an array with all positions where the pattern is found within the text.
def find_position(pattern, text): positions = [] i = 0 while text[i:] and text[i:].find(pattern) != -1: position = text[i:].find(pattern) + i positions.append(position) i = position + 1 return positions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __naive_matching(pattern, text):\n matched_positions = []\n n = len(text)\n m = len(pattern)\n # Last possible pattern starting position in the text is n - m\n for pos in range((n - m) + 1):\n if pattern == text[pos:(pos+m)]:\n matched_positions.append(pos)\n return matched_positions", "def find_all_indexes(text, pattern):\n # COMPLEXITY: O(n) b/c we have to traverse string to find all matching patterns\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # vars to keep track of start_indexes\n # and returned value from find_index\n start_indexes = []\n found_index = ''\n # check if text is same as pattern\n if text == pattern:\n return [0]\n # edge case: pattern is empty\n if pattern == found_index:\n return [i for i in range(len(text))]\n start = 0\n # loop until item returned from find_index function is none\n # or we find the word\n while found_index is not None:\n found_index = find_index(text, pattern, start)\n if found_index is not None:\n start_indexes.append(found_index)\n # make sure there is more text to keep checking\n if found_index == len(text) - (len(pattern) - 1):\n return start_indexes\n start = found_index + 1\n else:\n break\n return start_indexes", "def get_pattern_positions(sequence, pattern):\n return [pos.span()[0] for pos in re.finditer(r'(' + pattern + ')', sequence)]", "def find_all_occurrences_brute_force(pattern, text):\n\n result = []\n\n if len(text) < len(pattern):\n return result\n\n for i in range(0, len(text) - len(pattern) + 1):\n matched = True\n\n k = 0\n for j in range(i, i + len(pattern)):\n if pattern[k] != text[j]:\n matched = False\n break\n k += 1\n\n if matched:\n result.append(i)\n\n return result", "def find_pattern(pattern, text):\r\n pat_text = pattern + '$' + text\r\n\r\n prefixes = [None] * len(pat_text)\r\n\r\n prefixes[0] = 0\r\n border = 0\r\n matches = []\r\n for idx, letter in enumerate(pat_text[1:], start=1):\r\n while border > 0 and letter != pat_text[border]:\r\n border = prefixes[border - 1]\r\n if letter == pat_text[border]:\r\n border = border + 1\r\n else:\r\n border = 0\r\n\r\n if border == len(pattern):\r\n matches.append(idx - len(pattern) - border)\r\n prefixes[idx] = border\r\n\r\n return matches", "def find_all_indexes(text, pattern):\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n\n which = 'find_all_indexes'\n\n # is_pattern = False # flag to determine if pattern found\n # list_of_indices = [] # list to hold indices\n #\n # if pattern == '': # all strings have empty substrings\n # for i in range(len(text)): # add all characters in text to list\n # list_of_indices.append(i)\n # return list_of_indices\n #\n # for i in range(len(text) - len(pattern) + 1): # same comments as find_index function,\n # is_pattern = True # except instead of returning index,\n # if text[i] == pattern[0]: # we add index to a list that we return at the end\n # for j in range(len(pattern)):\n # if text[i + j] != pattern[j]:\n # is_pattern = False\n # if is_pattern:\n # list_of_indices.append(i)\n # return list_of_indices\n\n # sub_string = ''\n # list_of_indices = []\n #\n # if pattern == '': # all strings have empty substrings\n # for i in range(len(text)): # add all characters in text to list\n # list_of_indices.append(i)\n # return list_of_indices\n #\n # for i in range(len(text) - len(pattern) + 1): # Iterate through text with limit based 
on length of pattern\n # for j in range(i, len(pattern) + i): # Iterate through as many characters as pattern has\n # sub_string += text[j] # add characters to substring\n # if pattern == sub_string: # compare\n # list_of_indices.append(i) # pattern exists\n # sub_string = '' # reset substring if not found\n # return list_of_indices # pattern does not exist\n\n return string_master_func(text, pattern, which)\n\n\n # TODO: Implement find_all_indexes here (iteratively and/or recursively)", "def match(self):\r\n results = []\r\n pattern = self.pattern\r\n text = self.text\r\n m = len(self.pattern)\r\n n = len(self.text)\r\n p = self._prefix\r\n k = 0\r\n for i in range(n):\r\n while k > 0 and text[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == text[i]:\r\n k = k+1\r\n if k == m:\r\n results.append(i-m+1)\r\n k = p[k-1]\r\n return results", "def all_matches(self, pattern):\n\n pat_len = len(pattern)\n if pat_len > self.text_len:\n raise ValueError(\"Pattern length is bigger than text\")\n\n bad_char_table = self._bad_char_table(pattern)\n L = self._good_suffix_table_one(pattern)\n H = self._good_suffix_table_two(pattern)\n indexes = []\n\n pat_end_ind = pat_len - 1\n prev_end = -1\n # prev_end - previous index of pattern end relative to text (for Galil's rule)\n # p - index of char in pattern\n # t - index of char in text\n while pat_end_ind < self.text_len:\n p = pat_len - 1\n t = pat_end_ind\n while p >= 0 and t > prev_end and pattern[p] == self.text[t]:\n p -= 1\n t -= 1\n if p == -1 or t == prev_end: # Matched or holds Galil's rule\n indexes.append(pat_end_ind - pat_len + 1)\n pat_end_ind += pat_len - H[1] if pat_len > 1 else 1\n else:\n char_shift = self._bad_char_shift(self.text[t], bad_char_table)\n if p+1 == pat_len:\n suffix_shift = 1\n elif L[p] == -1:\n suffix_shift = pat_len - H[p+1]\n else:\n suffix_shift = pat_len - L[p]\n shift = max(char_shift, suffix_shift)\n prev_end = pat_end_ind if shift >= p+1 else prev_end #update parameter for Galil's rule\n pat_end_ind += shift\n\n return indexes", "def find_pattern_positions(pattern, DNA, mutation_thresh=0):\n positions = []\n for i in range(len(DNA) - len(pattern) + 1):\n\n current_pattern = DNA[i: (i + len(pattern))]\n if hamming_distance(pattern, current_pattern) <= mutation_thresh:\n positions.append(i)\n\n return positions", "def find_iter(text, pattern):\n pos = -1\n while True:\n pos = text.find(pattern, pos+1)\n if pos < 0:\n break\n yield pos", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def occurrences_re(pattern, string):\n exp = re.compile(pattern)\n o = []\n for i in exp.finditer(string):\n o.append([i.start(), i.end()])\n return o", "def positions(self, searchstr: str):\n indices = []\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index >= 0:\n indices.append(index)\n return indices", "def get_offsets(word, raw_text):\n try:\n match = re.search(word, raw_text)\n return (match.start(), match.end())\n except AttributeError: #could not find word\n return (0, 0)", "def find_pattern(pattern, text):\r\n result = []\r\n pattern_len = len(pattern)\r\n d_pattern_len = 2 * pattern_len\r\n if pattern_len > len(text):\r\n return []\r\n\r\n new_string = pattern + \"$\" + text\r\n prefix_function_res = prefix_function(new_string)\r\n return [x - d_pattern_len for (x, y) in filter(lambda x: x[1] == pattern_len, enumerate(prefix_function_res))]", "def 
positions(self, searchstr: str):\n out = []\n for x in range(0, len(self.sa)):\n sub = self.sa[x]\n if searchstr == sub[0:len(searchstr)]:\n out.append(x)\n return out\n \n pass", "def find_all(self, p):\n ln = self.ln\n t = self.t\n occurrences = []\n hints = self.__getHints(p)\n for i in hints:\n # compare rest char in pattern with chars in text after hinted substring\n if t[i + ln:i + len(p)] == p[ln:]:\n occurrences.append(i)\n return occurrences", "def str_search(pattern, text):\n N, M = len(text), len(pattern)\n i = 0 \n while i <= N-M:\n j = 0 \n while j < M:\n if text[i + j] != pattern[j]:\n break \n j += 1\n if j == M:\n return i\n i += 1\n return N", "def match(self, pattern, text, case_insensitive=False):\n # If pattern is empty string return every position in text\n if len(pattern) > 0:\n if case_insensitive is True:\n pattern = pattern.lower()\n text = text.lower()\n if self.method == 'naive':\n return self.__naive_matching(pattern, text)\n elif self.method == 'finite-state':\n return self.__finite_state_matching(\n text,\n self.__compute_transitions(pattern,\n set(text)\n ),\n len(pattern)\n )\n else:\n return [i for i in range((len(text) + 1))]", "def find(self, text, term):\n\t\tlistOfResults = list()\n\n\t\tcurrentIndex = 0\n\t\ttermLength\t = len(term)\n\t\tappend\t\t = listOfResults.append\n\n\t\twhile currentIndex >= 0:\n\t\t\tcurrentIndex = text.find(term, currentIndex+1)\n\t\t\tappend((currentIndex, currentIndex+termLength))\n\n\t\t# Return listOfResults[:-1] because the last tuple contains -1 (negative one)\n\t\treturn listOfResults[:-1]", "def find_all_occurrences_knuth_morris_pratt(pattern, text):\n\n if '$' in pattern:\n raise ValueError('The pattern contains $.')\n if '$' in text:\n raise ValueError('The text contains $.')\n\n if pattern == '':\n return list(range(0, 1 + len(text)))\n\n work_text = pattern + '$' + text\n prefix_function = Util._compute_prefix_function(work_text)\n\n result = []\n for i in range(len(pattern) + 1, len(work_text)):\n if prefix_function[i] == len(pattern):\n result.append(i - 2 * len(pattern))\n\n return result", "def find_all_occurrences_knuth_morris_pratt(pattern, text):\n\n if '$' in pattern:\n raise ValueError('The pattern contains $.')\n if '$' in text:\n raise ValueError('The text contains $.')\n\n if pattern == '':\n return list(range(0, 1 + len(text)))\n\n work_text = pattern + '$' + text\n prefix_function = Util._compute_prefix_function(work_text)\n\n result = []\n for i in range(len(pattern) + 1, len(work_text)):\n if prefix_function[i] == len(pattern):\n result.append(i - 2 * len(pattern))\n\n return result", "def search (text, pattern):\n\tfor i in xrange(len(text)-len(pattern)+1):\n\t\tfound = True\n\t\tfor j in xrange(len(pattern)):\n\t\t\tif text[i+j] != pattern[j]:\n\t\t\t\tfound = False\n\t\t\t\tbreak\n\t\tif found:\n\t\t\tprint 'Pattern found at index:', i\n\treturn", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def find_all(s, t):\n offset = 0\n starts = []\n start = s.find(t, offset)\n while start != -1:\n starts.append(start + 1) # Uses one-based indexing, as Pfam does.\n offset = start + 1\n start = s.find(t, offset)\n return starts", "def subtree_matching(self, subtree):\n\t\t#TODO implement this in a faster way\n\t\ttext = self.preorder_traverse_to_list()\n\t\tpattern = subtree.preorder_traverse_to_list()\n\n\t\tprint text\n\t\tprint pattern\n\n\t\tmatches = []\n\t\tfor i in range(len(text)):\n\t\t\tif 
text[i:i+len(pattern)] == pattern:\n\t\t\t\tmatches.append(i)\n\t\treturn matches", "def brute_force_string_matching(text: str, pattern: str) -> int:\n n = len(text)\n m = len(pattern)\n for i in range(n-m):\n j = 0\n while j < m and pattern[j] == text[j+i]:\n j += 1\n if j == m:\n return i\n return -1", "def find_index(text: str, pattern: str, start=0) -> (int, None):\n #COMPLEXITY: O(n) b/c we have to iterate over string to find pattern\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # edge case: pattern is longer text than text\n if len(pattern) > len(text):\n return None\n # edge case: patten is empty string\n if len(pattern) == 0:\n return 0\n # case: pattern equals text:\n # pattern is in text and starting index would be 0\n if pattern == text:\n return start\n # keeps track of start index and will be returned value\n start_index = ''\n # keeps track of matched letters between pattern and text\n matched = ''\n for i in range(start, len(text)):\n letter = text[i]\n if letter == pattern[len(matched)]:\n # matched is empty, this is currently start_index\n if len(matched) == 0:\n start_index = i\n matched += letter\n else:\n matched = ''\n if letter == pattern[len(matched)]:\n start_index = i\n matched += letter\n # if matched is pattern then\n # pattern was found in text\n if matched == pattern:\n return start_index\n return None", "def search(self, txt, pat):\n m, n, = len(pat), len(txt)\n bad_char = self.get_bad_char(pat, m)\n s = 0\n\n while s <= n - m:\n j = m - 1 # start at last index of pattern string.\n\n # move left in pattern string when a matched character is found.\n while j >= 0 and pat[j] == txt[s + j]:\n j -= 1\n\n if j < 0:\n print(f\"Pattern occur at shift = {s}\")\n s += (m - bad_char[ord(txt[s + m])] if s + m < n else 1)\n else:\n s += max(1, j - bad_char[ord(txt[s + j])])\n\n return n", "def find_index(text, pattern):\n assert isinstance(text, str), 'text is not a string: {}'.format(text)\n assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)\n # TODO: Implement find_index here (iteratively and/or recursively)\n\n which = 'find_index'\n\n # is_pattern = False # flag to determine if pattern found in text\n #\n # if pattern == '': # all strings have empty string pattern\n # return 0\n #\n # for i in range(len(text) - len(pattern) + 1): # Iterate through text with limit based on length of pattern\n # is_pattern = True # reset flag to True\n # if text[i] == pattern[0]: # if we find first character in pattern\n # for j in range(len(pattern)): # check the next few characters up to length of pattern\n # if text[i + j] != pattern[j]: # if any of next few characters don't match\n # is_pattern = False # pattern doesn't exist\n # if is_pattern: # pattern exists\n # return i\n # return None # pattern not found\n\n # if pattern == '': # All strings have an empty string\n # return 0\n #\n # sub_string = ''\n # for i in range(len(text) - len(pattern) + 1): # Iterate through text with limit based on length of pattern\n # for j in range(i, len(pattern) + i): # Iterate through as many characters as pattern has\n # sub_string += text[j] # add characters to substring\n # if pattern == sub_string: # compare\n # return i # pattern exists\n # sub_string = '' # reset substring if not found\n # return None # pattern does not exist\n\n return string_master_func(text, pattern, which)" ]
[ "0.82084525", "0.7794501", "0.77193886", "0.7659554", "0.72130907", "0.7196132", "0.7073557", "0.70187694", "0.6968592", "0.69200224", "0.6911996", "0.68648756", "0.6805044", "0.6797464", "0.6721601", "0.6673448", "0.659498", "0.6563846", "0.6534992", "0.65285796", "0.64577717", "0.64577717", "0.64347225", "0.6389563", "0.6375719", "0.63741314", "0.63610095", "0.625991", "0.6215345", "0.61983144" ]
0.8557723
0
All kmers of length k can be arranged in an ordered array of size 4^k. This function returns an array of the frequency of each of the kmers in the text. A position in the array can be matched with its pattern using pattern_to_number and number_to_pattern.
def compute_freq(text, k): freq_array = [0 for i in range(0, 4**k)] for i in range(0, len(text) - k + 1): pattern = text[i:i + k] j = pattern_to_number(pattern) freq_array[j] += 1 # return ' '.join([str(i) for i in freq_array]) return freq_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frequency_array(text, k):\r\n freq_list=[]\r\n p_list=pattern_list(k)\r\n for i in p_list:\r\n freq_list.append(PatternCount(i,text))\r\n return freq_list", "def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts", "def count_kmers(dna: str, k: int, alphabet: str = \"ACGT\"):\n c = Counter(dna[i:i + k] for i in range(len(dna) - k + 1))\n result = []\n for k_mer in enumerate_kmers(alphabet, k):\n result.append(c[k_mer])\n return result", "def frequency_table(text, k):\n\n freq_map = {}\n len_text = len(text)\n for i in range(len_text - k + 1):\n pattern = text[i:i + k]\n if pattern not in freq_map:\n freq_map[pattern] = 1\n else:\n freq_map[pattern] = freq_map[pattern] + 1\n return freq_map", "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count", "def get_kmers(file, size):\n\tkmers = defaultdict(int)\n\tregex = re.compile('[' + string.punctuation + ']')\n\tfor line in open(file):\n\t\tfor word in [regex.sub('', w) for w in line.lower().split()]:\n\t\t\tnkmers = len(word) - size + 1\n\t\t\tfor kmer in [word[i:i+size] for i in range(nkmers)]:\n\t\t\t\tkmers[kmer] += 1\n\treturn kmers", "def codonfreqs_kmerdf(kmertable): \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! 
hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)", "def kmer_preprocess(filename, k):\n\tkmers = {}\n\twith open(filename) as infile:\n\t\tline = infile.readline()\n\t\tseq = \"\"\n\t\tfor line in infile:\n\t\t\tfor ch in line:\n\t\t\t\tif is_valid_char(ch):\n\t\t\t\t\tseq = seq + ch\n\t\t\t\t\tif len(seq) > k:\n\t\t\t\t\t\tseq = seq[1:]\n\t\t\t\t\tif len(seq) == k:\n\t\t\t\t\t\tif seq in kmers:\n\t\t\t\t\t\t\tkmers[seq] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkmers[seq] = 1\n\tpairs = sorted(kmers.items(), reverse=True, key=lambda x: x[1])\n\treturn pairs", "def kmer_composition(k, text):\r\n # TODO: your code here\r\n d = {}\r\n for i in range(len(text)-k+1):\r\n print(text[i:k+i])\r\n ''' \r\n if(text[i:k+i] in d.keys()):\r\n d[text[i:k+i]] += 1\r\n else:\r\n d[text[i:k+i]] = 1\r\n print(d)\r\n '''", "def clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n for i in range(0, len(text) - L + 1):\n subtext = text[i:i + L]\n freq_array = compute_freq(subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd", "def get_counts_from_kmer_list(filenames_lst, alphabet, kmin, kmax):\n # initialize the array container\n dic_list = []\n # iterates through the file paths\n for filename in filenames_lst:\n # get the sequences and ids\n for n, seq in parse_fasta(filename):\n # append the counts to the array\n dic_list.append(count_kmers(seq, alphabet, kmin, kmax))\n return dic_list", "def faster_frequent_words(text, k):\n frequent_patterns = []\n freq_array = compute_freq(text, k)\n max_count = max(freq_array)\n for i in range(0, len(text)-k+1):\n if freq_array[i] == max_count:\n pattern = number_to_pattern(i, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def sequence_to_kmer_freqs(seq:str, kmer_size:int=6, step_size:int=1, ignore_N:bool=True, normalize:bool=True):\n assert np.all([base in BASE_TO_INT for base in seq]), f\"Unrecognized characters in {seq}\"\n # Split into kmers\n seq_kmers = [seq[i:i+kmer_size] for i in range(0, len(seq) - kmer_size + 1, step_size)]\n # Generate all possible kmers\n possible_kmers = generate_all_kmers(kmer_size, ignore_N=ignore_N)\n if ignore_N:\n seq_kmers = [kmer for kmer in seq_kmers if \"N\" not in kmer]\n indices = [possible_kmers[kmer] for kmer in seq_kmers]\n retval = np.zeros(len(possible_kmers))\n if not seq or not seq_kmers or len(seq) < kmer_size: # For an empty/too-short sequence return a matrix of 0's\n return retval\n np.add.at(retval, indices, 1) # Increment all specified indices in place, increment multiple times if index occurs multiple times\n if normalize:\n retval /= len(seq_kmers)\n assert np.isclose(np.sum(retval), 1.0)\n return retval", "def count_kmers(file_name, k, verbose=False):\n if verbose:\n start = time.time()\n print('Counting kmers in 
{}'.format(file_name))\n total_kmers = 0\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n total_kmers += len(line) - k # eliminate new-line\n line_num += 1\n if verbose:\n end = time.time()\n print('{} kmers are counted in {:.2f} seconds'.format(\n total_kmers, end - start))\n return total_kmers", "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def count_mers(sequence, alphabet, kmin, kmax):\n alphabet = set(alphabet)\n counts = defaultdict(int)\n for kmer in get_kmers_from_sequence(sequence, kmin, kmax):\n if set(kmer).issubset(alphabet):\n counts[kmer] = counts.get(kmer, 0) + 1\n return counts", "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el diccionario solo tendra un rango (0,9) las ',' y '\\n'\n\t\t\tfor character in text:#O(len(row)*columns) \n\t\t\t\tif not character in frequency:#como frequency es un diccionario es de O(1)\n\t\t\t\t\tfrequency[character] = 0\n\t\t\t\tfrequency[character] += 1\n\t\t\t\n\t\t\treturn frequency", "def frequent_words_by_sorting(text, k):\n frequent_patterns = []\n index = []\n count = []\n for i in range(0, len(text) - k + 1):\n pattern = text[i:i + k]\n index[i] = pattern_to_number(pattern)\n count[i] = 1\n sorted_index = sorted(index)\n for i in range(0, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i -1] + 1\n max_count = max(count)\n for i in range(0, len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def kmer_count(self,size):\n if size == 1:\n return ['A','T','C','G']\n else:\n result = []\n for seq in Analyze_DNA_Sequence.kmer_count(self,size-1):\n for base in ['A','T','C','G']:\n result.append(seq+base)\n return result", "def string_rank (text):\n freq_set = { \n ' ':13.00, 'e':12.70, 't':9.056, 'a':8.167, 'o':7.507, 'i':6.966, 'n':6.749, \n 's':6.327, 'h':6.094, 'r':5.987, 'd':4.253, 'l':4.025, 'u':2.758, 'b':1.492, \n 'c':2.782, 'f':2.228, 'g':2.015, 'j':0.153, 'k':0.772, 'm':2.406, 'p':1.929, \n 'q':0.095, 'v':0.978, 'w':2.360, 'x':0.150, 'y':1.974, 'z':0.074 }\n return sum([freq_set[letter] for letter in text if letter in freq_set])", "def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]", "def frequent_words(text, k):\n\n frequent_patterns = []\n freq_map = frequency_table(text, k)\n max_val = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == max_val:\n frequent_patterns.append(key)\n return frequent_patterns", "def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter", "def kmer_frequencies(kmertable_all, kmertable_filtered, kmertable_nonDT_hi, kmertable_nonDT_lo, data_mm, codon_seqs):\n\n def codon_bgfreq(codon_seqs, 
data_mm):\n \"\"\"\n get codon background frequencies from mRNA seqs\n seqs: dictionary of yeast mRNA sequences\n data_mc: dictionary of multi-mapping boolean\n \"\"\"\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts\n\n\n def codonfreqs_kmerdf(kmertable):\n \"\"\"\n get codon frequencies from kmertable\n \"\"\" \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)\n\n #kmertable_threshold = kmertable_all[kmertable_all['threshold']==1]\n kmertable_all2 = kmertable_all[kmertable_all['threshold']==0]\n\n\n cc_bg = codon_bgfreq(codon_seqs, data_mm)\n cc_all = codonfreqs_kmerdf(kmertable_all2)\t\t\t# without hits\n cc_theta = codonfreqs_kmerdf(kmertable_filtered)\n cc_nDT_hi = codonfreqs_kmerdf(kmertable_nonDT_hi) # min 16 max 4 at 1090\n cc_nDT_lo = codonfreqs_kmerdf(kmertable_nonDT_lo) # min 16 max 4 at 1090\n\n output = pd.DataFrame({'codon': list(codons_nonstop), \n 'kmer_theta': list(cc_theta), \n 'redundant': list(cc_all), \n 'background': list(cc_bg),\n 'nDThi': list(cc_nDT_hi),\n 'nDTlo': list(cc_nDT_lo) } ) \n output.to_csv(\"../data/figures/figure3/kmer_frequencies.txt\", header=True, index=False, sep='\\t')\n\n return output", "def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)", "def find_kmers(in_fasta, k):\n n= len(in_fasta)-k+1\n kmers=[]\n for i in range(0, n):\n kmers.append(in_fasta[i:i+k])\n return(kmers)", "def get_yules(s, ResArray):\n tokens = tokenize(s)\n token_counter = collections.Counter(tok.upper() for tok in tokens)\n m1 = sum(token_counter.values())\n m2 = sum([freq ** 2 for freq in token_counter.values()])\n i = (m1 * m1) / (m2 - m1)\n k = 1 / i * 10000\n ResArray[6] = k\n return k" ]
[ "0.75466734", "0.67215174", "0.67213583", "0.6492698", "0.64837915", "0.6456329", "0.64193577", "0.64173275", "0.63039887", "0.62665516", "0.62640476", "0.6240153", "0.61800927", "0.6087899", "0.60426986", "0.6032585", "0.60165304", "0.5976605", "0.5971062", "0.5964768", "0.596362", "0.5944227", "0.5923782", "0.5922908", "0.59138906", "0.5906911", "0.59046245", "0.58659655", "0.5849564", "0.58418083" ]
0.79192466
0
Same as find_clumps but using the improved functions to find frequent patterns
def clumps_finding(text, k, t, L): frequent_patterns = [] clumps = [0 for i in range(0, 4**k)] for i in range(0, len(text) - L + 1): subtext = text[i:i + L] freq_array = compute_freq(subtext, k) for index, freq in enumerate(freq_array): if freq >= t: clumps[index] = 1 for index, clump in enumerate(clumps): if clump == 1: pattern = number_to_pattern(index, k) frequent_patterns.append(pattern) return frequent_patterns
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def find_clumps(text, k, L, t):\n clumps = []\n k_mers = frequent_words_t(text, k, t)\n for k_mer in k_mers:\n positions = find_position(k_mer, text)\n for position in positions:\n subtext = text[position:position + L]\n count = pattern_count(subtext, k_mer)\n if count >= t and k_mer not in clumps:\n clumps.append(k_mer)\n return clumps", "def find_clumps(text, k, len_win, t):\n\n patterns = []\n len_text = len(text)\n for i in range(len_text - len_win + 1):\n window = text[i:i + len_win]\n freq_map = frequency_table(window, k)\n for key in freq_map.keys():\n if freq_map[key] >= t and key not in patterns:\n patterns.append(key)\n return patterns", "def chunkedClumpFinder(sequence, k, L, t):\n\n frequentPatterns = set([])\n for i in range(len(sequence)):\n window = sequence[i:i + L]\n frequencies = {}\n\n for j in range(len(window)):\n pattern = window[j:j + k]\n if pattern not in frequencies:\n frequencies[pattern] = 1\n else:\n frequencies[pattern] += 1\n for p in frequencies:\n if frequencies[p] >= t:\n frequentPatterns.add(p)\n return frequentPatterns", "def faster_frequent_words(text, k):\n frequent_patterns = []\n freq_array = compute_freq(text, k)\n max_count = max(freq_array)\n for i in range(0, len(text)-k+1):\n if freq_array[i] == max_count:\n pattern = number_to_pattern(i, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def frequent_words_with_mismatches(text, k, d):\n\n patterns = []\n freq_map = {}\n n = len(text)\n for i in range(n - k + 1):\n pattern = text[i:i + k]\n pattern_rc = reverse_complement(pattern)\n neighborhood = neighbors(pattern, d) + neighbors(pattern_rc, d)\n for j in range(len(neighborhood)):\n neighbor = neighborhood[j]\n if neighbor not in freq_map.keys():\n freq_map[neighbor] = 1\n else:\n freq_map[neighbor] = freq_map[neighbor] + 1\n m = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == m:\n patterns.append(key)\n return patterns", "def get_multi_pattern_count(word, patterns):\n\n distinct_positions = set()\n for pattern in patterns:\n result = Util.find_all_occurrences_knuth_morris_pratt(pattern,\n word)\n distinct_positions |= set(result)\n\n return distinct_positions", "def frequent_words(text, k):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n max_count = max(count.values()) if count else 0\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n if count[i] == max_count and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def find_clumps(DNA, k, L, t):\n assert len(DNA) >= L\n clumps = set()\n\n # Construct the frequency dict for the first region of size L in the DNA\n freq_dict = 
dictionaries.FrequencyDict(DNA[:L], k)\n\n # For each kmer in the first window, check if frequency >= t and correspondingly\n # add the kmer to the clumps set\n kmers = set()\n for i in range(L - k + 1):\n kmer = DNA[i: i + k]\n if not kmer in kmers:\n kmers.add(kmer)\n _t = freq_dict[kmer]\n if _t >= t:\n clumps.add(kmer)\n\n # Decrease the frequency of the first kmer for the next iteration, as our\n # sliding window will escape it\n first_kmer = DNA[0:k]\n freq_dict[first_kmer] -= 1\n\n # Cool beans -- the initial freqs are set up and the window is in place.\n # Now, we're ready to go through all other regions of length L in the DNA\n for i in range(1, len(DNA) - L + 1):\n\n # If not the first iteration, increase the frequency of the recently added\n # last kmer. If that frequency >= t, add the kmer to the set of clumps\n last_kmer = DNA[i+L-k : i+L]\n freq_dict[last_kmer] += 1\n if freq_dict[last_kmer] >= t:\n clumps.add(last_kmer)\n\n # Decrease the frequency of the first kmer in the region, as\n # the sliding window will escape it\n first_kmer = DNA[i:i+k]\n freq_dict[first_kmer] -= 1\n\n return clumps # Victory", "def find_frequent_patterns(transactions, support_threshold, possible_class_values):\n tree = FPTree(transactions, support_threshold, possible_class_values, None, None)\n # print(tree.to_string())\n return tree.mine_patterns(support_threshold)", "def frequent_words_t(text, k, t):\n frequent_patterns = []\n count = {}\n for i in range(0, len(text)-k+1):\n pattern = text[i:i+k]\n count[i] = pattern_count(text, pattern)\n if count[i] >= t and pattern not in frequent_patterns:\n frequent_patterns.append(text[i:i+k])\n return frequent_patterns", "def clippingcounter(clipping_list, input_dir):\n\t\texcludelist=[]\n\t\t\n\t\t#dicts to store results\n\t\tdicti=defaultdict(float)\n\t\tmatchesdicti=defaultdict(list)\n\t\tresults=[]\n\t\t\n\t\tclipping_list=[re.compile(\"[^web|i]\\W(\"+i+\")\\W\") if i in [\"cams?\", \"sites?\"] else re.compile(\"\\W(\"+i+\")\\W\") for i in clipping_list]\n\t\t#clipping_list=[re.compile(\"\\W(\"+i+\")\\W\") for i in clipping_list]\n\t\tclipping_list=set(clipping_list)\n\t\tprint [i.pattern for i in clipping_list]\n\t\t#iterate and match\n\t\tfor dir in [i for i in os.listdir(input_dir) if not i.startswith(\".\")]:\n\t\t\tprint dir\n\t\t\tfor fili in [i for i in os.listdir(os.path.join(input_dir, dir)) if not i.startswith(\".\")]:\n\t\t\t\twith codecs.open(os.path.join(input_dir, dir, fili), \"r\", \"utf-8\") as inputtext:\n\t\t\t\t\tinputad=ct.adtextextractor(inputtext.read(), fili).lower()\n\t\t\t\t#result is a list of lists which contain matches for each regex/acronym\n\t\t\t\tresult=[([m for m in i.findall(inputad) if not m in excludelist], i.pattern) for i in clipping_list] \n\t\t\t\t# o=[(r,os.path.join(input_dir, dir, fili)) for r in result if len(r[0]) > 2]\n# \t\t\t\tif o:\n# \t\t\t\t\tprint o\n\t\t\t\tresults.append([len(matches) for matches, pattern in result])\n\t\t\t\tfor matches, pattern in result:\n \t\t\t\t\t#the dicti is {pattern:count, pattern: count, ...}\n \t\t\t\t\tdicti[pattern]=dicti[pattern]+len(matches)\n \t\t\t\t\tmatchesdicti[pattern]=matchesdicti[pattern]+matches\n\t\tprint \"\\n\".join([\":\".join((i, str(dicti[i]), \"|\".join(set(matchesdicti[i])))) for i in sorted(dicti, key=dicti.get, reverse=True)])\t\n\t\tfor entry in {k:v for k,v in matchesdicti.items() if v > 10}:\n\t\t\tprint entry\n\t\t\ttk.tokenfinder([re.sub(\"[\\(\\)]\", \"\", entry)], \"/Users/ps22344/Downloads/craig_0208\")\n\t\treturn results", "def 
sorted_frequent_words(text: str, k: int) -> Set[str]:\n frequent_patterns = set()\n index = []\n count = []\n for i in range(len(text) - k + 1):\n pattern = text[i:i+k]\n index.append(pattern_to_number(pattern))\n count.append(1)\n\n sorted_index = sorted(index)\n\n for i in range(1, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i-1] + 1\n\n max_count = max(count)\n\n for i in range(len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.add(pattern)\n\n return frequent_patterns", "def frequent_words(text, k):\n\n frequent_patterns = []\n freq_map = frequency_table(text, k)\n max_val = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == max_val:\n frequent_patterns.append(key)\n return frequent_patterns", "def kn_cooccurences(self, target_column, k):\r\n n = len(self.token_set)\r\n coolset = sortedset()\r\n for word in self.token_set:\r\n if word <> \"*\":\r\n coolset.add((self.get(target_column, word), word))\r\n array = list(coolset[len(coolset) - k : len(coolset)])\r\n array.reverse()\r\n return array", "def find_occurrences(text, pattern, d=0):\n idx_of_last_pattern = len(text) - len(pattern)\n return [i for i in range(idx_of_last_pattern + 1) if hamming(text[i:i + len(pattern)], pattern) <= d]", "def find_all(self, p):\n ln = self.ln\n t = self.t\n occurrences = []\n hints = self.__getHints(p)\n for i in hints:\n # compare rest char in pattern with chars in text after hinted substring\n if t[i + ln:i + len(p)] == p[ln:]:\n occurrences.append(i)\n return occurrences", "def _clump(input, valid, output, search_list, clumpId=1):\n (ysize, xsize) = input.shape\n\n # lists slow from Numba - use an array since\n # we know the maximum size\n searchIdx = 0\n\n # run through the image\n for y in range(ysize):\n for x in range(xsize):\n # check if we have visited this one before\n if valid[y, x] and output[y, x] == 0:\n val = input[y, x]\n searchIdx = 0\n search_list[searchIdx, 0] = y\n search_list[searchIdx, 1] = x\n searchIdx += 1\n output[y, x] = clumpId # marked as visited\n\n while searchIdx > 0:\n # search the last one\n searchIdx -= 1\n sy = search_list[searchIdx, 0]\n sx = search_list[searchIdx, 1]\n\n # work out the 3x3 window to vist\n tlx = sx - 1\n if tlx < 0:\n tlx = 0\n tly = sy - 1\n if tly < 0:\n tly = 0\n brx = sx + 1\n if brx > xsize - 1:\n brx = xsize - 1\n bry = sy + 1\n if bry > ysize - 1:\n bry = ysize - 1\n\n for cx in range(tlx, brx+1):\n for cy in range(tly, bry+1):\n # do a '4 neighbour search'\n # don't have to check we are the middle\n # cell since output will be != 0\n # since we do that before we add it to search_list\n if (cy == sy or cx == sx) and (valid[cy, cx] and \n output[cy, cx] == 0 and \n input[cy, cx] == val):\n output[cy, cx] = clumpId # mark as visited\n # add this one to the ones to search the neighbours\n search_list[searchIdx, 0] = cy\n search_list[searchIdx, 1] = cx\n searchIdx += 1\n clumpId += 1\n\n return clumpId", "def fastClumpFinder(sequence, k, L, t):\n\n # to be implemented ;)\n pass", "def neighbors(pattern, d):\n tides = set([\"A\", \"C\", \"G\", \"T\"])\n if d == 0:\n return set([pattern])\n if len(pattern) == 1:\n return tides\n neighborhood = set([])\n suffix_neighbors = neighbors(pattern[1:], d)\n for text in suffix_neighbors:\n if ham_dist(pattern[1:], text) < d:\n for tide in tides:\n neighborhood.add(tide + text)\n else:\n neighborhood.add(pattern[0] + text)\n return neighborhood", "def common_words_min(filename, 
min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n with open(filename, 'r') as f:\n contents = f.read()\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance, key=occurance.get, reverse=True)", "def find_charity_sentences(subdoc, factory) -> List:\n\n calculate_distances_per_pattern(subdoc, factory, merge=True, pattern_prefix='x_charity_')\n\n slices = []\n vectors = filter_values_by_key_prefix(subdoc.distances_per_pattern_dict, 'x_charity_')\n vectors_i = []\n for v in vectors:\n if max(v) > 0.6:\n vector_i, _ = improve_attention_vector(subdoc.embeddings, v, relu_th=0.6, mix=0.9)\n vectors_i.append(vector_i)\n else:\n vectors_i.append(v)\n\n x = max_exclusive_pattern(vectors_i)\n x = relu(x, 0.8)\n subdoc.distances_per_pattern_dict['$at_x_charity_'] = x\n\n dups = {}\n for i in np.nonzero(x)[0]:\n bounds = get_sentence_bounds_at_index(i, subdoc.tokens)\n\n if bounds[0] not in dups:\n sl = slice(bounds[0], bounds[1])\n sum_ = sum(x[sl])\n confidence = 'x'\n # confidence = np.mean( np.nonzero(x[sl]) )\n nonzeros_count = len(np.nonzero(x[sl])[0])\n print('nonzeros_count=', nonzeros_count)\n confidence = 0\n\n if nonzeros_count > 0:\n confidence = sum_ / nonzeros_count\n print('confidence=', confidence)\n if confidence > 0.8:\n # GLOBALS__['renderer'].render_color_text(subdoc.tokens_cc[sl],\n # subdoc.distances_per_pattern_dict['$at_x_charity_'][sl], _range=(0, 1))\n print(i, sum_)\n\n slices.append((sl, confidence, sum_))\n\n dups[bounds[0]] = True\n\n return slices", "def match_all_cui(s,max_len = 10, Eterm_cui = Eterm_cui):\n if len(s) == 0: \n return []\n sub_label = np.zeros(len(s),dtype = 'int')\n location_term = {}\n i = 0\n while i < len(s):\n for j in range(max_len+1,0,-1):\n temp = ' '.join(s[i:i+j])\n if temp in Eterm_cui:\n sub_label[i:i+j] = 1\n location_term[i] = [Eterm_cui[temp]]\n break#matched maximum string, so break\n i += j\n output = []\n for i in range(len(s)):\n if sub_label[i] == 0:#no match\n output += [s[i]]\n elif i in location_term:\n for cui in location_term[i][: :-1]:\n output += [cui]\n return output", "def getFrequentPatterns(self):\n return self.finalPatterns", "def findClosed(freqSet, freqSup):", "def find_matches(words, min_match_ratio):\n couples = []\n with Pool(processes=mp.cpu_count()) as pool:\n results = pool.starmap(\n get_fuzz_ratio, itertools.combinations(words, 2))\n for result, word, paired_word in results:\n if result >= min_match_ratio:\n couples.append([word, paired_word])\n return couples", "def find_clumps(genome, k, L, t):\n assert (is_dna(genome))\n counts = collections.defaultdict(int)\n\n # compute counts of kmers in first L-length part of genome\n for k_start in range(L - k + 1):\n counts[genome[k_start:k_start + k]] += 1\n kmers = _get_keys(counts, t)\n\n # slide L-length window and update counts\n # remove previous leftmost kmer and add new kmer being rightmost in current window\n for L_start in range(1, len(genome) - L + 1):\n counts[genome[L_start - 1:L_start + k - 1]] -= 1\n new_kmer = genome[L_start + L - k:L_start + L]\n counts[new_kmer] += 1\n if counts[new_kmer] >= t:\n kmers.add(new_kmer)\n return kmers", "def find_all_occurrences_brute_force(pattern, text):\n\n result = []\n\n if len(text) < len(pattern):\n return result\n\n for i in range(0, len(text) - len(pattern) + 1):\n matched = True\n\n k = 0\n for j in range(i, i + 
len(pattern)):\n if pattern[k] != text[j]:\n matched = False\n break\n k += 1\n\n if matched:\n result.append(i)\n\n return result", "def frequent_words_by_sorting(text, k):\n frequent_patterns = []\n index = []\n count = []\n for i in range(0, len(text) - k + 1):\n pattern = text[i:i + k]\n index[i] = pattern_to_number(pattern)\n count[i] = 1\n sorted_index = sorted(index)\n for i in range(0, len(text) - k + 1):\n if sorted_index[i] == sorted_index[i-1]:\n count[i] = count[i -1] + 1\n max_count = max(count)\n for i in range(0, len(text) - k + 1):\n if count[i] == max_count:\n pattern = number_to_pattern(sorted_index[i], k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def common_words_tuple(filename, min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n with open(filename, 'r') as f:\n contents = f.read()\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance.items(), key=lambda item:item[1], reverse=True)" ]
[ "0.7552866", "0.677585", "0.67337894", "0.6654941", "0.6105251", "0.6030954", "0.60170376", "0.5999396", "0.59586275", "0.5862602", "0.5852816", "0.5745339", "0.56893045", "0.5677121", "0.56442255", "0.56434375", "0.56361", "0.5556229", "0.54699785", "0.5458053", "0.54559916", "0.54548615", "0.54229635", "0.54106414", "0.5410516", "0.5406393", "0.5349291", "0.53383213", "0.53346336", "0.533374" ]
0.7205846
1
Function turns off agent learning.
def turn_off_learning(self): self.epsilon = 0 self.alpha = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_off(self, **kwargs):\n self._lj.deactivate_load(self._index)", "def turn_off(self):\n self.robot.stop_simulation()", "def lightning_turnoff(self):\n self.turnOff()", "def stopThinking(self):\n self._brain.setState(\"controlled\")", "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()", "def turn_off(self):\n if self._module_type == NA_VALVE:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MIN_TEMP,\n )\n elif self.hvac_mode != HVAC_MODE_OFF:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_OFF\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def turnLightingSystemOff():\n dislin.light('OFF')", "async def unlight(self) -> None:\n self.lit = False\n await self.run_command(\"miner fault_light off\")\n print(\"unlight\" + self.ip)", "def reset(self):\n self.ai.reset()", "def _untrain(self):\n if self.__clf:\n self.__clf._untrain()", "def disable(self):\n self.direction = None # remove direction\n self.state['enabled'] = False # reset states\n self.state['return'] = False\n self.return_path = None # remove path\n if self.state['blue']:\n self.stop_blue_state(resume_audio=False)\n self.image, _ = self.norm_images.get_image() # reset image\n self.sound_manager.stop()", "def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")", "def turn_off(self, **kwargs):\n _LOGGER.debug(\"Turning off Motion Detection \")\n self.data.set_camera_recording(self._camera_id, \"never\")", "def turn_off(self, **kwargs):\n setattr(self.resource, self.variable, False)", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False", "def turn_off(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Stopping all torrents\")\n self._tm_client.api.stop_torrents()\n if self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission off\")\n self._tm_client.api.set_alt_speed_enabled(False)\n self._tm_client.api.update()", "def unlearn(self, p: np.ndarray, epoch: int):\n self.set_inputs(p)\n self.set_transformation(p)\n self.reset_outputs_to_rest()\n self.reset_output_transformation_to_rest()\n\n self.activation(clamps = ['input', 'transformation'])\n if self.config.strict_leech and self.config.max_activation_cycles_fully_unclamped > 0:\n self.activation(clamps = [], max_cycles=self.config.max_activation_cycles_fully_unclamped, is_primed=True)", "def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def turn_off(self):\n print(\"Turning the lights off\")\n self.led.all_off()\n self.client.publish(STATE_TOPIC, OFF) #publish", "def turn_off(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn off\"):\n self.wemo.off()", "def 
turn_off_motors():\n MOTOR_HAT.release_motors()", "def turn_off(self, **kwargs: Any) -> None:\n self._light.turn_off()", "def turn_off(self, **kwargs):\n self._is_on = False", "def _disable(self):\n self.enabled = False", "def turn_off(self, **kwargs):\n #self._light.turn_off()\n self._brightness = 0\n self._state = 'off'\n _LOGGER.info(\"turn_off() is called\")", "def turn_off(self, **kwargs):\n self.smartplug.turn_off()" ]
[ "0.6700129", "0.66529197", "0.66521186", "0.64668", "0.63781226", "0.6372063", "0.633508", "0.62015146", "0.61315125", "0.6131017", "0.6021157", "0.60110766", "0.59882134", "0.5959574", "0.5944202", "0.59188724", "0.5908109", "0.58830976", "0.5879937", "0.5866597", "0.58627164", "0.5862707", "0.58480585", "0.5843539", "0.5835401", "0.5823943", "0.5821519", "0.5820188", "0.5808598", "0.58003" ]
0.7653259
0
Transforms a course run into our normalized data structure
def _transform_run(course_run): return { "run_id": course_run["courseware_id"], "title": course_run["title"], "start_date": _parse_datetime(course_run["start_date"]), "end_date": _parse_datetime(course_run["end_date"]), "enrollment_start": _parse_datetime(course_run["enrollment_start"]), "enrollment_end": _parse_datetime(course_run["enrollment_end"]), "published": bool(course_run["current_price"]), "prices": [course_run["current_price"]] if course_run.get("current_price", None) else [], "instructors": [ {"full_name": instructor["name"]} for instructor in course_run["instructors"] ], }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reformat_course(courses):\n\n if isinstance(courses, Courses): # if it's just one course object\n reformatted_data = {\n 'startDate': reformat_date(courses.startDate),\n 'organizingMeetingDate': reformat_date(courses.organizingMeetingDate),\n 'startTime': reformat_time(courses.startTime),\n 'organizingMeetingTime': reformat_time(courses.organizingMeetingTime)\n }\n else:\n reformatted_data = []\n for course in courses:\n if isinstance(course, Courses):\n reformatted_data.append(\n {\n 'startDate': reformat_date(course.startDate),\n 'organizingMeetingDate': reformat_date(course.organizingMeetingDate),\n 'startTime': reformat_time(course.startTime),\n 'organizingMeetingTime': reformat_time(course.organizingMeetingTime),\n 'studentLimit': course.studentLimit,\n 'studentCount': len(get_students_by_course(course.id)),\n 'additionalData': course.additionalData\n }\n )\n\n return reformatted_data", "def transform_courses(courses):\n return [_transform_learning_resource_course(course) for course in courses]", "def build_course(self):\n courses = []\n aprovacao_d = {}\n # semestral\n for rate_it in self.__semestral_rate:\n # pega uma lista no qual o primeiro elemento é a taxa, o segundo\n # e o terceiro são quantidades\n rate_data = self.analysis[\"semestral_rate\"][rate_it.name]\n for i in rate_data[0].index:\n if i[0] not in aprovacao_d:\n aprovacao_d[i[0]] = {}\n\n periodo = str(i[1]) + \"/\" + str(i[2])\n aprovacao_d[i[0]][periodo] = [\n float(rate_data[0][i]),\n int(rate_data[1][i]),\n int(rate_data[2][i]),]\n\n note = self.analysis[\"general_note_statistic\"]\n note_last_year = self.analysis[\"last_year_statistic\"]\n for course in self.analysis[\"courses\"].index:\n course_dict = {}\n course_dict[\"disciplina_codigo\"] = course\n course_dict[\"disciplina_nome\"] = self.analysis[\"courses\"][course]\n \n # If the course code is related to more than one name,\n # concatenate these names into an unique string\n if type(course_dict[\"disciplina_nome\"]) != str:\n new_course_name = \" | \".join(list(course_dict[\"disciplina_nome\"]))\n course_dict[\"disciplina_nome\"] = new_course_name\n # quantidade de matriculas\n count = self.analysis[\"general_count_submission\"][course]\n course_dict[\"qtd_alunos\"] = count\n # notas\n course_dict[\"qtd_cursada_aprov\"] = self.analysis[\"coursed_ratio\"][course]\n course_dict[\"nota\"] = [note[0][course], note[1][course]]\n course_dict[\"nota_ultimo_ano\"] = [\n note_last_year[0][course],\n note_last_year[1][course]\n ]\n # taxas\n for rate_it in self.__rates:\n rate_data = self.analysis[\"general_rates\"][rate_it.name]\n course_dict[rate_it.name] = float(rate_data[0][course])\n course_str = rate_it.name.replace(\"taxa\", \"qtd\")\n # count_sel define qual quantidade vai para o json, a especifica\n # ou geral\n course_dict[course_str] = int(\n rate_data[rate_it.count_sel][course])\n # rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n\n # taxas do ultimo anos\n course_dict[\"taxa_reprovacao_ultimo_absoluto\"] = self.analysis[\"last_year_taxa_reprovacao_absoluta\"][course]\n course_dict[\"taxa_reprovacao_ultimo_frequencia\"] = self.analysis[\"last_year_taxa_reprovacao_frequencia\"][course]\n\n course_dict[\"grafico_qtd_cursada_aprov\"] = self.analysis[\"coursed_count\"][course]\n course_dict[\"aprovacao_semestral\"] = aprovacao_d[course]\n courses.append(course_dict)\n return courses", "def _transform_learning_resource_course(course):\n return {\n \"readable_id\": course[\"readable_id\"],\n \"platform\": PlatformType.xpro.value,\n 
\"title\": course[\"title\"],\n \"image\": {\"url\": course[\"thumbnail_url\"]},\n \"offered_by\": copy.deepcopy(OFFERED_BY),\n \"description\": course[\"description\"],\n \"url\": course.get(\"url\"),\n \"published\": any(\n map(\n lambda course_run: course_run.get(\"current_price\", None),\n course[\"courseruns\"],\n )\n ),\n \"topics\": transform_topics(course.get(\"topics\", [])),\n \"runs\": [_transform_run(course_run) for course_run in course[\"courseruns\"]],\n \"resource_type\": LearningResourceType.course.value,\n }", "def preprocess_courses_corpus():\n soup = None\n with open('courses_corpus.html', 'r') as infile:\n content = infile.read()\n\n soup = BeautifulSoup(content, 'html.parser')\n\n docid = 0\n data = {}\n data['documents'] = []\n\n main_table = soup.find_all(\"div\", attrs={'class': 'courseblock'})\n for course in main_table:\n docid += 1\n title = course.find_all('p', attrs={'class':'courseblocktitle noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblocktitle noindent'}))!=0 else ''\n description = (course.find_all('p', attrs={'class':'courseblockdesc noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblockdesc noindent'}))!=0 else '') + ' ' + (course.find_all('p', attrs={'class':'courseblockextra noindent'})[0].text if len(course.find_all('p', attrs={'class':'courseblockextra noindent'}))!=0 else '')\n\n data['documents'].append({\n 'docId' : docid,\n 'title' : title.strip(),\n 'description' : description.strip()\n })\n\n with open('courses_data.json', 'w') as outfile:\n json.dump(data, outfile)", "def _load_course(self, course):\r\n y_size, x_size = len(course), len(course[0])\r\n self.course = np.zeros((x_size, y_size), dtype=np.int16)\r\n for y in range(y_size):\r\n for x in range(x_size):\r\n point = course[y][x]\r\n if point == 'o':\r\n self.course[x, y] = 1\r\n elif point == '-':\r\n self.course[x, y] = 0\r\n elif point == '+':\r\n self.course[x, y] = 2\r\n elif point == 'W':\r\n self.course[x, y] = -1\r\n # flip left/right so (0,0) is in bottom-left corner\r\n self.course = np.fliplr(self.course)\r\n for y in range(y_size):\r\n for x in range(x_size):\r\n if self.course[x, y] == 0:\r\n self.start_positions.append((x, y))", "def build_courses_from_rows(self, rowset):\n class_id_re = re.compile(\"[A-Z]+&* [0-9]+\")\n courses = []\n course_learning_outcomes = []\n for row in rowset:\n if not class_id_re.fullmatch(row[0].strip()):\n continue\n # If credit is numeric assign it to lower and upper credit bound\n # Otherwise, split the credit range and assign\n try:\n lowercb = float(row[2])\n uppercb = float(row[2])\n except ValueError:\n if \"-\" in row[2]:\n bounds = row[2].split(\"-\")\n lowercb = float(bounds[0])\n uppercb = float(bounds[1])\n else:\n lowercb = None\n uppercb = None\n \n course = models.Course(id=row[0].strip(),\n label=row[1].strip(\" or\"),\n lower_credit_bound=lowercb,\n upper_credit_bound=uppercb)\n course.save()\n\n outcome_string = row[3]\n clo_content = re.findall(\"[0-9]+\", outcome_string)\n for outcome in clo_content:\n core_learning_outcome = models.CoreLearningOutcome.objects.get(\n id=int(\n outcome))\n try:\n models.CourseLearningOutcome.objects.get(\n course=course,\n learning_outcome=core_learning_outcome)\n break\n except models.CourseLearningOutcome.DoesNotExist:\n course_learning_outcome = models.CourseLearningOutcome(\n course=course,\n learning_outcome=core_learning_outcome)\n course_learning_outcome.save()\n \n return (courses, course_learning_outcomes)", 
"def parse_course_pre_to_list(self):\n prere_courses = []\n\n # convert non-word to spaces except \"-\"\n self.prere_raw = re.sub(\"[^\\w-]\", \" \", self.prere_raw)\n\n # split the string by spaces\n words = self.prere_raw.split()\n\n # check if the string contains number, if True then the string is of the form: \"140A\"\n def append_to_list(word, previous_word):\n try:\n if word[0].isdigit():\n toappend = None\n # course abbs = words[i-1]\n try:\n toappend = \"{} {}\".format(previous_word.upper(), word.upper())\n except AttributeError:\n #TODO check this error for HIGR 216A-B\n print(\"previous word is {}, word is {}\".format(previous_word, word))\n if toappend not in prere_courses:\n prere_courses.append(toappend)\n except IndexError:\n #TODO why this would occur?\n print(\"word is {}, previous word is {}\".format(word, previous_word))\n\n # iterate through words to find numbers\n for i in range(len(words)):\n\n previous_word = None\n if i is not 0:\n # define the previous word like MATH\n previous_word = words[i-1]\n\n if \"-\" in words[i]:\n num = re.split(\"[A-Z]\", words[i])[0]\n letters = re.split(\"-\", words[i])\n new_words = []\n for i in range(len(letters)):\n if i is 0:\n new_words.append(letters[0])\n else:\n new_words.append(num + letters[i])\n for word in new_words:\n if word is not None and previous_word is not None:\n append_to_list(word, previous_word)\n else:\n #TODO: what if the word is None?\n pass\n else:\n append_to_list(words[i], previous_word)\n\n return prere_courses", "def transform_programs(programs):\n # normalize the xPro data into the course_catalog/models.py data structures\n return [\n {\n \"readable_id\": program[\"readable_id\"],\n \"title\": program[\"title\"],\n \"image\": {\"url\": program[\"thumbnail_url\"]},\n \"description\": program[\"description\"],\n \"offered_by\": copy.deepcopy(OFFERED_BY),\n \"published\": bool(\n program[\"current_price\"]\n ), # a program is only considered published if it has a product/price\n \"url\": program[\"url\"],\n \"topics\": transform_topics(program.get(\"topics\", [])),\n \"platform\": PlatformType.xpro.value,\n \"resource_type\": LearningResourceType.program.value,\n \"runs\": [\n {\n \"prices\": [program[\"current_price\"]]\n if program.get(\"current_price\", None)\n else [],\n \"title\": program[\"title\"],\n \"run_id\": program[\"readable_id\"],\n \"enrollment_start\": _parse_datetime(program[\"enrollment_start\"]),\n \"start_date\": _parse_datetime(program[\"start_date\"]),\n \"end_date\": _parse_datetime(program[\"end_date\"]),\n \"description\": program[\"description\"],\n \"instructors\": [\n {\"full_name\": instructor[\"name\"]}\n for instructor in program.get(\"instructors\", [])\n ],\n }\n ],\n \"courses\": transform_courses(program[\"courses\"]),\n }\n for program in programs\n ]", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def parse_courses():\n\n subjects = collections.OrderedDict()\n name = '' # the most recent course name acronym (ex. 
'COMP')\n\n courses = re.sub(r'\\([^)]*\\)', '', COURSES).split() # Remove parens and their contents\n\n for course in courses:\n if course == 'OR':\n continue\n\n if course[0].isalpha():\n\n index = 0 # the upper bound character index of the subject name\n for char in course:\n if char.isalpha():\n index += 1\n else:\n break\n\n name = course[:index]\n number = course[index:index+4]\n else:\n number = course[:4]\n\n try:\n subjects[name].append(number)\n except KeyError:\n subjects[name] = [number]\n\n return subjects", "def _construct_course_son(org, course, run):\r\n return bson.son.SON([\r\n ('org', org),\r\n ('course', course),\r\n ('name', run)\r\n ])", "def build_general_course(self):\n\n courses = {}\n\n if self.__build_analyze is False:\n self.build_analysis()\n\n courses[\"taxa_conhecimento\"] = self.analysis[\"taxa_conhecimento\"]\n courses[\"taxa_reprovacao\"] = self.analysis[\"taxa_reprovacao_absoluta\"]\n courses[\"taxa_trancamento\"] = self.analysis[\"taxa_trancamento\"]\n\n # cria cache\n cache = {}\n note = self.analysis[\"general_note_statistic\"]\n for rate_it in self.__rates:\n rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n for course in self.analysis[\"courses\"].index:\n if course not in cache:\n cache[course] = {}\n cache[course][rate_it.name] = rate_calc[course]\n cache[course][\"nota\"] = [note[0][course], note[1][course]]\n\n courses[\"cache\"] = cache\n\n # cria o campo compara_aprov\n courses[\"compara_aprov\"] = self.analysis[\"graph_course\"]\n\n # cria o campo courses\n courses[\"disciplinas\"] = self.analysis[\"courses\"].to_dict()\n\n return courses", "def get_course(data):\n\n return {item['course'] for item in data}", "def get_courses(self) -> Tuple[Dict[int, float], Dict[float, List[int]]]:\n\n loop_ids_to_course = {}\n course_to_loop_ids = {0: []}\n current_course_id = 0\n\n # Iterate through the loops\n for current_loop_id in range(self.last_loop_id + 1):\n # Check if there are any parent loops, if there are, then iterate through them\n for parent in self.loops[current_loop_id].parent_loops:\n # If the parent loop is in the course below the current course, there is a course change\n if loop_ids_to_course[parent.loop_id] != current_course_id-1:\n # Course change! 
Increment course id and create an empty course.\n current_course_id += 1\n course_to_loop_ids[current_course_id] = []\n # Add values to dictionaries\n loop_ids_to_course[current_loop_id] = current_course_id\n course_to_loop_ids[current_course_id].append(current_loop_id)\n\n return(loop_ids_to_course, course_to_loop_ids)", "def get_courses(std):\n return std[2]", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def populate_course(self):\r\n def descend(parent, stack):\r\n xblock_type = stack.pop(0)\r\n for _ in range(2):\r\n child = ItemFactory.create(category=xblock_type, parent_location=parent.location)\r\n if stack:\r\n descend(child, stack)\r\n\r\n descend(self.course, ['chapter', 'sequential', 'vertical', 'problem'])", "def reconstruct(self, X):", "def reconstruct(self, X):", "def prepare_courses_for_radio():\n\n three_closest_courses = get_four_future_courses()\n radio_courses = []\n\n for course in three_closest_courses:\n radio_courses.append(\n (int(course.id), str(reformat_date(course.startDate) + '&nbsp;r.'))\n )\n return radio_courses", "def _course_from_query_response(self, term, response):\n units_low, units_hi = float(response['UNITS_LOW']), float(response['UNITS_HIGH'])\n if units_low > units_hi:\n # Yes, this is an actual response case...\n # Occurs when a course has a constant # of units.\n # I think units_hi should equal units_low when actual units is constant.\n units_hi = units_low\n\n instructor_name = instructor_email = None\n try:\n instructor_meta = next(instr for instr in response['INSTRUCTORS'] if instr['PRIMARY_IND'] == 'Y')\n instructor_name = '{} {}'.format(instructor_meta['FIRST_NAME'], instructor_meta['LAST_NAME'])\n instructor_name = instructor_name.strip()\n instructor_email = instructor_meta['EMAIL']\n except StopIteration:\n # No instructor specified\n pass\n\n ge_areas = list()\n try:\n area_codes = filter(None, response['GE3CREDIT'].split(','))\n ge_areas = [GE_AREA_NAMES_BY_SB_CODE[area_code] for area_code in area_codes]\n except KeyError as e:\n logging.exception('Unrecognized GE code')\n\n meetings = list()\n for meeting in response['COURSEMEETINGDATA']:\n days = meeting['WEEKDAYS'].replace(',', '')\n times = None\n try:\n begin_hour, begin_minutes = meeting['BEGIN_TIME'][:2], meeting['BEGIN_TIME'][2:]\n end_hour, end_minutes = meeting['END_TIME'][:2], meeting['END_TIME'][2:]\n begin = timedelta(hours=int(begin_hour), minutes=int(begin_minutes))\n end = timedelta(hours=int(end_hour), minutes=int(end_minutes))\n times = (begin, end)\n except TypeError:\n # times are None, indicating TBA\n pass\n\n location = meeting['BLDG_DESC']\n if meeting['ROOM']:\n location += ' ' + meeting['ROOM']\n\n meeting = {\n 'days': days,\n 'times': times,\n 'location': location,\n 'type': meeting['MEET_TYPE_DESC_SHORT']\n }\n meetings.append(meeting)\n\n final_exam = None\n try:\n final_exam = datetime.strptime(response['FINALEXAMSTARTDATE'], '%B, %d %Y %H:%M:%S')\n except TypeError:\n # No final exam\n pass\n\n drop_time = response['ALLOWEDDROPDESC']\n drop_days_match = re.match(r'^([0-9]+)', drop_time)\n if drop_days_match:\n drop_time = int(drop_days_match.group(1))\n if response['DESCRIPTION']:\n 
response['DESCRIPTION'] = response['DESCRIPTION'].replace('\\n', ' ').replace('\\r', '').strip()\n response['TITLE'] = response['TITLE'].strip()\n\n return Course(\n term=term,\n crn=response['PASSEDCRN'],\n subject_code=response['SUBJECT_CODE'],\n name='{} {}'.format(response['SUBJECT_CODE'], response['COURSE_NUMBER']),\n number=response['COURSE_NUMBER'],\n section=response['SEC'],\n title=response['TITLE'].strip(),\n description=response['DESCRIPTION'],\n instructor_consent_required=bool(int(response['CONSENTOFINSRUCTORREQUIRED'])),\n units=(units_low, units_hi),\n instructor=instructor_name,\n instructor_email=instructor_email,\n ge_areas=ge_areas,\n available_seats=response['BLEND_SEATS_AVAIL'],\n wl_length=response['BLEND_WAIT_COUNT'],\n meetings=meetings,\n final_exam=final_exam,\n drop_time=drop_time,\n prerequisites=re.sub(r'\\s+', ' ', response['PREREQUISITES']) if response['PREREQUISITES'] else None)", "def course_rolls(records):\n \n dict_map_course_and_stud_num = {}\n \n for each_tuple in records:\n \n course_info = each_tuple[0]\n student_info = each_tuple[1]\n course_id = course_info[0]\n \n \n if course_id in dict_map_course_and_stud_num:\n dict_map_course_and_stud_num[course_id].add(student_info)\n \n else:\n dict_map_course_and_stud_num[course_id] = set()\n dict_map_course_and_stud_num[course_id].add(student_info)\n \n return(dict_map_course_and_stud_num)", "def process_course(xml_course):\n\n parse_course = parse_get_course(xml_course)\n if not parse_course: # Does not already exist, create\n parse_course = parse_create_course(xml_course)\n else:\n # TODO: Update existing attributes of the course here and upload the changes\n None\n parse_course_id = upload_course(parse_course)\n\n print (\"Processed Course: {0} {1}\"\n .format(parse_course[\"subject\"], parse_course[\"courseNumber\"]))\n\n # TODO: Implement the functions for this stuff\n parse_section = parse_get_section(xml_course)\n if not parse_section: # Does not already exist, create\n parse_section = parse_create_section(xml_course)\n else:\n # TODO: Update existing attributes of the section here and upload the changes\n None\n parse_section_id = upload_section(parse_section)\n\n put_child(parse_course_id, parse_section_id)", "def create_courses():\n\n\t# create list for courses\n\tallcourses = []\n\n\t# load courses as classes in allcourses-list\n\twith open(\"../data/vakken.csv\", \"rt\") as coursefile:\n\n\t\t# clean text\n\t\tcourses = csv.reader(coursefile)\n\t\tfor row in courses:\n\t\t\tfor text in row:\n\t\t\t\tcourse_info = text.split(\";\")\n\n\t\t\t\t# add course name\n\t\t\t\tcourse_name = course_info[0]\n\n\t\t\t\t# add amount of lectures\n\t\t\t\tcourse_lectures = course_info[1]\n\n\t\t\t\t# add amount of seminars\n\t\t\t\tcourse_seminars = course_info[2]\n\n\t\t\t\t# add max amount seminars\n\t\t\t\tcourse_max_sem = course_info[3]\n\t\t\t\tif course_max_sem == \"nvt\":\n\t\t\t\t\tcourse_max_sem = 0\n\n\t\t\t\t# add amount of practicals\n\t\t\t\tcourse_practicals = course_info[4]\n\n\t\t\t\t# add max amount practicals\n\t\t\t\tcourse_max_prac = course_info[5]\n\t\t\t\tif course_max_prac == \"nvt\":\n\t\t\t\t\tcourse_max_prac = 0\n\n\t\t\t\t# add course to list\n\t\t\t\tallcourses.append(Course(course_name, course_lectures, course_seminars, course_max_sem, course_practicals, course_max_prac))\n\n\treturn allcourses", "def _construct_course_son(self, course_key):\r\n assert(isinstance(course_key, SlashSeparatedCourseKey))\r\n return bson.son.SON([\r\n ('org', course_key.org),\r\n ('course', 
course_key.course),\r\n ('name', course_key.run)\r\n ])", "def transform():", "def open_courses(self, filename):\n\n with open(filename) as courses:\n course_reader = csv.DictReader(courses)\n course_list = []\n for row in course_reader:\n name = row['\\ufeffCourses Period 4']\n lec = row['#lec']\n tut = row['#tut']\n prac = row['#pr']\n tut_tot = row['#tuttot']\n prac_tot = row['#prtot']\n max_tut = row['#max stud tut']\n max_prac = row['#max stud pr']\n exp_stud = row['E(students)']\n dif_total = int(row['#lec']) + int(row['#tut']) + int(row['#pr'])\n act_tot = int(row['#lec']) + int(row['#tuttot']) + int(row['#prtot'])\n course = Courses(name, lec, tut, prac, tut_tot, prac_tot, max_tut, max_prac, exp_stud, act_tot, dif_total)\n course_list.append(course)\n\n course_list_simulated = []\n\n for i in range(len(course_list)):\n lecs = int(course_list[i].lec)\n course_list_simulated.append(course_list[i].name + '_lec')\n if lecs > 0:\n for j in range(lecs):\n activity = course_list[i].name\n activity = activity + '_lec'\n course_list[i].add(activity)\n tuts = int(course_list[i].tut_tot)\n if tuts > 0:\n course_list_simulated.append(course_list[i].name + '_tut')\n for k in range(tuts):\n activity = course_list[i].name\n activity = activity + '_tut'\n course_list[i].add(activity)\n pracs = int(course_list[i].prac_tot)\n if pracs > 0:\n course_list_simulated.append(course_list[i].name + '_prac')\n for l in range(pracs):\n activity = course_list[i].name\n activity = activity + '_prac'\n course_list[i].add(activity)\n\n\n return course_list", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def pass_two(self, degree_program_rows):\n class_id_re = re.compile(\"[A-Z]+&* [0-9]+\")\n for dp_rowset in enumerate(degree_program_rows):\n degree_program = models.DegreeProgram.objects.get(label=dp_rowset[1][0][0])\n last_parent = (dp_rowset[0], 1)\n # Check to make sure first course in program isn't generic\n # If it is, change it\n if dp_rowset[1][1][0].startswith(\"Generic\"):\n for course_row in enumerate(dp_rowset[1]):\n if course_row[1][0].startswith(\"ATA\"):\n continue\n elif not course_row[1][0].startswith(\"Generic\"):\n last_parent = (dp_rowset[0], course_row[0])\n break\n \n substitute = False\n for row in enumerate(dp_rowset[1]):\n if not class_id_re.fullmatch(row[1][0].strip()):\n continue\n course_id = row[1][0]\n course_title = row[1][1]\n course_credits = row[1][2]\n \n course = models.Course.objects.get(id=course_id)\n # Set flags on elective, substitute, and generic\n elective = bool(row[1][-1])\n generic = course_id.startswith(\"Generic\")\n # Get parent course\n parent = degree_program_rows[last_parent[0]][last_parent[1]]\n parent_course = models.Course.objects.get(id=parent[0])\n if not substitute and not generic:\n dpcs = models.DPCourseSpecific(\n degree_program=degree_program,\n course=course,\n elective=elective)\n dpcs.save()\n last_parent = (dp_rowset[0], row[0])\n elif generic and not substitute:\n credit_type = self.extract_generic_credit_type(row[1])\n dpcg = models.DPCourseGeneric(\n 
degree_program=degree_program,\n credit_type=credit_type,\n credits=course_credits,\n elective=elective)\n dpcg.save()\n # Omission of last parent update purposeful\n # as a hack because I don't think this\n # situation ever actually occurs in the data\n #TODO: Do this correctly.\n elif substitute and not generic:\n dp_parent_course = models.DPCourseSpecific.objects.get(\n degree_program=degree_program,\n course=parent_course)\n dpcss = models.DPCourseSubstituteSpecific(\n parent_course=dp_parent_course,\n course=course)\n dpcss.save()\n elif substitute and generic:\n dp_parent_course = models.DPCourseSpecific.objects.get(\n degree_program=degree_program,\n course=parent_course)\n credit_type = self.extract_generic_credit_type(row[1])\n dpcsg = models.DPCourseSubstituteGeneric(\n parent_course=dp_parent_course,\n credit_type=credit_type,\n credits=course_credits,\n elective=elective)\n dpcsg.save()\n else:\n raise ValueError(\"Improper combination of flags!\")\n substitute = course_title.strip().endswith(\"or\")\n\n \n return True" ]
[ "0.6545898", "0.65071106", "0.6343304", "0.6102301", "0.5970706", "0.5895119", "0.5769776", "0.5554748", "0.5535331", "0.5529821", "0.5447787", "0.5391927", "0.535386", "0.5326052", "0.52869093", "0.5249625", "0.5244551", "0.5226693", "0.5220069", "0.5220069", "0.5208763", "0.5208387", "0.5172701", "0.5158805", "0.51428354", "0.50761366", "0.50715727", "0.5051284", "0.5031216", "0.5009964" ]
0.6969037
0
Transforms a list of courses into our normalized data structure
def transform_courses(courses):
    return [_transform_learning_resource_course(course) for course in courses]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reformat_course(courses):\n\n if isinstance(courses, Courses): # if it's just one course object\n reformatted_data = {\n 'startDate': reformat_date(courses.startDate),\n 'organizingMeetingDate': reformat_date(courses.organizingMeetingDate),\n 'startTime': reformat_time(courses.startTime),\n 'organizingMeetingTime': reformat_time(courses.organizingMeetingTime)\n }\n else:\n reformatted_data = []\n for course in courses:\n if isinstance(course, Courses):\n reformatted_data.append(\n {\n 'startDate': reformat_date(course.startDate),\n 'organizingMeetingDate': reformat_date(course.organizingMeetingDate),\n 'startTime': reformat_time(course.startTime),\n 'organizingMeetingTime': reformat_time(course.organizingMeetingTime),\n 'studentLimit': course.studentLimit,\n 'studentCount': len(get_students_by_course(course.id)),\n 'additionalData': course.additionalData\n }\n )\n\n return reformatted_data", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def create_courses():\n\n\t# create list for courses\n\tallcourses = []\n\n\t# load courses as classes in allcourses-list\n\twith open(\"../data/vakken.csv\", \"rt\") as coursefile:\n\n\t\t# clean text\n\t\tcourses = csv.reader(coursefile)\n\t\tfor row in courses:\n\t\t\tfor text in row:\n\t\t\t\tcourse_info = text.split(\";\")\n\n\t\t\t\t# add course name\n\t\t\t\tcourse_name = course_info[0]\n\n\t\t\t\t# add amount of lectures\n\t\t\t\tcourse_lectures = course_info[1]\n\n\t\t\t\t# add amount of seminars\n\t\t\t\tcourse_seminars = course_info[2]\n\n\t\t\t\t# add max amount seminars\n\t\t\t\tcourse_max_sem = course_info[3]\n\t\t\t\tif course_max_sem == \"nvt\":\n\t\t\t\t\tcourse_max_sem = 0\n\n\t\t\t\t# add amount of practicals\n\t\t\t\tcourse_practicals = course_info[4]\n\n\t\t\t\t# add max amount practicals\n\t\t\t\tcourse_max_prac = course_info[5]\n\t\t\t\tif course_max_prac == \"nvt\":\n\t\t\t\t\tcourse_max_prac = 0\n\n\t\t\t\t# add course to list\n\t\t\t\tallcourses.append(Course(course_name, course_lectures, course_seminars, course_max_sem, course_practicals, course_max_prac))\n\n\treturn allcourses", "def prepare_courses_for_radio():\n\n three_closest_courses = get_four_future_courses()\n radio_courses = []\n\n for course in three_closest_courses:\n radio_courses.append(\n (int(course.id), str(reformat_date(course.startDate) + '&nbsp;r.'))\n )\n return radio_courses", "def parse_courses():\n\n subjects = collections.OrderedDict()\n name = '' # the most recent course name acronym (ex. 
'COMP')\n\n courses = re.sub(r'\\([^)]*\\)', '', COURSES).split() # Remove parens and their contents\n\n for course in courses:\n if course == 'OR':\n continue\n\n if course[0].isalpha():\n\n index = 0 # the upper bound character index of the subject name\n for char in course:\n if char.isalpha():\n index += 1\n else:\n break\n\n name = course[:index]\n number = course[index:index+4]\n else:\n number = course[:4]\n\n try:\n subjects[name].append(number)\n except KeyError:\n subjects[name] = [number]\n\n return subjects", "def build_course(self):\n courses = []\n aprovacao_d = {}\n # semestral\n for rate_it in self.__semestral_rate:\n # pega uma lista no qual o primeiro elemento é a taxa, o segundo\n # e o terceiro são quantidades\n rate_data = self.analysis[\"semestral_rate\"][rate_it.name]\n for i in rate_data[0].index:\n if i[0] not in aprovacao_d:\n aprovacao_d[i[0]] = {}\n\n periodo = str(i[1]) + \"/\" + str(i[2])\n aprovacao_d[i[0]][periodo] = [\n float(rate_data[0][i]),\n int(rate_data[1][i]),\n int(rate_data[2][i]),]\n\n note = self.analysis[\"general_note_statistic\"]\n note_last_year = self.analysis[\"last_year_statistic\"]\n for course in self.analysis[\"courses\"].index:\n course_dict = {}\n course_dict[\"disciplina_codigo\"] = course\n course_dict[\"disciplina_nome\"] = self.analysis[\"courses\"][course]\n \n # If the course code is related to more than one name,\n # concatenate these names into an unique string\n if type(course_dict[\"disciplina_nome\"]) != str:\n new_course_name = \" | \".join(list(course_dict[\"disciplina_nome\"]))\n course_dict[\"disciplina_nome\"] = new_course_name\n # quantidade de matriculas\n count = self.analysis[\"general_count_submission\"][course]\n course_dict[\"qtd_alunos\"] = count\n # notas\n course_dict[\"qtd_cursada_aprov\"] = self.analysis[\"coursed_ratio\"][course]\n course_dict[\"nota\"] = [note[0][course], note[1][course]]\n course_dict[\"nota_ultimo_ano\"] = [\n note_last_year[0][course],\n note_last_year[1][course]\n ]\n # taxas\n for rate_it in self.__rates:\n rate_data = self.analysis[\"general_rates\"][rate_it.name]\n course_dict[rate_it.name] = float(rate_data[0][course])\n course_str = rate_it.name.replace(\"taxa\", \"qtd\")\n # count_sel define qual quantidade vai para o json, a especifica\n # ou geral\n course_dict[course_str] = int(\n rate_data[rate_it.count_sel][course])\n # rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n\n # taxas do ultimo anos\n course_dict[\"taxa_reprovacao_ultimo_absoluto\"] = self.analysis[\"last_year_taxa_reprovacao_absoluta\"][course]\n course_dict[\"taxa_reprovacao_ultimo_frequencia\"] = self.analysis[\"last_year_taxa_reprovacao_frequencia\"][course]\n\n course_dict[\"grafico_qtd_cursada_aprov\"] = self.analysis[\"coursed_count\"][course]\n course_dict[\"aprovacao_semestral\"] = aprovacao_d[course]\n courses.append(course_dict)\n return courses", "def preprocess_courses_corpus():\n soup = None\n with open('courses_corpus.html', 'r') as infile:\n content = infile.read()\n\n soup = BeautifulSoup(content, 'html.parser')\n\n docid = 0\n data = {}\n data['documents'] = []\n\n main_table = soup.find_all(\"div\", attrs={'class': 'courseblock'})\n for course in main_table:\n docid += 1\n title = course.find_all('p', attrs={'class':'courseblocktitle noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblocktitle noindent'}))!=0 else ''\n description = (course.find_all('p', attrs={'class':'courseblockdesc noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', 
attrs={'class':'courseblockdesc noindent'}))!=0 else '') + ' ' + (course.find_all('p', attrs={'class':'courseblockextra noindent'})[0].text if len(course.find_all('p', attrs={'class':'courseblockextra noindent'}))!=0 else '')\n\n data['documents'].append({\n 'docId' : docid,\n 'title' : title.strip(),\n 'description' : description.strip()\n })\n\n with open('courses_data.json', 'w') as outfile:\n json.dump(data, outfile)", "def get_course(data):\n\n return {item['course'] for item in data}", "def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)", "def build_courses_from_rows(self, rowset):\n class_id_re = re.compile(\"[A-Z]+&* [0-9]+\")\n courses = []\n course_learning_outcomes = []\n for row in rowset:\n if not class_id_re.fullmatch(row[0].strip()):\n continue\n # If credit is numeric assign it to lower and upper credit bound\n # Otherwise, split the credit range and assign\n try:\n lowercb = float(row[2])\n uppercb = float(row[2])\n except ValueError:\n if \"-\" in row[2]:\n bounds = row[2].split(\"-\")\n lowercb = float(bounds[0])\n uppercb = float(bounds[1])\n else:\n lowercb = None\n uppercb = None\n \n course = models.Course(id=row[0].strip(),\n label=row[1].strip(\" or\"),\n lower_credit_bound=lowercb,\n upper_credit_bound=uppercb)\n course.save()\n\n outcome_string = row[3]\n clo_content = re.findall(\"[0-9]+\", outcome_string)\n for outcome in clo_content:\n core_learning_outcome = models.CoreLearningOutcome.objects.get(\n id=int(\n outcome))\n try:\n models.CourseLearningOutcome.objects.get(\n course=course,\n learning_outcome=core_learning_outcome)\n break\n except models.CourseLearningOutcome.DoesNotExist:\n course_learning_outcome = models.CourseLearningOutcome(\n course=course,\n learning_outcome=core_learning_outcome)\n course_learning_outcome.save()\n \n return (courses, course_learning_outcomes)", "def get_courses():\n courses = []\n courses_recs = Course._file.read_db()\n for course in courses_recs[\"courses\"]:\n courses.append(Course(**course))\n return courses", "def load_courses(self):\r\n store = modulestore()\r\n\r\n # Add a course with a unicode name, if the modulestore\r\n # supports adding modules.\r\n if hasattr(store, 'create_xmodule'):\r\n CourseFactory.create(org=u'ëḋẌ',\r\n course=u'śíḿṕĺé',\r\n display_name=u'2012_Fáĺĺ',\r\n modulestore=store)\r\n\r\n courses = store.get_courses()\r\n # NOTE: if xml store owns these, it won't import them into mongo\r\n if SlashSeparatedCourseKey.from_deprecated_string(TEST_COURSE_ID) not in [c.id for c in courses]:\r\n import_from_xml(store, DATA_DIR, ['toy', 'simple'])\r\n\r\n return [course.id for course in store.get_courses()]", "def set_of_courses(students_list: list) -> set:\n return set(student['course'] for student in students_list)", "def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = 
course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses", "def export_courses(courses, output):\n courses = sorted(courses)\n writer = csv.writer(output)\n writer.writerow([\n 'College', 'Department', 'Code', 'Name', 'Credits', 'Tags',\n 'Prerequisites'\n ])\n\n for course in courses:\n writer.writerow([\n course.college, course.department, course.code, course.name,\n course.credits, ','.join(course.tags), ','.join(course.prerequisites)\n ])", "def load_courses(self, file):\n with open(file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n self.courses.append(Course(row[0], row[1], row[2],row[3],row[4],row[5],row[6],row[7]))\n self.courses.pop(0)", "def get_enrolled_courses(self, courses):\n enrolled_courses = []\n for course in courses:\n if self.is_enrolled(course):\n enrolled_courses.append(self.get_course_info(course))\n return enrolled_courses", "def create_student_groups(allcourses, student_list):\n\n\t# for each course\n\tfor course in allcourses:\n\n\t\t# check all students\n\t\tfor student in student_list:\n\n\t\t\t# if student is attenting course\n\t\t\tif course.name in student.courses:\n\n\t\t\t\t# add student to course class\n\t\t\t\tcourse.add_student(student.last_name)\n\n\t\t# if course has seminars\n\t\tif course.seminars > 0:\n\n\t\t\t# count and add amount to course class\n\t\t\tnumofseminars = math.ceil(course.students/course.maxstudentssem)\n\t\t\tcourse.add_seminar(numofseminars)\n\n\t\t# if course has practicals\n\t\tif course.practicals > 0:\n\n\t\t\t# count and add to course class\n\t\t\tnumofpracticals = math.ceil(course.students/course.maxstudentsprac)\n\t\t\tcourse.add_practical(numofpracticals)\n\n\n\t\t#* divide students over groups *#\n\n\t\t# start with group \"1\"\n\t\tsem = 1\n\n\t\t# if course has seminars\n\t\tif course.seminars > 0:\n\n\t\t\t# iterate over students in course with steps of max amount of students\n\t\t\tfor i in range(0, len(course.studentnames), course.maxstudentssem):\n\n\t\t\t\t# create list with names of students\n\t\t\t\tstudentlist = course.studentnames[i: i + course.maxstudentssem]\n\n\t\t\t\t# add studentlist to course class\n\t\t\t\tcourse.create_seminar_group(sem, studentlist)\n\n\t\t\t\t# go on to the next group\n\t\t\t\tsem += 1\n\n\t\t# same for practical\n\t\tprac = 1\n\t\tif course.practicals > 0:\n\t\t\tfor i in range(0, len(course.studentnames), course.maxstudentsprac):\n\t\t\t\tstudentlist = course.studentnames[i: i + course.maxstudentsprac]\n\t\t\t\tcourse.create_practical_group(prac, studentlist)\n\t\t\t\tprac += 1\n\n\n\treturn allcourses, student_list", "def complement_course(allcourses, schedule, chambers, student_list):\n\n\t# for each course\n\tfor course in allcourses:\n\n\t\t# schedule activities\n\t\tschedule_class(course, \"lecture\", schedule, chambers, student_list)\n\t\tschedule_class(course, \"seminar\", schedule, chambers, student_list)\n\t\tschedule_class(course, \"practical\", schedule, chambers, student_list)\n\n\treturn allcourses, schedule, chambers, student_list", "def get_courses(self):\n\n self.search([]).unlink()\n token = self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_courses',\n 'moodlewsrestformat': 'json'\n }\n request = 
requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n print(request)\n\n for req in request:\n try:\n if req['id']==1:\n pass\n else:\n self.create({\n 'course_id': req['id'], \n 'category':req['categoryid'],\n 'fullname':req['fullname'], \n 'shortname':req['shortname'],\n 'summary': req['summary']\n }\n )\n except Exception:\n print('Course not created')", "def all_courses(records):\n \n course_and_id_dict = {} #This creates an empty dictionary\n for all_tuples in records:\n course_info_tuple = all_tuples[0] #Extracts all course information\n course_id = course_info_tuple[0]\n course_name = course_info_tuple[1]\n \n course_and_id_dict[course_id] = course_name\n \n return course_and_id_dict", "def see_teaching_courses(self, username: str, token: str) -> List[Dict[str, object]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get UID from user's username\n uid = self.get_uid(username=username)\n\n # Query database for courses instructed by a user with this UID\n cursor = self._db_connection.cursor()\n cursor.execute(\n '''\n SELECT \n course_id,\n course_abbreviation,\n course_name, \n time,\n seats \n FROM \n courses\n WHERE \n instructor_id = ?\n ;\n ''', (uid,))\n\n db_results = cursor.fetchall()\n\n if db_results is None:\n print(\"No associated courses found!\")\n return []\n\n # Build information dicts for every course this user is instructing\n courses = []\n for result in db_results:\n # Get the number of students enrolled in this course already\n cursor.execute('''SELECT COUNT(*) FROM enrollment_records WHERE course_id = ?;''', (result[0],))\n students_enrolled = cursor.fetchone()[0]\n if students_enrolled is None:\n students_enrolled = 0\n\n # Build a course dict from the data\n courses.append({\n \"course_abbreviation\": result[1],\n \"course_name\": result[2],\n \"time\": result[3],\n \"students_enrolled\": students_enrolled,\n \"capacity\": result[4],\n })\n\n return courses", "def filter_sections(courses, selected_sections):\n for c in courses:\n c_key = f\"{c.name} {c.num}\"\n\n lab_section = selected_sections[c_key][\"lab\"]\n lecture_section = selected_sections[c_key][\"lecture\"]\n tutorial_section = selected_sections[c_key][\"tutorial\"]\n\n c.labs = [s for s in c.labs if s.section == lab_section]\n c.lectures = [s for s in c.lectures if s.section == lecture_section]\n c.tutorials = [s for s in c.tutorials if s.section == tutorial_section]", "def export_courses():\n courses = Course.query().fetch()\n dictionary = {}\n\n for course in courses:\n dictionary[course.department + \"\" + course.number] = course.to_dict()\n\n return dictionary", "def _transform_learning_resource_course(course):\n return {\n \"readable_id\": course[\"readable_id\"],\n \"platform\": PlatformType.xpro.value,\n \"title\": course[\"title\"],\n \"image\": {\"url\": course[\"thumbnail_url\"]},\n \"offered_by\": copy.deepcopy(OFFERED_BY),\n \"description\": course[\"description\"],\n \"url\": course.get(\"url\"),\n \"published\": any(\n map(\n lambda course_run: course_run.get(\"current_price\", None),\n course[\"courseruns\"],\n )\n ),\n \"topics\": transform_topics(course.get(\"topics\", [])),\n \"runs\": [_transform_run(course_run) for course_run in course[\"courseruns\"]],\n \"resource_type\": LearningResourceType.course.value,\n }", "def get_courses(std):\n return std[2]", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n 
\"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def upcoming_courses(aud):\n \n courses = [c for c in aud.all_courses() if c.grade == u\"*\"]\n return [c.number.replace(\"-\", \"\") for c in courses]", "def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})", "def _accessible_courses_list_from_groups(request):\r\n courses_list = {}\r\n\r\n instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()\r\n staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()\r\n all_courses = instructor_courses | staff_courses\r\n\r\n for course_access in all_courses:\r\n course_key = course_access.course_id\r\n if course_key not in courses_list:\r\n course = modulestore('direct').get_course(course_key)\r\n if course is None:\r\n raise ItemNotFoundError(course_key)\r\n courses_list[course_key] = course\r\n\r\n return courses_list.values()" ]
[ "0.7081613", "0.6395153", "0.6332284", "0.62050134", "0.6196131", "0.60946196", "0.60392576", "0.6026868", "0.5937411", "0.59108996", "0.57097995", "0.56979233", "0.56891394", "0.5687594", "0.5673657", "0.56333023", "0.561568", "0.5600831", "0.5598359", "0.5592875", "0.55716544", "0.5557094", "0.5543492", "0.5538011", "0.55128", "0.5500342", "0.5499333", "0.54939795", "0.54808414", "0.5458383" ]
0.786374
0
Read through the text file to retrieve each word and its embedding. If a word starts with an alphabetic character, create a WordEmbedding object for that word and its embedding and insert it in its proper place in the BTree.
def buildBTree(T, file):
    for line in file:
        word_line = line.split(' ')
        word = word_line[0]
        embedding = word_line[1:]
        embedding = [float(i) for i in embedding]
        if word[0].isalpha():
            word_emb_object = WordEmbedding.WordEmbedding(word, embedding)
            insertElement(T, word_emb_object)
    return T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_from_text(self, file_name):\n with open(file_name, 'r') as reader:\n words_list = []\n for line in reader:\n words_list.extend(line.split())\n\n for word in set(words_list):\n if word.isalpha():\n self.insert_word(word.lower())\n else:\n self.insert_word(''.join([c for c in word if c.isalpha()]).lower())", "def _read_words(self, path):\r\n\r\n word_file = open(path)\r\n for line in word_file.readlines():\r\n pair = line.split('::')\r\n self.insert(pair[0], pair[1].rstrip())\r\n word_file.close()", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def load(self, file_name):\n try:\n [self.add_word(w) for w in open(file_name).read().splitlines()]\n except IOError as e:\n print(e)", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict", "def read_txt_embeddings(path, params):\n word2id = {}\n vectors = []\n\n # load pretrained embeddings\n _emb_dim_file = params.emb_dim\n with io.open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore') as f:\n for i, line in enumerate(f):\n if i == 0:\n split = line.split()\n assert len(split) == 2\n assert _emb_dim_file == int(split[1])\n continue\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n if word in word2id:\n logger.warning(\"Word \\\"%s\\\" found twice!\" % word)\n continue\n if not vect.shape == (_emb_dim_file,):\n logger.warning(\"Invalid dimension (%i) for word \\\"%s\\\" in line %i.\"\n % (vect.shape[0], word, i))\n continue\n assert vect.shape == (_emb_dim_file,)\n word2id[word] = len(word2id)\n vectors.append(vect[None])\n\n assert len(word2id) == len(vectors)\n logger.info(\"Loaded %i pretrained word embeddings from %s\" % (len(vectors), path))\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate(vectors, 0)\n embeddings = torch.from_numpy(embeddings).float()\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings", "def load_embeddings(embedding_path):\n print('loading word embeddings from %s' % embedding_path)\n weight_vectors = []\n word_idx = {}\n with codecs.open(embedding_path, encoding='utf-8') as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n word_idx[word] = 
len(weight_vectors)\n weight_vectors.append(np.array(vec.split(), dtype=np.float32))\n # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and\n # '-RRB-' respectively in the parse-trees.\n word_idx[u'-LRB-'] = word_idx.pop(u'(')\n word_idx[u'-RRB-'] = word_idx.pop(u')')\n # Random embedding vector for unknown words.\n weight_vectors.append(np.random.uniform(\n -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))\n return np.stack(weight_vectors), word_idx", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)", "def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index", "def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300", "def load_text(file: Union[str, bytes, int, PathLike],\n lossy: bool = False) -> Embeddings:\n with open(file, encoding='utf8',\n errors='replace' if lossy else 'strict') as inf:\n try:\n first = next(inf)\n except StopIteration:\n raise ValueError(\"Can't read from empty embeddings file.\")\n line = _ASCII_WHITESPACE_PAT.split(first.rstrip())\n cols = len(line[1:])\n rows = sum(1 for _ in inf) + 1\n inf.seek(0)\n return _load_text(inf, rows, cols)", "def read_data(filename,words):\n try:\n f = open(filename)\n reader = f.read().splitlines()\n for line in reader:\n #print(line[0])\n words.add(line.lower())\n f.close()\n except 
IOError:\n print 'Input file reading failed,'\n return words", "def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'", "def load_words():\n # Load all the words from the scrabble dictionary into a python list, words\n fname = 'words.txt'\n with open(fname) as fh:\n words = fh.readlines()\n \n # Create a python dict keyed by sorted letters, with value equal to a list\n # of all the anagrams of that collection of letters\n anagram_tbl = dict()\n for word in words:\n word_lc = word.rstrip().lower()\n key = word_key(word_lc)\n value = anagram_tbl.get(key, []) + [word_lc]\n anagram_tbl[key] = value\n return anagram_tbl", "def load_embed_text(embed_file):\n \n emb_dict = dict()\n emb_size = None\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, \"rb\")) as f:\n for line in f:\n tokens = line.strip().split(\" \")\n word = tokens[0]\n vec = list(map(float, tokens[1:]))\n emb_dict[word] = vec\n if emb_size:\n assert emb_size == len(vec), \"All embeddings should be same size\"\n else:\n emb_size = len(vec)\n return emb_dict, emb_size", "def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim", "def load_words():\n print(\"Loading word list from file..\")\n WORDLIST_FILENAME = \"words.txt\"\n # with open('words.txt', 'r') as f:\n # inFile = f.read()\n inFile = open(WORDLIST_FILENAME, 'r')\n wordlist = []\n\n for line in inFile:\n wordlist.append(line.strip().lower())\n return wordlist", "def load_embeddings(filename):\n count = 0\n matrix = []\n word_map = {}\n with open(filename, encoding=\"utf8\") as f:\n # with open(filename) as f:\n for line in f:\n line = line.strip()\n items = line.split()\n word = items[0]\n rest = items[1:]\n # print(\"word:\", word)\n word_map[word] = count\n count += 1\n\n rest = list(map(float, rest))\n matrix.append(rest)\n matrix = np.array(matrix)\n return word_map, matrix", "def readFile(filename):\n listOfWords = []\n currentLine = 1\n f = open(filename, \"r\")\n for line in f:\n line = stripPunctuation(line)\n for word in line.split():\n word = word.lower()\n if len(word) > 1:\n if not word[0].isdigit():\n tempObj = contains(listOfWords, word)\n if tempObj != None:\n tempObj.incOccurrence(currentLine)\n else:\n temp = Word(word, currentLine)\n listOfWords.append(temp)\n currentLine = currentLine + 1\n return listOfWords", "def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()", "def get_words(file_name, letters):\r\n with open(file_name, encoding = 'utf-8') as file:\r\n correct_dict = {\"/n\":\"noun\", 
\"noun\":\"noun\", \"/v\":\"verb\", \"verb\":\"verb\",\r\n \"/adj\":\"adjective\", \"adj\":\"adjective\", \"adv\":\"adverb\"}\r\n word_list = []\r\n for line in file:\r\n for key, value in correct_dict.items():\r\n word = line.split()[0]\r\n if key in line and len(word)<=5:\r\n if word[0] in letters and word not in word_list:\r\n word_list.append((word, value))\r\n break\r\n return word_list", "def get_words(txtfile):\n\n global _wordset\n global _postrie\n\n f = open(txtfile,'r')\n _wordset = set([x.lower() for x in set(f.read().split()) \\\n if not re.match('.*[\\W,\\d]|^.$',x)])\n\n #print('building suffix trie')\n _postrie = trienode(pre = False)\n _postrie.grow(_wordset)\n\n # Since this will be recursed through later, take care of it now.\n if len(_wordset) > sys.getrecursionlimit():\n sys.setrecursionlimit(len(_wordset))", "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)", "def read_file_into_tree(file_name, tree):\n if not isinstance(file_name, str):\n raise ValueError(\"'str required here. Created by @Edd1e234'\")\n\n try:\n file = open(file_name, \"r\")\n except FileNotFoundError:\n print(\"File not found\")\n raise FileNotFoundError(file_name + \"not found...\")\n\n count = 0\n\n print(\"Inserting\")\n\n for line in file:\n words = line.split(\" \")\n if words[0].isalpha():\n # Checking if there actual float numbers.\n vector_list = []\n for i in words[1:]:\n try:\n vector_list.append(float(i))\n except ValueError:\n # Raises Exception\n raise ValueError(\"This file does not contain correct data. Does not contain float numbers. '\", i,\n \"' Created by @Edd1e234\")\n # Creates the object wrapper class.\n key = ObjectKey(words[0], vector_list)\n tree.insert(key)\n count += 1\n print(count)\n file.close()" ]
[ "0.6896994", "0.6556667", "0.6526634", "0.6506346", "0.65016085", "0.6407096", "0.6287741", "0.62576973", "0.62347907", "0.62207425", "0.62117535", "0.6209658", "0.6187981", "0.6147249", "0.61393017", "0.61357147", "0.61202615", "0.607361", "0.6070773", "0.6067271", "0.6066581", "0.60651684", "0.60644144", "0.6051351", "0.6017496", "0.60129076", "0.60100013", "0.600948", "0.5978106", "0.5962649" ]
0.79228294
0
If k, a word string, is in the BTree, determine the value of c, such that k must be in the subtree T.child[c]. This function helps search a word in the BTree by comparing it to the word in each WordEmbedding object in the node.
def findChildB(T,k):
    for i in range(len(T.data)):
        if k < T.data[i].word:
            return i
    return len(T.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)", "def search(self, word):\n node = self.root\n for i in word:\n if i not in node.children:\n return False\n node = node.children[i]\n return node.word", "def search(self, word):\n node = self.root\n for letter in word:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return node.word", "def search(self, word):\n if not word:\n return False\n discrepancy = 1\n stack = [(child, word, discrepancy)\n for child in self.root.children.values()]\n while stack:\n curr_node, curr_word, curr_disc = stack.pop()\n if len(curr_word) == 1:\n if curr_node.nil:\n if curr_disc == 0 and curr_word == curr_node.key:\n return curr_node.nil\n elif curr_disc == 1 and curr_word != curr_node.key:\n return curr_node.nil\n else:\n for key, child in curr_node.children.items():\n if curr_word[0] == curr_node.key:\n stack.append((child, curr_word[1:], curr_disc))\n elif curr_disc> 0:\n stack.append((child, curr_word[1:], curr_disc - 1))\n return False", "def search(self, word: str):\n node = self.root\n for letter in word:\n if letter in node.child:\n node = node.child[letter]\n else:\n return False\n return node.is_leaf", "def findChildA(T,k): \r\n for i in range(len(T.data)):\r\n if k.word < T.data[i].word:\r\n return i\r\n return len(T.data)", "def search(self, word):\n node = self.root\n for c in word:\n if c in node.children:\n node = node.children[c]\n else:\n return False\n return node.word_end", "def search(self, word: str) -> bool:\n #start from the root\n node = self.root\n for char in word:\n if char in node.child:\n node = node.child.get(char)\n else:\n return False\n return node.isWord", "def search(self, word: str) -> bool:\n node = self.root\n for c in word:\n if c not in node.children:\n return False\n node = node.children[c]\n return True", "def __search_tree(word, index=0, node=None):\n if index + 1 > len(word):\n return node\n\n current_key = word[index]\n\n child_node = _Node.__find_key_in_level(node, current_key)\n\n if not child_node:\n return False\n\n return _Node.__search_tree(word, index + 1, child_node)", "def search(self, word):\n nodes = [self.root]\n print self.root.children\n i = 0\n while i < len(word):\n tmp = []\n for node in nodes:\n if word[i] in node.children:\n tmp.append(node.children[word[i]])\n elif word[i] == '.':\n tmp.extend(node.children.values())\n if not tmp:\n return False\n nodes = tmp\n i += 1\n return True", "def search(self, word):\n current = self.root\n for letter in word:\n current = current.children.get(letter)\n \n if current is None:\n return False\n return current.is_word", "def search(self, word: str) -> bool:\n current = self.root\n for letter in word: \n current = current.children.get(letter)\n if not current:\n return False\n return current.is_word", "def contains_word(root, input_word):\n\n cNode = root\n\n for char in list(input_word):\n found_match = False\n for node in cNode.nodes:\n if node.char == char:\n found_match = True\n cNode = node\n break\n\n if not found_match:\n print(\"Exited in for loop\")\n return False\n\n return cNode.is_word", "def search(self, word):\n def _subSearch(node, word):\n if not word:\n return node.isWord\n\n contains = False\n if word[0] == '.':\n for c in node.children:\n contains |= _subSearch(node.children[c], word[1:])\n if contains:\n return True\n elif word[0] in node.children:\n contains |= _subSearch(node.children[word[0]], 
word[1:])\n\n return contains\n\n return _subSearch(self.root, word)\n\n\n # cur = self.root\n # nodes = []\n # nodes.append(cur)\n\n # for c in word:\n # # new_nodes = []\n # # for node in nodes\n # # if c == '.':\n # if c not in cur.children:\n # return False\n \n # cur = cur.children[c]", "def search(self, word):\n curNode = self.root\n for c in word:\n if not c in curNode:\n return False\n curNode = curNode[c]\n \n # Doesn't end here\n if not self.end in curNode:\n return False\n \n return True", "def search(self, key): \n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n if not current_node.children[index]: \n return False\n current_node = current_node.children[index] \n \n return current_node != None and current_node.isEndOfWord", "def search(self, word: str) -> bool:\n node = self.root\n for w in word:\n node = node.children.get(w)\n if not node:\n return False\n return node.end", "def search(self, word):\n q = collections.deque()\n q.append(self.root)\n start = 0\n while len(q):\n size = len(q)\n for _ in range(size):\n cur = q.popleft()\n if start==len(word):\n if cur.isWord:\n return True\n continue\n if word[start]!=\".\":\n if word[start] in cur.children:\n q.append(cur.children[word[start]])\n else:\n for k in cur.children:\n q.append(cur.children[k])\n start += 1\n return False", "def search(self, word):\n #edge case\n if word == \"\": \n return True if self._dict.children[26] != None else False\n\n cur = self._dict\n for c in word:\n ind = ord(c) - 97\n if cur.children[ind] == None:\n return False\n cur = cur.children[ind]\n\n return True if cur.isleaf == True else False", "def search(self, word: str) -> bool:\n curr = self.root\n for c in word:\n if not c in curr.adj:\n return False\n curr = curr.adj[c]\n return curr.isWord", "def search(self, word: str) -> bool:\n curr = self.root\n for ch in word:\n curr = curr.children.get(ch)\n if curr is None:\n return False\n return curr.is_word", "def search(self, word: str) -> bool:\n n = self.root\n for l in word[0:-1]:\n cn = n.get_child_with_val(l)\n if cn == None or cn.eow:\n return False\n n = cn\n\n last_node = n.get_eow_child_with_val(word[-1])\n if last_node == None:\n return False\n return True", "def search(self, word):\n currNode = self.root\n\n for c in word:\n if c not in currNode.children:\n return False\n currNode = currNode.children[c]\n return currNode.isEnd", "def search(self, word):\n node = self.root\n for char in word:\n if char in node.dict:\n node = node.dict[char]\n else:\n return False\n if node.end:\n return True\n return False", "def search(self, word: str) -> bool:\n \n def helper(n, sub):\n if not sub:\n return n.rec > 0\n \n for i, c in enumerate(sub):\n if c == \".\":\n for l in n.childs:\n if helper(n.childs[l], sub[i+1:]):\n return True\n return False\n else:\n if c not in n.childs:\n return False\n n = n.childs[c]\n \n return n.rec > 0\n \n trav = self.root\n \n for i, c in enumerate(word):\n if c == \".\":\n for l in trav.childs:\n if helper(trav.childs[l], word[i+1:]):\n return True\n return False\n else:\n if c not in trav.childs:\n return False\n trav = trav.childs[c]\n \n return trav.rec > 0", "def search(self, word: str) -> bool:\r\n nroot=self.root\r\n for j in word:\r\n \r\n # index=ord(j)-ord('a')\r\n if j not in nroot.children:\r\n return False\r\n nroot=nroot.children[j] \r\n return nroot.endofword", "def search(self, word: str) -> bool:\n parent = self.root\n for char in word:\n if char not in parent.children:\n 
return False\n parent = parent.children[char]\n return parent.endhere", "def search(self, word):\n return self.subsearch(self.root, word)", "def search(self, word):\n node = self.root\n for letter in word:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return node.endOfWord" ]
[ "0.7309766", "0.6948749", "0.6895554", "0.6835559", "0.6707002", "0.664024", "0.6602095", "0.6581776", "0.64997524", "0.6494112", "0.6449155", "0.64359915", "0.6392801", "0.6356252", "0.6351559", "0.63251305", "0.6320364", "0.62917066", "0.62825304", "0.62693983", "0.6267385", "0.6266533", "0.6258205", "0.6240689", "0.6227414", "0.6217905", "0.6175606", "0.6138254", "0.61302865", "0.61062914" ]
0.70231426
1
Return the WordEmbedding object from the node where k, a word string, is found in the BTree. If k is not in the tree, return None.
def search(T,k): for t in T.data: if k == t.word: return t if T.isLeaf: return None return search(T.child[findChildB(T,k)],k)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(self, word):\n node = self.root\n for i in word:\n if i not in node.children:\n return False\n node = node.children[i]\n return node.word", "def search(self, word):\n node = self.root\n for letter in word:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return node.word", "def get_root(word):\n try:\n query = {'word': word}\n cursor = database['Words'].find(query)\n \n if cursor is None:\n return None\n for document in cursor:\n if len(document['roots']) > 0:\n return document['roots']\n except Exception as e:\n print(e)\n return None\n\n '''\n nlp = stanfordnlp.Pipeline(lang='hi')\n doc = nlp(word)\n for sentence in doc.sentences:\n for word in sentence.words:\n return word.lemma\n '''", "def __search_tree(word, index=0, node=None):\n if index + 1 > len(word):\n return node\n\n current_key = word[index]\n\n child_node = _Node.__find_key_in_level(node, current_key)\n\n if not child_node:\n return False\n\n return _Node.__search_tree(word, index + 1, child_node)", "def search_prefix(self, word):\n node = self.root\n for char in word:\n if char in node:\n node = node[char]\n else:\n return None\n return node", "def _traverse(self, word):\n node = self.root\n for i in (ord(x)-97 for x in word):\n if not node.data[i]: return None\n node = node.data[i]\n return node", "def get_leaf_node(self, current_word):\n node = self.wordlist.find(current_word)\n\n if node is None:\n # current word is not in the Trie\n return None\n elif node.value != TRIE_BRANCH:\n # current word is already a leaf\n return current_word\n \n # descend down a random branch down the trie\n # until we hit a leaf\n while node.children:\n next_letter = random.choice(list(node.children.keys()))\n current_word += next_letter\n node = node.children.get(next_letter)\n \n return current_word", "def search(self, word):\n if not word:\n return False\n discrepancy = 1\n stack = [(child, word, discrepancy)\n for child in self.root.children.values()]\n while stack:\n curr_node, curr_word, curr_disc = stack.pop()\n if len(curr_word) == 1:\n if curr_node.nil:\n if curr_disc == 0 and curr_word == curr_node.key:\n return curr_node.nil\n elif curr_disc == 1 and curr_word != curr_node.key:\n return curr_node.nil\n else:\n for key, child in curr_node.children.items():\n if curr_word[0] == curr_node.key:\n stack.append((child, curr_word[1:], curr_disc))\n elif curr_disc> 0:\n stack.append((child, curr_word[1:], curr_disc - 1))\n return False", "def find(self, word):\n\n curr = self.head\n words = []\n # Do we at least contain the whole word?\n for letter in word:\n if letter in curr.children:\n curr = curr.children[letter]\n else:\n return words\n\n queue = [curr]\n\n while len(queue):\n curr = queue.pop()\n\n if \"_end\" in curr.children:\n words.append(curr.data)\n\n queue = [node\n for _, node in\n curr.children.iteritems()] + queue\n\n return words", "def find_nearest_k(word, vocab, model, k=5):\n ix2vocab = {vocab[w]: w for w in vocab}\n vocab_matrix = model.embedding_layer.parameters()['W'].data\n word_ix = vocab[word]\n word_embed = vocab_matrix[word_ix]\n top_k = []\n for ix in xrange(vocab_matrix.shape[0]):\n if ix == word_ix:\n continue\n dist = -distance(word_embed, vocab_matrix[ix])\n if len(top_k) < k:\n h.heappush(top_k, (dist, ix2vocab[ix]))\n else:\n if top_k[0][0] < dist:\n h.heappop(top_k)\n h.heappush(top_k, (dist, ix2vocab[ix]))\n return top_k", "def find_vertex_from_word(self, word):\n for vertex in self.vertices:\n if vertex.word == word:\n return vertex\n\n return None", "def 
search(self, word):\n return self.helper(word, self.root)", "def search(self, word):\n return self.helper(word, self.root)", "def search(self, word):\n return self.searchRecursive(self.root, word)", "def search(self, word):\n node = self.root\n return self.searchHelper(node, word)", "def get_embedding(self, embed_id):\n\t\tif not embed_id in self.embedding_meta:\n\t\t\treturn None\n\t\tif embed_id in self.embedding_cache:\n\t\t\tlog.info(\"Using cached embedding for %s\" % embed_id)\n\t\t\treturn self.embedding_cache[embed_id]\n\t\t# load the associated word embedding\n\t\tem = self.embedding_meta[embed_id]\n\t\tin_path = em.dir_base / em[\"file\"]\n\t\tlog.info(\"Loading word embedding from %s\" % in_path)\n\t\ttry:\n\t\t\tself.embedding_cache[embed_id] = Embedding(in_path)\n\t\texcept Exception as e:\n\t\t\tlog.warning(\"Failed to load word embedding: %s\" % in_path)\n\t\t\tlog.warning(e)\n\t\t\treturn None\n\t\treturn self.embedding_cache[embed_id]", "def search(self, word):\r\n return self.DFS(word, 0, 0, self.trie.root)", "def search(self, word):\n return self.find(word, 0, self.root)", "def get_embeddings_for_word(target_word, tt, token_embeddings, sentence, anaphor = False):\n if target_word not in tt:\n og_index = find_og_index(target_word, sentence)\n return get_wordpiece_embeddings(og_index, tt, token_embeddings)\n\n ## Get all indices of target word\n indices = [i for i, x in enumerate(tt) if x == target_word]\n if anaphor:\n target_index = indices[-1]\n else:\n target_index = indices[0]\n # Index of target word\n # target_index = tt.index(target_word)\n\n return token_embeddings[target_index]", "def search(self, word: str):\n node = self.root\n for letter in word:\n if letter in node.child:\n node = node.child[letter]\n else:\n return False\n return node.is_leaf", "def embed_word(self):\n return self.emb.get_keras_embedding(dropout = self.emb_dropout,\n trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def search(self, word):\n return self.find(self.root,word)", "def search(self, word):\n return self.__search(self.__root, word,0)", "def embed_word(self):\n return self.emb.get_keras_embedding(trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def search(self, word: str) -> bool:\n #start from the root\n node = self.root\n for char in word:\n if char in node.child:\n node = node.child.get(char)\n else:\n return False\n return node.isWord", "def search(self, word):\n q = collections.deque()\n q.append(self.root)\n start = 0\n while len(q):\n size = len(q)\n for _ in range(size):\n cur = q.popleft()\n if start==len(word):\n if cur.isWord:\n return True\n continue\n if word[start]!=\".\":\n if word[start] in cur.children:\n q.append(cur.children[word[start]])\n else:\n for k in cur.children:\n q.append(cur.children[k])\n start += 1\n return False", "def search(self, word):\n node = self.root\n for c in word:\n if c in node.children:\n node = node.children[c]\n else:\n return False\n return node.word_end", "def findChildB(T,k):\r\n for i in range(len(T.data)):\r\n if k < T.data[i].word:\r\n return i\r\n return len(T.data)", "def search(self, word):\n def _subSearch(node, word):\n if not word:\n return node.isWord\n\n contains = False\n if word[0] == '.':\n for c in node.children:\n contains |= _subSearch(node.children[c], word[1:])\n if contains:\n return True\n elif word[0] in node.children:\n contains |= _subSearch(node.children[word[0]], word[1:])\n\n return contains\n\n return _subSearch(self.root, word)\n\n\n # cur = self.root\n # nodes = 
[]\n # nodes.append(cur)\n\n # for c in word:\n # # new_nodes = []\n # # for node in nodes\n # # if c == '.':\n # if c not in cur.children:\n # return False\n \n # cur = cur.children[c]", "def insert_word_to_trie(self, word):\n if self.root is None:\n self.root = TrieNode(None)\n \n word_len = len(word)\n \n iter = self.root\n index = 0\n while word_len > index:\n start = index\n ch = word[index]\n prefix_word = ch\n # Check if the character is an alphabet\n while self._is_alphabet(ch):\n index = index + 1\n if word_len <= index:\n break\n ch = word[index]\n # Get the word slice separated by a delimiter \n tag = word[start:index]\n if iter.children.get(tag) is None:\n # This sliced tag doesn't exist on the trie yet so add it.\n node = TrieNode(tag)\n iter.children[tag] = node\n else:\n # This sliced tag already exists on the Trie.\n # If it was a leaf node before then it might not be a leaf node now.\n iter.children[tag].is_leaf = False\n iter.actual_word = []\n \n #Move to the next delimiter separated word tag\n iter = iter.children[tag]\n index = index + 1\n \n # Mark last node as leaf.\n iter.is_leaf = True\n # Append the actual word to the leaf\n # Could be a list for example same sliced words with different delimiters like abc_cde_fgh and abc-cde-fgh\n iter.actual_word.append(word)" ]
[ "0.6154507", "0.6151499", "0.61069804", "0.6043727", "0.59671444", "0.5961432", "0.59472084", "0.5831675", "0.5821121", "0.5751776", "0.57412803", "0.5738347", "0.5738347", "0.57238066", "0.5716013", "0.568632", "0.564064", "0.56310326", "0.56223166", "0.55864", "0.55842125", "0.5556334", "0.5546044", "0.5545107", "0.5536529", "0.5524301", "0.55053574", "0.5491301", "0.5484345", "0.5481769" ]
0.6870404
0
Traverse through the entire BTree to calculate the number of nodes it has.
def numNodes(T): n = 1 if T.isLeaf: return n for i in range(len(T.child)): n += numNodes(T.child[i]) return n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_nodes(self):\n if self.children is None:\n return 0\n\n total_count = 0\n for child in self.children:\n if child is None:\n return 0\n child_count = child.count_nodes()\n total_count = total_count + child_count\n\n return total_count+1", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def tree_size(self) -> int:\n Q = Queue()\n count = 0\n Q.put(self.root)\n while not Q.empty():\n node = Q.get()\n count += 1\n for child in node.children.values():\n Q.put(child)\n return count", "def size(self) -> int:\n #binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper count nodes\n return self.size_helper(self.root)", "def get_tree_size(self, node):\n\n # If the tree has not been created yet.\n if node == None:\n return 0\n n_nodes = 1\n for child in node.children:\n n_nodes += self.get_tree_size(node.children[child])\n return n_nodes", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def count(self):\n return self.__tree.node_count", "def node_count(self):\n return self._root.count()", "def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt", "def countNodes(self, root):\n\n\n if not root:\n return 0\n\n return 1+self.countNodes(root.left)+self.countNodes(root.right)", "def count(self):\n\t\treturn len(list(self.nodes))", "def leaf_count(self) -> int:\n if self.children == []:\n return 1\n else:\n return sum([x.leaf_count() for x in self.children])", "def _children_count(self):\n cnt = 0\n if self.left:\n cnt += 1\n if self.right:\n cnt += 1\n return cnt", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def GetCount(self):\r\n\r\n if not self._anchor:\r\n # the tree is empty\r\n return 0\r\n\r\n count = self._anchor.GetChildrenCount()\r\n \r\n if not self.HasAGWFlag(TR_HIDE_ROOT):\r\n # take the root itself into account\r\n count = count + 1\r\n \r\n return count", "def count(self):\r\n return self.count_helper(self.top_node)", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def node_count(self):\n return self.process_tree.get_descendant_count() + 1", "def n_trees(self):\n return len(self.data_kd)", "def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])", "def count_taxa_tree(tree_nxobj):\n\tnode_count = 0 #number of taxa in the tree\n\tfor node in tree_nxobj.preorder_node_iter():\n\t\tnode_count += 1\n\n\treturn node_count", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n# s = 0\n# for child in T:\n# s += leaf_count(child)\n# return s\n return reduce(add, map(leaf_count, T))", "def count_leaves(self) -> int:\n # binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper function +=count total leaf\n return self.count_leaves_helper(self.root)", "def __len__(self):\n return len(self.subtrees())", "def getNNodesTot(self):\n nNodesTot = 0\n for iElt 
in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot", "def node_count(self):\n return self._node_count", "def count_leaf(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 0 + self.get_left().count_leaf() + self.get_right().count_leaf()\n else:\n return 0 + self.get_left().count_leaf()\n else:\n return 0 + self.get_right().count_leaf()", "def leaf_count(t: Tree) -> int:\n if t.children == []:\n return 1\n else:\n return sum([leaf_count(child) for child in t.children])", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n s = 0\n for child in T:\n s += leaf_count(child)\n return s\n # Can you put the else clause in one line instead?\n return functools.reduce(operator.add, map(leaf_count, T), 0)" ]
[ "0.8126878", "0.78849125", "0.7833212", "0.78316116", "0.7772799", "0.77459306", "0.77308065", "0.7716238", "0.7689622", "0.7682001", "0.767653", "0.76338", "0.74925274", "0.7490429", "0.7453843", "0.73934203", "0.7377626", "0.7375549", "0.73751295", "0.73481226", "0.7330711", "0.7323081", "0.7319196", "0.72478944", "0.72407323", "0.71964794", "0.7144839", "0.7126776", "0.7124476", "0.71089065" ]
0.8063285
1
Traverse through a single path of the BTree (from the root to a single leaf) to calculate its height.
def height(T): if T.isLeaf: return 0 return 1 + height(T.child[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def height(self) -> int:\n # binary search tree == empty\n if self.root is None:\n return -1\n\n #count number\n return self.height_helper(self.root)", "def get_height(self):\n def _get_height(node, height=None):\n if not height:\n height = self._get_level(node) + 1\n if node.left:\n height = _get_height(node.left, height+1)\n if node.right:\n height = max(height, _get_height(node.right, height+1))\n if not node.left and not node.right:\n height = self._get_level(node)\n return height\n return _get_height(self.root)", "def height(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 0\n else:\n if self.has_left():\n if self.has_right():\n return 1+max(self.get_left().height(), self.get_right().height())\n else:\n return 1+self.get_left().height()\n else:\n return 1+self.get_right().height()", "def _height1(self): #works but n^2 time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def calculate_tree_height(tree):\n max_height = 0\n for i in tree.values():\n if i.is_leaf():\n path = i.path_to_root()\n if len(path) > max_height:\n max_height = len(path)\n\n return max_height", "def get_height_tree(self):\n layers = self.breadth_first_traversal()\n \n if all(node is None for node in layers[-1]):\n del layers[-1]\n \n height = len(layers) - 1\n return height", "def height(self, p=None):\n if p is None:\n p = self.root()\n return self._height2(p) # start _height2 recursion", "def height(node): \n if node is None:\n return -1\n \n # select the top two heights:\n max_height_1, max_height_2 = -1, -1\n for child in node.children:\n h = height(child) + 1\n if h > max_height_1:\n max_height_1, max_height_2 = h, max_height_1\n elif h > max_height_2:\n max_height_2 = h\n \n self.diameter = max(self.diameter, max_height_1 + max_height_2 + 2)\n \n return max_height_1", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def height(rbt):\n try:\n return heightTree(rbt['root'])\n except Exception as exp:\n error.reraise(exp, 'RBT:height')", "def height(t: Tree):\n if len(t.children) == 0:\n return 1\n else:\n return 1 + max([height(c) for c in t.children])", "def heightTree(root):\n try:\n if (root is None):\n return -1\n else:\n return 1 + max(heightTree(root['left']), heightTree(root['right']))\n except Exception as exp:\n error.reraise(exp, 'RBT:heightTree')", "def get_height(self, treenode=self):\n\t\treturn self.__get_height(treenode)", "def height(self):\n # Check if root node has a value and if so calculate its height\n return self.root.height() if self.root is not None else -1", "def height(self, p=None):\n if p is None:\n p = self.root()\n return self._height2(p) # start _height2 recursion", "def height(self, p = None):\n if p is None:\n p = self.root()\n return self._height2(p) # start _height2 recursion", "def _height2(self, p):\n if self.is leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def _get_height(self, root: AVLTreeNode) -> int:\n if not root: # empty tree means height of 0\n return 0\n else:\n return root.height # return instance var height", "def _height2(self, p): # time is linear in size of subtree\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def get_height(self):\n if self.root is None:\n return 0\n else:\n return 
self._get_height(self.root) # Start at the root", "def _height2(self, p): # time is linear in size of subtree\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def height(node):\r\n \r\n height = 0\r\n temp = node\r\n while temp != None:\r\n temp = temp.parent\r\n height += 1\r\n return height", "def height(self, p=None):\n\n if p is None:\n p = self.root()\n return self._height2(p) # start height2 recursion", "def height(self) -> int:\n if self.root is None:\n return -1\n\n return self.height_helper(self.root)", "def height_helper(self, node: object) -> int:\n #current node == a leaf\n if self.is_leaf(node):\n return 0\n\n #current node == a single child\n if node.left is not None and node.right is None:\n return 1 + self.height_helper(node.left)\n if node.left is None and node.right is not None:\n return 1 + self.height_helper(node.right)\n\n #node ==2 child leaf\n if self.height_helper(node.left) > self.height_helper(node.right):\n return 1 + self.height_helper(node.left)\n else:\n return 1 + self.height_helper(node.right)", "def height(self):\n if self.children == []:\n return 1 \n else:\n arr = []\n for child in self.children:\n result = 1 + child.height()\n arr.append(result)\n return max(arr)", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))" ]
[ "0.74219936", "0.7287904", "0.7255695", "0.72408116", "0.7225057", "0.7186698", "0.71231264", "0.71046185", "0.7073362", "0.7036825", "0.70221025", "0.7013034", "0.6988873", "0.69887346", "0.69838744", "0.6982113", "0.6974794", "0.69711584", "0.6967295", "0.6943627", "0.69403386", "0.6916841", "0.6907571", "0.6901492", "0.68789357", "0.6872537", "0.6852025", "0.6841019", "0.6841019", "0.6841019" ]
0.74674815
0
Function that responsible for the iteration over the events returned from github api
def _iter_events(self) -> Generator: response = self.client.call() events: list = response.json() if not events: return [] while True: yield events last = events.pop() self.client.set_next_run_filter(last['@timestamp']) response = self.client.call() events = response.json() try: events.pop(0) assert events except (IndexError, AssertionError): LOG('empty list, breaking') break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def events(self) -> Iterable[Event]:", "def get_events(self):\n\n url = '/v2.4/'+self.page_id+'/events'\n data = self.graph.request(url)\n\n while 'next' in data['paging'].keys():\n print data['paging']['next']\n data = self.graph.request(url, args={\n 'limit' : 100,\n 'after' : data['paging']['cursors']['after']\n })\n\n return data", "def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')", "def parse_event_list(self, response):\n for event in response.css(\".view-content .article-title a::attr(href)\"):\n event_url = event.extract()\n yield scrapy.Request(\n response.urljoin(event_url),\n callback=self.parse_event_page,\n dont_filter=True,\n )\n next_url = self._response_next_url(response)\n if next_url:\n yield scrapy.Request(\n response.urljoin(next_url),\n callback=self.parse_event_list,\n dont_filter=True,\n )", "def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n #addEvent(credentials)", "def scrape_events(path, urls):\n seen_ids = set()\n result = []\n for url in urls:\n # Get all of the Network requests being sent out\n print(f'Processing {url}')\n driver.get(url)\n browser_log = driver.get_log('performance') \n events = [process_browser_log_entry(entry) for entry in browser_log]\n results = []\n # Find the Network request that sends a GET request to EventBrite API\n for event in events:\n if event['method'] == 'Network.responseReceived':\n # print(event)\n if 'event_ids' in event['params']['response']['url']:\n results.append(event)\n # Get the GET request URL\n get_url = \"\"\n # TODO: Sometimes returning 0 or more than 1... 
I'm not sure why :(\n if len(results) >= 1:\n get_url = results[0]['params']['response']['url']\n # Get the GET request response JSON\n json_response = get_request(get_url)\n event_list = json_response['events']\n # Find unique events in the response JSON \n unique_event_list = []\n for event in event_list:\n if event['id'] not in seen_ids:\n seen_ids.add(event['id'])\n unique_event_list.append(event)\n parsed_events = parse_event_page(unique_event_list)\n result.extend(parsed_events)\n else:\n print(results)\n print('yikes something went wrong')\n\n driver.close()\n return result\n # save_events(path, result)", "def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)", "def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events", "def access_event(self, args):\n\t\tr = requests.get(self.github+\"repos/\"+args.org+\"/\"+args.repo+\"/\"+args.event_type)\n\t\tprint(r.headers)\n\t\tprint(type(str(r.headers)))\n\t\t#self.save_to_file(str(r.headers))", "def parse_events(response):\n\n if not request_was_successful(response):\n print('WARNING: Unsuccessful HTTP response from 
eventful')\n return []\n\n json = response.json()\n if json.get('events') is None:\n print(\"ERROR: No eventful results on page\")\n return []\n\n # parse the events into a list of Event objects\n # print(json)\n events = []\n events.extend(map(Event, json['events']['event']))\n return events", "def get_events_batch() -> PayloadDictList:\n ...", "def test_events_list(self):\n response = self.client.get(url_for(\n 'issues.eventsresourse',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def get_events(start_date, end_date, source=utils.get_native_source, **kwargs):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting events from source %s...\", source)\n if not source:\n return []\n # with open('sportmonks/response_texts/fixtures_{}-{}.txt'.format(start_date.strftime('%Y-%m-%d'),\n # end_date.strftime('%Y-%m-%d')), 'w') as outfile:\n # season is necessary so that the season object is extracted and used\n include = kwargs.get('include', '')\n include = ','.join([include, 'season']) if include else 'season'\n kwargs['include'] = include\n data, meta, status_code = sportmonks.fixtures.by_date_range(start_date=start_date, end_date=end_date, **kwargs)\n # json.dump(data, outfile, indent=4)\n if not data:\n return []\n pre_events = []\n try:\n num_fetched_objects = len(data)\n except:\n num_fetched_objects = None\n num_processed_objects = 0\n try:\n for obj in data:\n num_processed_objects += 1\n try:\n sid = obj.get('id', None)\n time = obj.get('time', dict())\n starting_at = time.get('starting_at', dict())\n event_datetime = get_date(starting_at, 'date_time')\n # custom_timezone = pytz.timezone('Europe/Athens')\n # event_datetime = event_datetime.astimezone(custom_timezone)\n home_team_sid = obj.get('localteam_id', None)\n away_team_sid = obj.get('visitorteam_id', None)\n competition_season_sid = obj.get('season_id', None)\n season_string = obj.get('season', {}).get('data', {}).get('name')\n stage_sid = obj.get('stage_id', None)\n round_sid = obj.get('round_id', None)\n competition_sid = obj.get('league_id', None)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n zak_season_name = games.models.Season.zakandify_season_string(season_string)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n if not season:\n logger.data_error('Could not extract season object from season string: %s', season_string)\n continue\n\n # todo sportmonks fix\n # if the event involves a problematic team it is not created in order to avoid future problems\n if is_in_problematic_teams(home_team_sid):\n home_team_sid = None\n if is_in_problematic_teams(away_team_sid):\n away_team_sid = None\n\n competition_seasons = games.models.CompetitionSeason.by_sid(competition_season_sid, source, season)\n try:\n competition_season = competition_seasons.first() # only one entity exists in the queryset\n except Exception as e:\n logger.warning('%s', e)\n competition_season = None\n\n home_team = games.models.Team.by_sid(home_team_sid, source)\n away_team = games.models.Team.by_sid(away_team_sid, source)\n pre_event = pre_models.PreEvent(source, sid, event_datetime, home_team, away_team, competition_season)\n pre_events.append(pre_event)\n except Exception as e:\n 
logger.error('%s Unexpected problem with sportmonks.fixtures.by_date_range %s %s from source %s',\n e, start_date, end_date, source)\n logger.info(\"%s event objects were contained in the response\", num_fetched_objects)\n logger.info(\"%s event objects were processed\", num_processed_objects)\n logger.info(\"%s pre events were created\", len(pre_events))\n return pre_events", "def __calender_events(self):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n pt=\"Getting the upcoming latest events\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pt)\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=1, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n pq=\"No upcoming events found.\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % pt)\n self.speech.synthesize_text(pq)\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n #start1=''.join(start)\n summary=event['summary']\n print start,summary\n requests.get(\"http://localhost:8080/statement?text=\"+start+\" \"+summary)", "def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break", "def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='[email protected]', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n\n # TODO noitem found\n print(datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00'))\n\n nextStartTime = datetime.datetime.strptime(events[0]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - datetime.datetime.now()).total_seconds()\n\n if delta < 0:\n print(\"capture next\")\n nextStartTime = datetime.datetime.strptime(events[1]['start']['dateTime'], '%Y-%m-%dT%H:%M:%S+09:00')\n delta = (nextStartTime - 
datetime.datetime.now()).total_seconds()\n\n print(delta)\n\n if NOTIFY_THRESHOLD_SECOND > delta:\n alert_time_limit()\n else:\n set_normal()\n\n\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "def test_getEventsFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + events[0]['start'] + events[0]['end'])\n invuid = '00000000000000000000000'\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n rv = self.json_get('/getEventFromId/alex', {'uid': uid})\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def events(bot, event, *args):\n yield from _printEventList(bot, event)", "def get_event_list(self):\n pass", "def parse(self, response):\n for link in response.css(\".event-entry .event-title a::attr(href)\").extract():\n yield scrapy.Request(\n response.urljoin(link), callback=self.parse_event_page, dont_filter=True\n )", "def test_api_predictor_events_get(self):\n pass", "def test_getEventsForItinerary(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n\n rv = self.json_get('/getEventsForItinerary/bbbb', date)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert 'Itinerary for the day not found' in str(rv.data)\n\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert '{\"events\": []}' in str(rv.data)\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def find_events(handler_input):\n\n length = 0\n\n events_list = requests.get(\"http://3.17.148.9:8080/events\")\n\n # check for response code from server\n if events_list.status_code == 200:\n events_list = events_list.content\n details = json.loads(events_list.decode('utf-8'))\n length = len(details)\n\n # store count of every event\n events = dict()\n\n # generate response text\n response_text = \"\"\n\n for i in range(length):\n cat = details[i]['event_category']\n if cat not in events:\n events[cat] = 1\n else:\n events[cat] += 1\n \n for event, count in events.items():\n response_text += str(count) + \" \" + 
event+\", \"\n\n speech_text = \"I found {} events.\".format(response_text)\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"I found {} events.\".format(response_text), speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "async def get_events(self) -> list[Event]:\n log.debug(\"Discovering events in branding repository.\")\n\n try:\n event_directories = await self.fetch_directory(\"events\", types=(\"dir\",)) # Skip files.\n except Exception:\n log.exception(\"Failed to fetch 'events' directory.\")\n return []\n\n instances: list[Event] = []\n\n for event_directory in event_directories.values():\n log.trace(f\"Attempting to construct event from directory: '{event_directory.path}'.\")\n try:\n instance = await self.construct_event(event_directory)\n except Exception as exc:\n log.warning(f\"Could not construct event '{event_directory.path}'.\", exc_info=exc)\n else:\n instances.append(instance)\n\n return instances", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('calendar', 'v3', http=http)\r\n\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n eventsResult = service.events().list(\r\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = eventsResult.get('items', [])\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n print(start, event['summary'])", "def get_event_data(self, ):\n \n if os.path.exists(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\"):\n self.bug_data_frame = pd.read_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\")\n else:\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = 
self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(f\"{self.cdp_dump_path}/github_events_cdp_dump.csv\", encoding='utf-8-sig',\n index=False)\n event_parser = EventsJsonParser()\n event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants, self.event_data_frame,\n f\"{self.cdp_dump_path}/\"\n f\"{CDPConfigValues.closed_events_list_file_name}\")", "def logevents(self, events, request = None):\n for event in events:\n self.logevent(event, request)" ]
[ "0.6768615", "0.65579355", "0.6553018", "0.65295124", "0.6510908", "0.64643663", "0.6447313", "0.644498", "0.6408641", "0.63940495", "0.6309841", "0.6309484", "0.62193006", "0.61751413", "0.6155901", "0.61337835", "0.6118683", "0.6114953", "0.6112544", "0.6086963", "0.6077831", "0.6069673", "0.60658944", "0.6058369", "0.6030696", "0.6015391", "0.6010141", "0.5994147", "0.59617066", "0.59568995" ]
0.7243678
0
take a string and change all "a" in string to 1, "e" in to 2, "i" into 3, "o" into 4, "u" into 5 str > str
def modify(str1): str2 = "" for char in str1.lower(): if char == "a": str2 += "1" elif char == "e": str2 += "2" elif char == "i": str2 += "3" elif char == "o": str2 += "4" elif char == "u": str2 += "5" else: str2 += char return str2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task18_letter_replacement(text):\n if text and isinstance(text, str):\n new_text = []\n for char in text:\n new_char_index = ascii_lowercase.index(char) + 1\n new_char = ascii_lowercase[new_char_index]\n if new_char in 'aeiou':\n new_char = new_char.upper()\n new_text.append(new_char)\n return ''.join(new_text)\n else:\n raise ValueError", "def majuscule(string):\n\n res = \"\"\n toChange = True\n\n for letter in string:\n value_letter = ord(letter)\n isLetter = 65 <= value_letter and value_letter <= 92 or 96 <= value_letter and value_letter <= 122\n if isLetter:\n if toChange:\n res += chr(ord(letter) - 32)\n else:\n res += letter\n toChange = not toChange\n else:\n res += letter\n\n print(res)", "def encode1(s,n):\n r = \"\"\n for l in s:\n l = ord(l) # convert to ascii\n l = l - 97 # 'a' is 97 so we want to reduce so 'a'=0 'b'=1 etc\n l = l + n # add the offset\n l=l%26 # use mod so that we wrap around back to 'a' if we go past 'z'\n l=l+97 # and add back the 97\n r = r + chr(l)\n return r", "def change(st):\n return ''.join('1' if a in st.lower() else '0' for a in map(chr, range(97, 123)))", "def modify_string():\n modString = input(\"Please write a string. \")\n modNewStr = \"\"\n modCount = 1\n\n for letter in modString:\n if modCount < 2:\n modNewStr = letter\n else: \n modNewStr = modNewStr + \"-\" + letter * modCount\n\n modCount += 1\n \n print(\"New string: \", modNewStr)", "def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += alpha_map[s[i]]\n i += 1\n return res", "def toGoatLatin(S):\n S = S.split(' ')\n ret = ''\n \n for i, el in enumerate(S):\n if el[0].lower() in 'aeiou':\n ret += el + 'ma' + 'a'*(i+1) + ' '\n else:\n x = el[:1]\n ret += el[1:] + x + 'ma' + 'a'*(i+1) + ' '\n return ret.rstrip(' ')", "def removeExtraChars(inStr, char):\n for i in range(5):\n inStr = inStr.replace(char+char, char)\n return inStr", "def transform(s):\r\n return 'digit ' + str(s)", "def refrm(s):\n s2 = s[5:10] + s[4] + s[0:4]\n return s2", "def transform(s):\n return 'digit ' + str(s)", "def scramble(src):\n\n output = \"\"\n\n for each in src.lower():\n diff = ord(each) - ord('a')\n\n if diff >= 0 and diff < 26:\n output += chr(ord('a') + (25 - (ord(each) - ord('a'))))\n elif each >= '0' and each <= '9':\n output += each\n\n return output", "def myfunc (some_str):\n new_string = str(some_str).lower()\n next_string = \"\"\n for i, v in enumerate(new_string):\n if i % 2 == 0:\n next_string += v.upper()\n else:\n next_string += v.lower()\n return next_string", "def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2", "def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna", "def makePigLatin(word): \n m = len(word)\n vowels = \"a\", \"e\", \"i\", \"o\", \"u\", \"y\" \n # short words are not converted \n if m<3 or word==\"the\":\n return word\n else:\n for i in vowels:\n if word.find(i) < m and word.find(i) != -1:\n m = word.find(i)\n if m==0:\n return 
word+\"way\" \n else:\n return word[m:]+word[:m]+\"ay\"", "def mutate_string(s):\n new_s = ''\n letters = string.ascii_uppercase + ' '\n s = list(s)\n for i in range(len(s)):\n if roll_dice():\n new_s = new_s + random.choice(letters)\n else:\n new_s = new_s + s[i]\n return new_s", "def _transform(self, original, code):\n\n msg = list(original)\n for k in range(len(msg)):\n\n if msg[k].isupper():\n j = ord(msg[k]) - ord(\"A\") # Determining correct index for new character.\n msg[k] = code[j]\n\n return \"\".join(msg)", "def two_passes(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n splitted = s.split('#')\n res = ''\n\n for i in range(len(splitted)):\n j = 0\n if i + 1 < len(splitted) and len(splitted[i]) > 2:\n while j < len(splitted[i]) - 2:\n res += alpha_map[splitted[i][j]]\n j += 1\n\n if i + 1 < len(splitted):\n res += alpha_map[splitted[i][j:]]\n else:\n while j < len(splitted[i]):\n res += alpha_map[splitted[i][j]]\n j += 1\n return res", "def str2ranges(s):\n def pair2range(be):\n if be[1]-be[0] ==0: tt = chr(be[0])\n elif be[1]-be[0]==1: tt = chr(be[0]) + chr(be[1])\n else: tt = chr(be[0]) + '-' + chr(be[1])\n return tt\n lenn=len(s);\n if lenn==0 : return s\n d = ord(s[0])\n be = [d,d]; tt = ''\n for ii in range(1,lenn):\n d = ord(s[ii])\n if d-be[1]==1: be[1]=d\n else:\n tt += pair2range(be)\n be = [d,d]\n tt += pair2range(be)\n return tt", "def encode(s: str) -> str:\n result = ''\n prev = s[:1]\n cnt = 0\n for c in s:\n if c == prev:\n cnt += 1\n else:\n result += str(cnt) + prev\n prev = c\n cnt = 1\n result += str(cnt) + prev\n\n return result", "def compress_v3(string):\n\n string_dict = collections.OrderedDict()\n final = \"\"\n\n for letter in string:\n string_dict[letter] = string_dict.get(letter, 0)+1\n\n for letter, count in string_dict.iteritems():\n final += letter + str(count)\n\n return final", "def encripto(string_in):\n \n string_out=\"\"\n for char in string_in:\n index_letra=letras.index(char)\n string_out+= letras[index_letra-1]\n \n return string_out", "def atbash_cipher(s):\n try:\n new_s = \"\"\n for l in s:\n if string.ascii_lowercase.find(l, 0) != -1:\n pos = string.ascii_lowercase.find(l, 0)\n reverse = string.ascii_lowercase[::-1]\n new_s += reverse[pos]\n elif string.ascii_uppercase.find(l, 0) != -1:\n pos = string.ascii_uppercase.find(l, 0)\n reverse = string.ascii_uppercase[::-1]\n new_s += reverse[pos]\n else:\n new_s += l\n return new_s\n except (ValueError, IndexError) as ex:\n print(EXCEPTION_MESSAGE, ex)", "def compress_v2(string):\n\n result = \"\"\n\n l = len(string)\n\n # Edge cases\n if l == 0:\n return \"\"\n\n if l == 1:\n return string + \"1\"\n\n last = string[0]\n count = 1\n i = 1\n\n while i < l:\n if string[i] == string[i-1]:\n count += 1\n else:\n result = result + string[i-1] + str(count)\n count = 1\n\n i += 1\n\n # For the last letter\n result = result + string[i-1] + str(count)\n\n return result", "def encode(string):\n return ' '.join(partition(decode(string), 5))", "def str2rangesUC(s):\n def i2uhex(i):\n s=hex(i);\n if s[:2]=='0x': s=s[2:]\n s='\\\\u'+s.rjust(4,'0')\n return s\n def pair2ucode(be):\n if be[1]-be[0] ==0: tt = i2uhex(be[0])\n elif be[1]-be[0]==1: tt = i2uhex(be[0]) + i2uhex(be[1])\n else: tt = i2uhex(be[0]) + '-' + i2uhex(be[1])\n 
return tt\n lenn=len(s)\n if lenn==0 : return s\n d = ord(s[0])\n be = [d,d]; tt = ''\n for ii in range(1,lenn):\n d = ord(s[ii])\n if d-be[1]==1: be[1]=d\n else:\n tt += pair2ucode(be)\n be = [d,d]\n tt += pair2ucode(be)\n return tt", "def reverse_vowels(s):\n\n phrase = \"\"\n vowels = []\n for letter in s:\n if letter.lower() in \"aeiou\":\n phrase += \"~\"\n vowels.append(letter)\n else: \n phrase += letter\n \n index = 0\n new_phrase = \"\"\n vowels = vowels[-1:-len(vowels)-1:-1]\n \n for letter in phrase:\n\n if letter == \"~\":\n new_phrase += vowels[index]\n index += 1\n else:\n new_phrase += letter\n\n return new_phrase", "def decode(s):\n start = 0\n multiplier = 1\n for char in s[::-1]:\n start += multiplier * LETTERS.index(char)\n multiplier = multiplier * 58\n return start", "def encode2(s,n):\n r = [ chr(((ord(x)-97+n)%26)+97) if x!=' ' else x for x in s]\n return \"\".join(r)" ]
[ "0.6553749", "0.64673", "0.6436828", "0.64264154", "0.64101595", "0.6340406", "0.6326642", "0.6203122", "0.60958964", "0.6075187", "0.6053977", "0.6053486", "0.60463107", "0.5976479", "0.5946494", "0.58518916", "0.5837796", "0.5811565", "0.58044684", "0.5779137", "0.57752246", "0.5760923", "0.5755905", "0.5739461", "0.57324994", "0.57262146", "0.57234466", "0.5715837", "0.57115966", "0.56995356" ]
0.79044306
0
Test case for create_response_descriptor_subscriptions_subscription_subscription_resource
def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_create_subscription(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_subscription_template(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_get_subscription(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_add_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def test_update_subscription(self):\n pass", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_subscription(self):\n try:\n self.arb.create_subscription(\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n except KeyError:\n pass\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n 
card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"Pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )", "def test_get_subscription_template(self):\n pass", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def test_update_subscription_template(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_process_subscriptions(self):\n pass" ]
[ "0.87412554", "0.8588088", "0.83379585", "0.82308966", "0.7775794", "0.7608592", "0.7605677", "0.75500727", "0.7347543", "0.7300998", "0.7060672", "0.70161843", "0.69488347", "0.69302607", "0.6744745", "0.6676874", "0.66014934", "0.65724623", "0.64912724", "0.6457783", "0.64099145", "0.6408436", "0.64026326", "0.6395089", "0.63897306", "0.6339286", "0.6278011", "0.62710935", "0.6232606", "0.6171253" ]
0.94596004
0
Test case for create_response_descriptor_subscriptions_subscription_subscription_resource_spaces
def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_modify_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_create_subscription(self):\n pass", "def test_child_index_response_descriptor_projects_release_projects_deployment_release_resource_deployment_resource_spaces(self):\n pass", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_create_subscription_template(self):\n pass", "def test_index_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_load_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_child_index_response_descriptor_projects_project_projects_release_project_resource_release_resource_spaces(self):\n pass", "def test_namespace_bucket_creation_with_many_resources_rpc(\n self, ns_resource_factory, bucket_factory\n ):\n logger.info(\"Create namespace resources and verify health\")\n ns_resources = [ns_resource_factory()[1] for _ in range(0, 100)]\n\n logger.info(\"Create the namespace bucket with many namespace resources\")\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resources[0],\n read_ns_resources=ns_resources,\n )", "def test_modify_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_create_subscription(self):\n try:\n self.arb.create_subscription(\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n 
bill_last_name=u\"Pool\"\n )\n except KeyError:\n pass\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"Pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )", "def test_load_response_descriptor_events_event_event_resource_spaces(self):\n pass" ]
[ "0.87898076", "0.84957993", "0.84469354", "0.8135916", "0.8024415", "0.75330555", "0.74872947", "0.72072065", "0.7170737", "0.7123899", "0.7024082", "0.6879563", "0.68744624", "0.66875726", "0.6547886", "0.6496468", "0.6405576", "0.62375563", "0.62205213", "0.62015754", "0.610602", "0.609149", "0.60752416", "0.60466284", "0.6032164", "0.602143", "0.6019126", "0.60151756", "0.5987308", "0.59187645" ]
0.94430006
0
Test case for delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource
def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_subscription(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def test_delete_subscription_template(self):\n pass", "def test_delete_template_subscription(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "async def delete_sub(self, sub: TSub) -> None:", "def test_cancel_subscription(self):\n try:\n self.arb.cancel_subscription()\n except KeyError:\n self.arb.cancel_subscription(subscription_id=u\"1234\")", "def __call__(\n self,\n request: pubsub.DeleteSubscriptionRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ):\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"delete\",\n \"uri\": \"/v1/{subscription=projects/*/subscriptions/*}\",\n },\n ]\n request, metadata = self._interceptor.pre_delete_subscription(\n request, metadata\n )\n pb_request = pubsub.DeleteSubscriptionRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def retrieveDeleteSubscription():\n if GlobalValues._recoSubscription == None:\n GlobalValues._deleteSubscription = \\\n _getSubscription(Workflow(spec = \"FileDelete\", \n owner = \"CMSTier0\",\n name = \"FileDelete\"),\n Fileset(name = \"Deletable\")\n )\n \n return GlobalValues._deleteSubscription", "def delete_subscription_from_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n 
metrics.incr(\"snuba.subscriptions.delete.subscription_does_not_exist\")\n return\n\n if subscription.status not in [\n QuerySubscription.Status.DELETING.value,\n QuerySubscription.Status.DISABLED.value,\n ]:\n metrics.incr(\"snuba.subscriptions.delete.incorrect_status\")\n return\n\n if subscription.subscription_id is not None:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n\n if subscription.status == QuerySubscription.Status.DELETING.value:\n subscription.delete()\n else:\n subscription.update(subscription_id=None)", "def test_unsubscribe(self):\n self.service.clientConnected()\n\n unsubscribers = []\n self.service.subscribe(u'url', 1\n ).addCallback(lambda fn: unsubscribers.append(fn))\n self.service.subscribe(u'url', 2\n ).addCallback(lambda fn: unsubscribers.append(fn))\n\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)\n\n unsubscribers.pop()()\n self.service.unsubscribe(u'url')\n self.assertIn(u'url', pubsubClient.subscriptions)\n\n unsubscribers.pop()()\n self.service.unsubscribe(u'url')\n self.assertNotIn(u'url', pubsubClient.subscriptions)", "def delete_subscription(self, subscription_id):\n url = '{}/v2/subscriptions/{}'.format(self.url, subscription_id)\n print(url)\n r = requests.delete(url, headers=self.headers_v2)\n if r.status_code == 204:\n return 'success'\n return r.json()", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def pre_delete_subscription(\n self,\n request: pubsub.DeleteSubscriptionRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[pubsub.DeleteSubscriptionRequest, Sequence[Tuple[str, str]]]:\n return request, metadata", "def _async_untrack_subscription(self, subscription: Subscription) -> None:\n topic = subscription.topic\n try:\n if _is_simple_match(topic):\n simple_subscriptions = self._simple_subscriptions\n simple_subscriptions[topic].remove(subscription)\n if not simple_subscriptions[topic]:\n del simple_subscriptions[topic]\n else:\n self._wildcard_subscriptions.remove(subscription)\n except (KeyError, ValueError) as ex:\n raise HomeAssistantError(\"Can't remove subscription twice\") from ex", "def remove_subscription(\n connection, subscription_id, project_id, error_msg=None, exception_type=None\n):\n response = connection.delete(\n url=connection.base_url + '/api/subscriptions/' + subscription_id,\n headers={'X-MSTR-ProjectID': project_id},\n )\n if not response.ok:\n if error_msg is None:\n error_msg = f\"Error unsubscribing Subscription {subscription_id}\"\n if exception_type is None:\n response_handler(response, error_msg)\n else:\n exception_handler(error_msg, exception_type)\n return response", "def delete_subscription_action(self,\n subscription_id,\n action_id):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions/{subscription_id}/actions/{action_id}')\n .http_method(HttpMethodEnum.DELETE)\n .template_param(Parameter()\n .key('subscription_id')\n .value(subscription_id)\n .should_encode(True))\n .template_param(Parameter()\n .key('action_id')\n .value(action_id)\n .should_encode(True))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n 
.convertor(ApiResponse.create)\n ).execute()", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def delete_subscription(self):\n try:\n self.client.delete_subscription(subscription=self.subscription_path)\n except NotFound:\n pass\n else:\n self._log_and_print(f'Deleted subscription: {self.subscription_path}')", "def test_registration_delete_inactive(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n registration.is_deleted = True\n signals.event.registration_deleted.send(registration)\n assert api_delete.call_count == 0\n assert api_post.call_count == 0", "def test_registration_delete_inactive(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n registration.is_deleted = True\n signals.event.registration_deleted.send(registration)\n assert api_delete.call_count == 0\n assert api_post.call_count == 0", "def test_registration_delete_active(dummy_regform, api_delete, api_post):\n registration = dummy_regform.registrations[0]\n grant_access([registration], dummy_regform, email_body='body', email_subject='subject')\n assert api_post.call_count == 1\n\n registration.is_deleted = True\n signals.event.registration_deleted.send(registration)\n assert api_delete.call_count == 1\n assert api_post.call_count == 1" ]
[ "0.88532954", "0.7841728", "0.7416214", "0.73064417", "0.71716964", "0.6985787", "0.69727606", "0.6864229", "0.66472006", "0.6516905", "0.647116", "0.64189106", "0.63475853", "0.6346618", "0.6203117", "0.6197093", "0.61706185", "0.6137428", "0.6111354", "0.60975397", "0.6088102", "0.6087677", "0.6060979", "0.60551065", "0.6054138", "0.6052445", "0.5983072", "0.58845705", "0.58845705", "0.58837706" ]
0.9524074
0
Test case for delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces
def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_subscription(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def test_delete_subscription_template(self):\n pass", "def test_delete_template_subscription(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass", "def test_delete_resource_used_in_ns_bucket_rpc(\n self, mcg_obj, cld_mgr, ns_resource_factory, bucket_factory\n ):\n # Create the namespace resources and verify health\n _, resource1 = ns_resource_factory()\n _, resource2 = ns_resource_factory()\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=resource1,\n read_ns_resources=[resource1, resource2],\n )\n response = mcg_obj.send_rpc_query(\n \"pool_api\", \"delete_namespace_resource\", {\"name\": resource2}\n )\n assert \"error\" in response.json()", "def test_delete_collection_cluster_resource_quota(self):\n pass", "def test_delete_cluster_resource_quota(self):\n pass", "def post_namespace_delete(self, resource_id, resource_dict):\n pass", "def test_delete_hyperflex_ext_fc_storage_policy(self):\n pass", "def test_delete_on_background_response_descriptor_variables_library_variable_set_library_variable_set_resource(self):\n pass", "def test_delete_resource_group(self):\n pass", "def post_qos_queue_delete(self, resource_id, resource_dict):\n pass", "def test_delete_collection_namespaced_policy(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_collection_namespaced_policy_binding(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_delete_collection_namespaced_deployment_config(self):\n pass", "def request_subset_delete(self, request):\n workspace_uuid = request['workspace_uuid']\n subset_uuid = request['subset_uuid']\n# print('###', user_id)\n# print('###', alias)\n# print('###', source_uuid)\n uuid_mapping = self._get_uuid_mapping_object(workspace_uuid)\n workspace_alias = uuid_mapping.get_alias(workspace_uuid) \n response = self.delete_subset(workspace_alias=workspace_alias, subset_unique_id=subset_uuid)\n \n return response" ]
[ "0.8165313", "0.8090574", "0.7151934", "0.7094335", "0.7016619", "0.677698", "0.6688813", "0.6620941", "0.66099346", "0.6475584", "0.6383104", "0.6330714", "0.62470895", "0.6236102", "0.61197543", "0.61036897", "0.59046084", "0.58654064", "0.58646995", "0.5852549", "0.5819268", "0.58127946", "0.5802008", "0.57719857", "0.57573754", "0.57493794", "0.5722895", "0.57042134", "0.5696635", "0.56558543" ]
0.9390268
0
Test case for index_response_descriptor_subscriptions_subscription_subscription_resource
def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_get_subscription(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_subscription(self):\n pass", "def test_update_subscription(self):\n pass", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_get_subscription_template(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_issue_subscriptions(self):\n pass", "def test_update_subscription_template(self):\n pass", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_create_subscription_template(self):\n pass", "def test_get_subscription_templates(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def test_list_template_subscriptions(self):\n pass", "def test_delete_subscription(self):\n pass", "def test_successful_subscriptions_exists_subbed(self) -> None:\n self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(self.streams[0], True, True)", "def tests_get_subscription(self):\n manager_root = ISubscriptionManager(self.root)\n manager_root.subscribability = SUBSCRIBABLE\n manager_root.subscribe('[email protected]')\n manager_folder = ISubscriptionManager(self.root.folder)\n manager_folder.subscribe('[email protected]')\n\n manager = ISubscriptionManager(self.root.folder.index)\n manager.subscribability = SUBSCRIBABLE\n manager.subscribe('[email protected]')\n\n self.assertEqual(\n manager.get_subscription('[email protected]'),\n None)\n subscription = manager.get_subscription('[email protected]')\n self.assertTrue(verifyObject(ISubscription, subscription))\n self.assertEqual(subscription.email, '[email protected]')\n self.assertEqual(subscription.content, self.root)\n self.assertEqual(len(manager.get_subscriptions()), 3)\n\n manager_root.subscribability = NOT_SUBSCRIBABLE\n\n self.assertEqual(\n manager.get_subscription('[email protected]'),\n None)\n subscription = manager.get_subscription('[email protected]')\n self.assertTrue(verifyObject(ISubscription, subscription))\n self.assertEqual(subscription.email, '[email protected]')\n self.assertEqual(subscription.content, self.root.folder.index)\n self.assertEqual(len(manager.get_subscriptions()), 1)", "def test_successful_subscriptions_list_subscribers(self) -> None:\n result = self.api_get(\n self.test_user,\n \"/api/v1/users/me/subscriptions\",\n {\"include_subscribers\": \"true\"},\n )\n json = 
self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))", "def test_process_subscriptions(self):\n pass", "def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'" ]
[ "0.89304525", "0.88660175", "0.863393", "0.83659315", "0.82808524", "0.79980975", "0.78439623", "0.767982", "0.7596332", "0.73273385", "0.72785175", "0.72358495", "0.71355075", "0.708252", "0.70700455", "0.699588", "0.6935531", "0.6835491", "0.68049234", "0.6770686", "0.6734396", "0.67046565", "0.66824365", "0.66525984", "0.6631061", "0.6599763", "0.65988135", "0.6593527", "0.6509409", "0.64901495" ]
0.94264156
0
Test case for index_response_descriptor_subscriptions_subscription_subscription_resource_spaces
def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_child_index_response_descriptor_projects_release_projects_deployment_release_resource_deployment_resource_spaces(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_child_index_response_descriptor_projects_project_projects_release_project_resource_release_resource_spaces(self):\n pass", "def test_index_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_index_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_get_subscription(self):\n pass", "def test_list_all_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_get_subscription_templates(self):\n pass", "def test_create_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_child_index_response_descriptor_policies_machine_policy_machines_deployment_targets_deployment_target_machine_policy_resource_deployment_target_resource_spaces(self):\n pass", "def test_load_response_descriptor_events_event_event_resource_spaces(self):\n pass", "def test_create_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_modify_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_get_subscription_template(self):\n pass" ]
[ "0.87763214", "0.87645346", "0.86041075", "0.8510752", "0.7778567", "0.73008937", "0.726181", "0.724671", "0.7137913", "0.7121906", "0.696909", "0.69145846", "0.68500614", "0.6792931", "0.6701232", "0.66211265", "0.64530164", "0.6076274", "0.6062893", "0.60609174", "0.6055377", "0.6037393", "0.60326797", "0.60021794", "0.595838", "0.59469664", "0.5938181", "0.5934404", "0.59039325", "0.58531153" ]
0.9353701
0
Test case for list_all_response_descriptor_subscriptions_subscription_subscription_resource
def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def test_get_subscriptions(self):\n pass", "def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'", "def ListSubscriptions(): # pylint: disable=unused-variable\n\n try:\n list_request = json_format.Parse(request.get_data(),\n sheriff_config_pb2.ListRequest())\n except json_format.ParseError as error:\n return jsonify(\n {'messages': [{\n 'severity': 'ERROR',\n 'text': '%s' % (error)\n }]}), 400\n list_response = sheriff_config_pb2.ListResponse()\n configs = list(luci_config.ListAllConfigs(datastore_client))\n configs = match_policy.FilterSubscriptionsByIdentity(\n auth_client, list_request, configs)\n for config_set, revision, subscription in configs:\n subscription_metadata = list_response.subscriptions.add()\n subscription_metadata.config_set = config_set\n subscription_metadata.revision = revision\n luci_config.CopyNormalizedSubscription(subscription,\n subscription_metadata.subscription)\n return (json_format.MessageToJson(\n list_response, preserving_proto_field_name=True), 200, {\n 'Content-Type': 'application/json'\n })", "def test_successful_subscriptions_list_subscribers(self) -> None:\n result = self.api_get(\n self.test_user,\n \"/api/v1/users/me/subscriptions\",\n {\"include_subscribers\": \"true\"},\n )\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))", "def test_successful_subscriptions_list(self) -> None:\n result = self.api_get(self.test_user, \"/api/v1/users/me/subscriptions\")\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n 
self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))", "def test_list_template_subscriptions(self):\n pass", "def test_register_subscriptions_for_list(self, register_subscription):\n mocks = [Mock(), Mock(), Mock()]\n subscriptions = [\n EventSubscription(mocks[0], lambda _: None),\n EventSubscription(mocks[1], lambda _: None),\n EventSubscription(mocks[2], lambda _: None),\n ]\n\n event_bus.register_subscriptions(subscriptions)\n received_subscriptions = set()\n for index, call in enumerate(register_subscription.call_args_list):\n received_subscriptions.add(self.get_subscription_argument(call))\n\n self.assertEqual(register_subscription.call_count, len(subscriptions))\n self.assertSetEqual(received_subscriptions, set(subscriptions))", "def test_get_subscription(self):\n pass", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def test_get_subscription_templates(self):\n pass", "def list(cls, **kwargs):\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]", "def get_all_subscriptions(self, next_token=None):\r\n params = {'ContentType' : 'JSON'}\r\n if next_token:\r\n params['NextToken'] = next_token\r\n response = self.make_request('ListSubscriptions', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def test_list_pending_template_subscriptions(self):\n pass", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def __call__(\n self,\n request: pubsub.ListSubscriptionsRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.ListSubscriptionsResponse:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"get\",\n \"uri\": \"/v1/{project=projects/*}/subscriptions\",\n },\n ]\n request, metadata = self._interceptor.pre_list_subscriptions(\n request, metadata\n )\n pb_request = pubsub.ListSubscriptionsRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = 
getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.ListSubscriptionsResponse()\n pb_resp = pubsub.ListSubscriptionsResponse.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_list_subscriptions(resp)\n return resp", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def list(self):\n SubDets = namedtuple(\"SubDetails\", [\"subscription_id\", \"name\"])\n return [SubDets(\"123\", \"sub1\")]", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass" ]
[ "0.8672392", "0.80634016", "0.7963346", "0.79525554", "0.7950173", "0.762874", "0.75751597", "0.75354266", "0.75185573", "0.7085981", "0.7015135", "0.6927433", "0.6609123", "0.6608625", "0.65717506", "0.64721626", "0.6449704", "0.6410992", "0.6408065", "0.63710225", "0.6327911", "0.6324767", "0.632427", "0.6310313", "0.6258734", "0.62567514", "0.62113565", "0.61863667", "0.6154192", "0.61292213" ]
0.94718015
0
Test case for list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces
def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_child_index_response_descriptor_projects_release_projects_deployment_release_resource_deployment_resource_spaces(self):\n pass", "def test_list_applied_cluster_resource_quota_for_all_namespaces(self):\n pass", "def test_load_response_descriptor_events_event_event_resource_spaces(self):\n pass", "def test_index_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_load_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'", "def test_create_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_modify_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_get_resource_license_resource_count_list(self):\n pass", "def collect_resources_list(namespace, output_dir, k8s_cli, mode):\n selector = \"\"\n if mode == MODE_RESTRICTED:\n selector = '--selector=\"{}\"'.format(OPERATOR_LABEL)\n collect_helper(output_dir,\n cmd=\"{} get all -o wide -n {} {}\".format(k8s_cli, namespace, selector),\n file_name=\"resources_list\",\n resource_name=\"resources list\",\n namespace=namespace)", "def test_list_deployment_config_for_all_namespaces(self):\n pass", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def test_child_index_response_descriptor_projects_project_projects_release_project_resource_release_resource_spaces(self):\n pass", "def 
test_list_namespaced_applied_cluster_resource_quota(self):\n pass" ]
[ "0.84293175", "0.8371931", "0.82569367", "0.8224417", "0.7847217", "0.7219924", "0.6882947", "0.6615695", "0.65908223", "0.6537668", "0.65073764", "0.6463803", "0.6305704", "0.62535024", "0.61962706", "0.61595654", "0.6100827", "0.60608906", "0.5947366", "0.5946404", "0.58986884", "0.58949417", "0.5880279", "0.5763452", "0.5733054", "0.5694511", "0.5671393", "0.5658129", "0.564597", "0.56440437" ]
0.9376608
0
Test case for load_response_descriptor_subscriptions_subscription_subscription_resource
def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_get_subscription(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_subscription(self):\n pass", "def test_get_subscription_template(self):\n pass", "def test_get_subscription_templates(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_process_subscriptions(self):\n pass", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_update_subscription(self):\n pass", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_subscriptions(self):\n pass", "def test_list_template_subscriptions(self):\n pass", "def tests_get_subscription(self):\n manager_root = ISubscriptionManager(self.root)\n manager_root.subscribability = SUBSCRIBABLE\n manager_root.subscribe('[email protected]')\n manager_folder = ISubscriptionManager(self.root.folder)\n manager_folder.subscribe('[email protected]')\n\n manager = ISubscriptionManager(self.root.folder.index)\n manager.subscribability = SUBSCRIBABLE\n manager.subscribe('[email protected]')\n\n self.assertEqual(\n manager.get_subscription('[email protected]'),\n None)\n subscription = manager.get_subscription('[email protected]')\n self.assertTrue(verifyObject(ISubscription, subscription))\n self.assertEqual(subscription.email, '[email protected]')\n self.assertEqual(subscription.content, self.root)\n self.assertEqual(len(manager.get_subscriptions()), 3)\n\n manager_root.subscribability = NOT_SUBSCRIBABLE\n\n self.assertEqual(\n manager.get_subscription('[email protected]'),\n None)\n subscription = manager.get_subscription('[email protected]')\n self.assertTrue(verifyObject(ISubscription, subscription))\n self.assertEqual(subscription.email, '[email protected]')\n self.assertEqual(subscription.content, self.root.folder.index)\n self.assertEqual(len(manager.get_subscriptions()), 1)", "def test_expand_subscription_request(self):\n\n input_dict = dict(\n resource_id=\"test_resource_id\",\n protocol=\"test_protocol\",\n endpoint=\"test_endpoint\",\n event_type=\"test_event_type\",\n event_format=\"test_event_format\"\n )\n module = MockModule(input_dict=input_dict)\n actual_event_subscription = expand_subscription_request(module=module)\n\n self.assertEqual(\"test_resource_id\", actual_event_subscription.resource_id)\n 
self.assertEqual(\"test_protocol\", actual_event_subscription.protocol)\n self.assertEqual(\"test_endpoint\", actual_event_subscription.endpoint)\n self.assertEqual(\"test_event_type\", actual_event_subscription.event_type)\n self.assertEqual(\"test_event_format\", actual_event_subscription.event_format)", "def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'", "def test_create_subscription_template(self):\n pass", "def test_successful_subscriptions_exists_subbed(self) -> None:\n self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage\n self.helper_subscriptions_exists(self.streams[0], True, True)", "def test_load_response_descriptor_events_event_event_resource(self):\n pass", "def subscribe_verify(self,\n raw_response: Any,\n sub_mode: str = 'SAMPLE',\n *args,\n **kwargs):\n pass" ]
[ "0.8565985", "0.8484611", "0.841637", "0.80878854", "0.7847351", "0.77231085", "0.7458851", "0.7193779", "0.70314056", "0.69356346", "0.6887608", "0.67824644", "0.65065", "0.63490146", "0.6303993", "0.6292917", "0.6265991", "0.6260562", "0.62523186", "0.6239016", "0.61835974", "0.61286", "0.6067403", "0.60601425", "0.6039283", "0.60210925", "0.6008072", "0.5991348", "0.5973217", "0.5927565" ]
0.94681466
0
Test case for load_response_descriptor_subscriptions_subscription_subscription_resource_spaces
def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_events_event_event_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_load_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_modify_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_index_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_modify_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_index_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_get_subscription_templates(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_child_index_response_descriptor_projects_release_projects_deployment_release_resource_deployment_resource_spaces(self):\n pass", "def test_get_subscription(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource(self):\n pass" ]
[ "0.8682473", "0.85453385", "0.84766823", "0.83558804", "0.81268406", "0.7312639", "0.7308806", "0.7230002", "0.71687704", "0.7120318", "0.70175415", "0.69992745", "0.6867542", "0.6661521", "0.6638656", "0.6579834", "0.6508864", "0.6475547", "0.64672554", "0.6386832", "0.6359892", "0.63263905", "0.6309862", "0.607099", "0.59795564", "0.5959133", "0.5955517", "0.59181994", "0.57836616", "0.5644431" ]
0.94706297
0
Test case for modify_response_descriptor_subscriptions_subscription_subscription_resource
def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_get_subscription(self):\n pass", "def test_create_subscription(self):\n pass", "def test_issue_subscriptions(self):\n pass", "def test_cmd_cs_subscription_update(self, mocker):\n\n mock_update_return = {\n 'subscription_id': SUBSCRIPTION_ID,\n 'account_id': 'a-123'\n }\n\n mock_subscription_client_update = mocker.patch.object(\n SubscriptionClient,\n \"update\",\n return_value=mock_update_return\n )\n\n declaration_file = 'decl.json'\n expected_config_file = os.path.join(os.getcwd(), declaration_file)\n result = self.runner.invoke(cli, ['subscription', 'update', '--subscription-id',\n SUBSCRIPTION_ID, '--declaration', 'decl.json'])\n assert result.output == json.dumps(mock_update_return, indent=4, sort_keys=True) + '\\n'\n assert mock_subscription_client_update.call_args[1]['config_file'] == expected_config_file", "def test_update_template_subscription(self):\n pass", "def test_update_subscription(self):\n args = dict(trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )\n\n try:\n self.arb.update_subscription(**args)\n except KeyError:\n self.arb.update_subscription(subscription_id=u\"1234\", **args)", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_process_subscriptions(self):\n pass", "def test_delete_subscription(self):\n pass", "def test_cmd_cs_subscription_list(self, mocker):\n\n mock_response = {\n 'foo': 'bar'\n }\n mocker.patch.object(\n SubscriptionClient,\n \"list\",\n return_value=mock_response\n )\n\n result = self.runner.invoke(cli, ['subscription', 'list'])\n assert result.output == 
json.dumps(mock_response, indent=4, sort_keys=True) + '\\n'", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_expand_subscription_request(self):\n\n input_dict = dict(\n resource_id=\"test_resource_id\",\n protocol=\"test_protocol\",\n endpoint=\"test_endpoint\",\n event_type=\"test_event_type\",\n event_format=\"test_event_format\"\n )\n module = MockModule(input_dict=input_dict)\n actual_event_subscription = expand_subscription_request(module=module)\n\n self.assertEqual(\"test_resource_id\", actual_event_subscription.resource_id)\n self.assertEqual(\"test_protocol\", actual_event_subscription.protocol)\n self.assertEqual(\"test_endpoint\", actual_event_subscription.endpoint)\n self.assertEqual(\"test_event_type\", actual_event_subscription.event_type)\n self.assertEqual(\"test_event_format\", actual_event_subscription.event_format)", "def test_issue_delete_subscription(self):\n pass" ]
[ "0.8563571", "0.84658855", "0.8058546", "0.80224764", "0.7713785", "0.75516945", "0.7453322", "0.7192042", "0.71334755", "0.71012443", "0.70175844", "0.7000383", "0.6748506", "0.6710934", "0.6697179", "0.66575694", "0.66456854", "0.6641054", "0.6612385", "0.6570631", "0.65649444", "0.6503902", "0.6501905", "0.6468531", "0.64355934", "0.63731563", "0.6298211", "0.6295012", "0.6208842", "0.6179798" ]
0.9448674
0
Test case for modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces
def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_modify_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_create_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_modify_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_create_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_index_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_child_index_response_descriptor_projects_release_projects_deployment_release_resource_deployment_resource_spaces(self):\n pass", "def test_load_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass", "def test_child_index_response_descriptor_projects_project_projects_release_project_resource_release_resource_spaces(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_load_response_descriptor_events_event_event_resource_spaces(self):\n pass", "def test_list_all_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_modify_response_descriptor_projects_release_release_resource(self):\n pass", "def test_child_index_response_descriptor_policies_machine_policy_machines_deployment_targets_deployment_target_machine_policy_resource_deployment_target_resource_spaces(self):\n pass", "def test_index_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def test_update_subscription(self):\n pass" ]
[ "0.87181616", "0.8456382", "0.83001345", "0.8015579", "0.7987088", "0.76178694", "0.7296327", "0.72865313", "0.71541685", "0.69881696", "0.69022465", "0.69001484", "0.6717512", "0.6682169", "0.6617533", "0.66076016", "0.6596086", "0.6362363", "0.63535696", "0.629757", "0.6274665", "0.61863935", "0.6126464", "0.6065326", "0.60577476", "0.5995032", "0.59438616", "0.5940625", "0.59067494", "0.5863138" ]
0.9437597
0
Returns country by location name. May raise LocationNotFound and LocationReplacement
def country(name):
    return location_db().find(name=name)["country"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_country_name(df, location):\n d = df[df.location == location]\n return d.country.values[0]", "def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None", "def lookup_country(latitude, longitude):\n r = requests.get(\"https://api.opencagedata.com/geocode/v1/json?q={}+{}&key=1a43cea9caa6420a8faf6e3b4bf13abb\".format(latitude, longitude))\n if r.status_code != 200:\n print(\"Error accessing OpenCage API: {}\".format(r.content))\n return \"Unknown\"\n result = r.json()\n if not \"results\" in result.keys() or len(result[\"results\"]) < 1:\n print(\"No results found\")\n return \"Unknown\"\n components = result[\"results\"][0][\"components\"]\n if not \"country\" in components.keys():\n print(\"Couldn't locate {}N {}E to a country\".format(latitude, longitude))\n return \"Unknown\"\n return components[\"country\"]", "def test_get_country_by_geo_location(self):\n pass", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. 
Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None", "def get_country_code(country_name) :\n for code, name in COUNTRIES.items() :\n if name==country_name :\n return code\n # if the country wasn't found, return None\n return None", "def locate(location):\n coord = None\n country_name = None\n if location:\n location = location.lower()\n\n for ind, row in country_map.iterrows():\n\n if (\n (re.match(r'(.*\\W|\\W*){}\\b'.format(row['code2']), location))\n or(re.match(r'(.*\\W|\\W*){}\\b'.format(row['code3']), location))\n or(re.match(r'(.*\\W|\\W*){}\\b'.format(row['name']), location))):\n\n coord = [row['lat'], row['lang']]\n country_name = row['name']\n break\n return country_name, coord", "def get_country_code(country_name):\n\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n\n # If country was not found, return nothing\n return None", "def get_country(ip):\r\n return geoip.country_code_by_addr(ip)", "def get_country_code(contry_name):\n for code, name in COUNTRIES.items():\n if name == contry_name:\n return code\n return None", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n # if string isn't found returns None\n else:\n continue", "def get_user_country(user_location):\n geo_locator = geopy.Nominatim(user_agent=\"User Location\", timeout=10)\n location = geo_locator.reverse(user_location, language='en')\n location = str(location).split(', ')\n country = location[-1]\n\n if country == 'United States of America':\n country = 'USA'\n elif country == 'United Kingdom':\n country = 'UK'\n\n return country", "def get_country_name(ip_addr):\n global geoip_db_reader\n try:\n name = geoip_db_reader.country(ip_addr).country.name\n return name\n except geoip2.errors.AddressNotFoundError:\n return None", "def continent(name=None):\n ldb = location_db()\n try:\n return ldb.find_continent(country=name)\n except LocationNotFound:\n return ldb.find_continent(country=ldb.find(name=name)[\"country\"])", "def get_country_code(self):\n\n try:\n sub_div = next(sub_div for sub_div in pycountry.subdivisions if sub_div.name == self.location)\n country = next(country for country in pycountry.countries if country.alpha_2 == sub_div.country_code)\n return country.alpha_3\n except StopIteration as exc:\n print(\"Cannot find subdivision in\" + str(exc))\n return 'XXX'", "def country_identifier(name):\n if name.lower() in _country_dict.keys():\n return _country_dict[name.lower()]\n else:\n return name", "def get_country_code(country_name):\n # values = list(COUNTRIES.values())\n # keys = list(COUNTRIES.keys())\n #\n # try:\n # index = values.index(country_name)\n # except ValueError:\n # # Not found\n # return None\n #\n # return keys[index]\n\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n\n return None", "def get_country(ip_addr):\r\n html = urllib2.urlopen('http://freegeoip.net/json/' + ip_addr).read()\\\r\n .decode('utf-8')\r\n responseJson = json.loads(html)\r\n return responseJson.get('country_code')", "def get_country_info(country):\n return GoogleV3().geocode(country)", "def mock_country_code_by_addr(self, ip_addr):\r\n ip_dict = {\r\n '1.0.0.0': 'CU',\r\n '2.0.0.0': 'IR',\r\n '3.0.0.0': 'SY',\r\n '4.0.0.0': 'SD',\r\n '5.0.0.0': 'AQ', # Antartica\r\n }\r\n return ip_dict.get(ip_addr, 'US')", "def get_country_from_record(ip):\r\n record = 
geoip.record_by_addr(ip)\r\n if record != None:\r\n return record['country_code']", "def get_geo_location(country: str) -> Optional[str]:\n country_to_geo_location_mapping = {\n \"Portugal\": \"Europe\",\n \"Spain\": \"Europe\",\n \"France\": \"Europe\",\n \"Italy\": \"Europe\",\n \"Malta\": \"Europe\",\n \"Switzerland\": \"Europe\",\n \"Austria\": \"Europe\",\n \"Slovenia\": \"Europe\",\n \"Croatia\": \"Europe\",\n \"Greece\": \"Europe\",\n \"Turkey\": \"Europe\",\n \"North Macedonia\": \"Europe\",\n \"Poland\": \"Europe\",\n \"Germany\": \"Europe\",\n \"Netherlands\": \"Europe\",\n \"Denmark\": \"Europe\",\n \"Sweden\": \"Europe\",\n \"Norway\": \"Europe\",\n \"Finland\": \"Europe\",\n \"Latvia\": \"Europe\",\n \"Russia\": \"Europe\",\n \"Belgium\": \"Europe\",\n \"Ireland\": \"Europe\",\n \"United Kingdom\": \"Europe\",\n \"Iceland\": \"Europe\",\n \"Canada\": \"North America\",\n \"Mexico\": \"North America\",\n \"United States\": \"North America\",\n }\n\n return country_to_geo_location_mapping.get(country, None)", "def get_country(self):\n return self.reference[REF_COUNTRY][REF_VALUE]", "def country(self):\n if self._country is not None:\n return self._country\n if not self.isValid():\n return None\n self._country = self.geocoder_results.country\n return self._country", "def get_country(self, country):\n if country == \"United Kingdom\": return \"en\"\n if country == \"Portugal\": return \"pt\"\n\n result = self.session.get(\"https://en.ogame.gameforge.com\")\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n code_list = soup.find(\"ul\", {\"id\": \"mmoList1\"})\n countries = {}\n for tag in code_list.find_all(\"li\"):\n link = tag.find(\"a\")[\"href\"]\n name = tag.string.strip() # name of the country\n code = link.split(\".\")[0].replace(\"//\", \"\")\n countries[name] = code # save to the dict\n\n # check if input was ok\n if not country in countries.keys():\n self.crash(\"Country\", country, \"was not found on the list.\")\n if len(countries[country]) != 2:\n self.crash(\"Can't fetch code for country\", country)\n\n return countries[country]", "def get_county_by_name(self, name):\n raise NotImplementedError()", "def searchCountry(host):\n process = subprocess.Popen(\"geoiplookup \"+host,stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n secondPart = output.split(\"GeoIP Country Edition: \", 1)[1]\n country = secondPart.split(\"\\nGeoIP City Edition\", 1)[0]\n return country", "def country() -> str:", "def get_ip_country(ip_address):\r\n with requests.Session() as s:\r\n resp = s.get(IPINFO_URL.format(ip=ip_address))\r\n resp.raise_for_status()\r\n return resp.json()['country']", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")" ]
[ "0.6946081", "0.6925547", "0.689955", "0.6889994", "0.68031585", "0.67772377", "0.6711055", "0.66826653", "0.6602543", "0.65752983", "0.6561224", "0.6550494", "0.6543176", "0.65219474", "0.6404109", "0.63928", "0.6388106", "0.63348466", "0.63323516", "0.6282033", "0.62559354", "0.61775494", "0.6159959", "0.6047104", "0.6009112", "0.59924144", "0.5982803", "0.597106", "0.5966651", "0.5949842" ]
0.8324526
0
Returns continent by either location name or country. May raise LocationNotFound and LocationReplacement
def continent(name=None):
    ldb = location_db()
    try:
        return ldb.find_continent(country=name)
    except LocationNotFound:
        return ldb.find_continent(country=ldb.find(name=name)["country"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_continent(country_code):\r\n for c in continents:\r\n if c.contains(country_code):\r\n return c\r\n plog(\"INFO\", country_code + \" is not on any continent\")\r\n return None", "def country(name):\n return location_db().find(name=name)[\"country\"]", "def continent(self) -> Optional[str]:\n return pulumi.get(self, \"continent\")", "def test_get_country_by_geo_location(self):\n pass", "def get_country_details(self,country):\n try:\n country_obj = pycountry.countries.get(name=country)\n if country_obj is None:\n c = pycountry.countries.search_fuzzy(country)\n country_obj = c[0]\n continent_code = pc.country_alpha2_to_continent_code(country_obj.alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj.alpha_3, continent\n except:\n if 'Congo' in country:\n country = 'Congo'\n elif country == 'Diamond Princess' or country == 'Laos' or country == 'MS Zaandam' or country == 'Holy See' or country == 'Timor-Leste':\n return country, country\n elif country == 'Korea, South' or country == 'South Korea':\n country = 'Korea, Republic of'\n elif country == 'Taiwan*':\n country = 'Taiwan'\n elif country == 'Burma':\n country = 'Myanmar'\n elif country == 'West Bank and Gaza':\n country = 'Gaza'\n else:\n return country, country\n country_obj = pycountry.countries.search_fuzzy(country)\n continent_code = pc.country_alpha2_to_continent_code(country_obj[0].alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj[0].alpha_3, continent", "def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None", "def test_single_word_exeter(self):\n result = location.lookup_location('Exeter GB')\n\n self.assertEqual(result['country'], 'GB')", "def locate(location):\n coord = None\n country_name = None\n if location:\n location = location.lower()\n\n for ind, row in country_map.iterrows():\n\n if (\n (re.match(r'(.*\\W|\\W*){}\\b'.format(row['code2']), location))\n or(re.match(r'(.*\\W|\\W*){}\\b'.format(row['code3']), location))\n or(re.match(r'(.*\\W|\\W*){}\\b'.format(row['name']), location))):\n\n coord = [row['lat'], row['lang']]\n country_name = row['name']\n break\n return country_name, coord", "def lookup_country(latitude, longitude):\n r = requests.get(\"https://api.opencagedata.com/geocode/v1/json?q={}+{}&key=1a43cea9caa6420a8faf6e3b4bf13abb\".format(latitude, longitude))\n if r.status_code != 200:\n print(\"Error accessing OpenCage API: {}\".format(r.content))\n return \"Unknown\"\n result = r.json()\n if not \"results\" in result.keys() or len(result[\"results\"]) < 1:\n print(\"No results found\")\n return \"Unknown\"\n components = result[\"results\"][0][\"components\"]\n if not \"country\" in components.keys():\n print(\"Couldn't locate {}N {}E to a country\".format(latitude, longitude))\n return \"Unknown\"\n return components[\"country\"]", "def get_country_name(df, location):\n d = df[df.location == location]\n return d.country.values[0]", "def get_country_info(country):\n return GoogleV3().geocode(country)", "def get_continent(self):\n return self._tab.find(\"table\", class_=\"details\").find(\"td\", class_=\"value\").get_text()", "def get_user_country(user_location):\n geo_locator = geopy.Nominatim(user_agent=\"User Location\", timeout=10)\n 
location = geo_locator.reverse(user_location, language='en')\n location = str(location).split(', ')\n country = location[-1]\n\n if country == 'United States of America':\n country = 'USA'\n elif country == 'United Kingdom':\n country = 'UK'\n\n return country", "def get_country_code(country_name) :\n for code, name in COUNTRIES.items() :\n if name==country_name :\n return code\n # if the country wasn't found, return None\n return None", "def country_or_region(self) -> str:\n return pulumi.get(self, \"country_or_region\")", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None", "def get_country_code(self):\n\n try:\n sub_div = next(sub_div for sub_div in pycountry.subdivisions if sub_div.name == self.location)\n country = next(country for country in pycountry.countries if country.alpha_2 == sub_div.country_code)\n return country.alpha_3\n except StopIteration as exc:\n print(\"Cannot find subdivision in\" + str(exc))\n return 'XXX'", "def _perContinentChoiceSelector(self, params):\n\n entity = params['entity']\n choices = soc.models.countries.COUNTRIES_TO_CONTINENT\n\n if 'fields' in params:\n fields = params['fields']\n\n for field in fields:\n entity = entity.__getattribute__(field)\n\n return choices[entity.res_country]", "def get_country_code(country_name):\n\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n\n # If country was not found, return nothing\n return None", "def get_geo_location(country: str) -> Optional[str]:\n country_to_geo_location_mapping = {\n \"Portugal\": \"Europe\",\n \"Spain\": \"Europe\",\n \"France\": \"Europe\",\n \"Italy\": \"Europe\",\n \"Malta\": \"Europe\",\n \"Switzerland\": \"Europe\",\n \"Austria\": \"Europe\",\n \"Slovenia\": \"Europe\",\n \"Croatia\": \"Europe\",\n \"Greece\": \"Europe\",\n \"Turkey\": \"Europe\",\n \"North Macedonia\": \"Europe\",\n \"Poland\": \"Europe\",\n \"Germany\": \"Europe\",\n \"Netherlands\": \"Europe\",\n \"Denmark\": \"Europe\",\n \"Sweden\": \"Europe\",\n \"Norway\": \"Europe\",\n \"Finland\": \"Europe\",\n \"Latvia\": \"Europe\",\n \"Russia\": \"Europe\",\n \"Belgium\": \"Europe\",\n \"Ireland\": \"Europe\",\n \"United Kingdom\": \"Europe\",\n \"Iceland\": \"Europe\",\n \"Canada\": \"North America\",\n \"Mexico\": \"North America\",\n \"United 
States\": \"North America\",\n }\n\n return country_to_geo_location_mapping.get(country, None)", "def test_double_word_coombe_martin(self):\n result = location.lookup_location('Combe Martin GB')\n\n self.assertEqual(result['country'], 'GB')", "def test_single_word_boston(self):\n result = location.lookup_location('Boston GB')\n\n self.assertEqual(result['country'], 'GB')", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n # if string isn't found returns None\n else:\n continue", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def search_using_magento_region(cls, region, country):\n subdivisions = cls.search([\n ('name', 'ilike', region),\n ('country', '=', country.id),\n ])\n\n # TODO: Exception need be created if subdivison does not exist.\n\n return subdivisions and subdivisions[0] or None", "def test_single_word_swanage(self):\n result = location.lookup_location('Swanage GB')\n\n self.assertEqual(result['country'], 'GB')", "def get_country(ip):\r\n return geoip.country_code_by_addr(ip)", "def geonames_query(lat, lon):\n error = False\n if lat == 0 and lon == 0:\n error = True\n if abs(lat) > 90 or abs(lon) > 180:\n error = True\n if error is True:\n d = \"Unknown\"\n return d\n\n k = \"|\".join([str(round(lat, 2)), str(round(lon, 2))])\n d = memcache.get(k)\n\n if d is not None:\n # logging.info(\"Retrieved country from memcache\")\n return d\n else:\n params = {\n 'formatted': 'true',\n 'lat': lat,\n 'lng': lon,\n 'username': 'jotegui',\n 'style': 'full'\n }\n try:\n d = api_query(api_url=GNM_URL, params=params)['countryName']\n except KeyError:\n d = \"Unknown\"\n memcache.add(k, d)\n return d", "def get_country_code(country_name):\n # values = list(COUNTRIES.values())\n # keys = list(COUNTRIES.keys())\n #\n # try:\n # index = values.index(country_name)\n # except ValueError:\n # # Not found\n # return None\n #\n # return keys[index]\n\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n\n return None", "def country_identifier(name):\n if name.lower() in _country_dict.keys():\n return _country_dict[name.lower()]\n else:\n return name" ]
[ "0.7018489", "0.6839589", "0.64475894", "0.64172214", "0.6319791", "0.6041361", "0.6034119", "0.6031853", "0.5983774", "0.59171104", "0.58586824", "0.5858009", "0.583927", "0.58281827", "0.57834524", "0.5782462", "0.571797", "0.5714907", "0.57142687", "0.5703324", "0.56661636", "0.5660493", "0.5656584", "0.5642539", "0.56095237", "0.55965114", "0.55575424", "0.5549445", "0.5536759", "0.5478397" ]
0.8362245
0
Check that every "locations" entry has a corresponding "names" entry
def check(self):
    missing = []
    for name in self.data["locations"]:
        try:
            n = self.data["names"][name]
        except KeyError:
            missing.append(name)
    if missing:
        raise RuntimeError("\"names\" list lacks:\n " + "\n ".join(missing))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_names(sections):\n return _check_nentries(sections, \"NAMES\", \"NAMES\")", "def check_glyph_name_in_glyph_set(self, *names):\n if self.glyphNames_:\n for name in names:\n if name in self.glyphNames_:\n continue\n if name not in self.missing:\n self.missing[name] = self.cur_token_location_", "def check_location_page(loc: TMB_Classes.LocationClass, location_species: dict, location_bi_names: dict,\n location_sp_names: dict) -> None:\n all_species = set()\n all_species |= location_species[loc.name]\n all_bi_names = set()\n all_bi_names |= location_bi_names[loc.name]\n all_sp_names = set()\n all_sp_names |= location_sp_names[loc.name]\n if loc.n_direct_children() > 0:\n for c in loc.direct_children():\n all_species |= fetch_child_data(c, location_species)\n all_bi_names |= fetch_child_data(c, location_bi_names)\n all_sp_names |= fetch_child_data(c, location_sp_names)\n\n # if (len(all_species) == 0) and (len(all_bi_names) == 0) and (len(all_sp_names) == 0):\n # report_error(\"Phantom Location: \" + loc.name)\n\n if loc.n_children() > 0:\n for c in loc.children:\n check_location_page(c, location_species, location_bi_names, location_sp_names)", "def contains(name):", "def _check_availability(self, names: Iterable) -> None:\n unavailable = [x for x in names if x not in self.__by_name.keys()]\n if unavailable:\n raise ValueError(f'datasets: {unavailable} not available in the {self.region} region.')", "def assert_contains(conflict_places, name, other_names):\n place = conflict_places.named_place(name)\n for other_name in other_names:\n other_place = conflict_places.named_place(other_name)\n assert place.contains(other_place)", "def _does_output_dict_contain_info(self):\n for species_output_dict in self.output.values():\n for key0, val0 in species_output_dict.items():\n if key0 in ['paths', 'job_types']:\n for key1, val1 in species_output_dict[key0].items():\n if val1 and key1 not in ['rotors', 'bde']:\n return True\n else:\n if val0:\n return True\n return False", "def contains_any(self, gstmt, names):\n for sub in gstmt.substmts:\n if sub.keyword in self.schema_nodes and sub.arg in names:\n return True\n elif sub.keyword == \"uses\":\n if self.contains_any(sub.i_grouping, names):\n return True\n return False", "def _all_names_unused(elts, unused_name_nodes):\n for elt in elts:\n if isinstance(elt, (ast.List, ast.Tuple)):\n if not _all_names_unused(elt.elts, unused_name_nodes):\n return False\n if elt not in unused_name_nodes:\n return False\n return True", "def test_autocomplete_locations_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/locations/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name',\n 'slug', 'text_for_apartments_search',\n 'text_for_complexes_search', 'type_name']\n # ac_keys_full = ac_keys + [\"metro_stations\"]\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n # self.check_list_item_keys(ac[\"ancestors\"], ac_keys_full)\n self.assertIsInstance(ac['id'], int)\n self.assertIsInstance(ac['is_region'], bool)\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['prepositional_name'], str)\n self.assertIsInstance(ac['slug'], str)\n self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None)))\n self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None)))\n self.assertIsInstance(ac['type_name'], str)", "def 
complete_info(entry):\n required_info = [\"first_name\", \"last_name\", \"from\", \"entry_reason\",\n \"passport\", \"birth_date\", \"home\"]\n if_complete = True\n\n for info in required_info:\n if info not in entry:\n if_complete = False\n break\n return if_complete", "def species_has_geo(species_output_dict: dict) -> bool:\n if species_output_dict['paths']['geo'] or species_output_dict['paths']['composite']:\n return True\n return False", "def is_valid(self):\n for location in self.locations.values():\n if not location.is_valid:\n return False\n return True", "def _validate_names(mapping: Mapping[str, Any],\n ref: str) -> Tuple[Set[str], List[SchemaError]]:\n errs = [] # type: List[SchemaError]\n\n names = {mapping['name']}\n\n if 'classes' in mapping:\n for i, obj in enumerate(mapping['classes']):\n name = obj['name']\n if name in names:\n errs.append(\n SchemaError(\n message=\"Duplicate names: {!r}\".format(name),\n ref=\"{}/classes/{}/name\".format(ref, i)))\n\n names.add(name)\n\n if 'embeds' in mapping:\n for i, obj in enumerate(mapping['embeds']):\n name = obj['name']\n if name in names:\n errs.append(\n SchemaError(\n message=\"Duplicate names: {!r}\".format(name),\n ref=\"{}/embeds/{}/name\".format(ref, i)))\n\n names.add(name)\n\n return names, errs", "def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def test_has_location_with_invalid_states():\n for state in (None, 1, \"hello\", object):\n assert not location.has_location(state)", "def __can_add_entry_by_name(self, _wiki_entry):\n for ename in self.__exclude_name_list:\n if(_wiki_entry.find(ename) >= 0):\n return False\n return True", "def persona_exists_locally(self, name):\n processed_name = self.process_name(name)\n for dir_ in self.persona_dir:\n dir_ = dir_/processed_name\n if dir_.is_dir() and all(\n name in [path.name for path in dir_.iterdir()]\n for name in ('gender.json', 'summary.txt')):\n return True\n return False", "def get_names(parsed_data):\n known_values = []\n result = []\n # get name from contacts\n contacts = {'registrant_contact': [], 'administrative_contact': [], 'technical_contact': [],\n 'domain_registrar': []}\n if 'registrant_contact' in parsed_data:\n contacts['registrant_contact'].append(parsed_data['registrant_contact'])\n if 'administrative_contact' in parsed_data:\n contacts['administrative_contact'].append(parsed_data['administrative_contact'])\n if 'technical_contact' in parsed_data:\n contacts['technical_contact'].append(parsed_data['technical_contact'])\n if 'domain_registrar' in parsed_data:\n contacts['domain_registrar'].append(parsed_data['domain_registrar'])\n\n for contact, info in contacts.items():\n # properties dictionary\n fax = {'fax': '', 'type': 4}\n phone = {'phone': '', 'type': 4}\n country = {'country': '', 'type': 11}\n street = {'street': '', 'type': 8}\n city = {'city': '', 'type': 11}\n email = {'email': '', 'type': 2}\n if info is not None:\n d = {'type': 11, 'data': '', 'properties': {}, 'special_properties': {}, 'ref': {}}\n properties_list = []\n special_properties_list = []\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': ''}})\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = 
parsed_data['whois_server']\n\n for name in info:\n if 'full_name' in name:\n if name['full_name'] in known_values:\n break\n if 'registrar_name' in name:\n if name['registrar_name'] in known_values:\n break\n\n for feature in name.keys():\n if feature == 'full_name':\n d['data'] = name['full_name']\n known_values.append(name['full_name'])\n if feature == 'registrar_name':\n d['data'] = name['registrar_name']\n known_values.append(name['registrar_name'])\n if feature == 'city_name':\n city['city'] = name['city_name']\n if feature == 'street_name':\n street['street'] = name['street_name']\n if feature == 'country_name':\n country['country'] = name['country_name']\n if feature == 'phone_number':\n phone['phone'] = name['phone_number']\n if feature == 'fax_number':\n fax['fax'] = name['fax_number']\n if feature == 'email_address':\n email['email'] = name['email_address']\n # if name is null, discard other info\n if d['data'] == '':\n continue\n # saving name special properties\n special_properties_list.append({'is_username': False, 'type': 0})\n special_properties_list.append({'is_domain_name': False, 'type': 0})\n special_properties_list.append({'is_public_name': False, 'type': 0})\n special_properties_list.append({'is_account_name': False, 'type': 0})\n d['special_properties'] = special_properties_list\n properties_list.append(fax)\n properties_list.append(phone)\n properties_list.append(country)\n properties_list.append(street)\n properties_list.append(city)\n properties_list.append(email)\n d['properties'] = properties_list\n result.append(d)\n return result", "def market_contains_location(market, loc):\r\n for street in market:\r\n if loc in street:\r\n return True\r\n return False", "def test_addr_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_name(input_val)\n self.assertEqual(output_val, self.line.addr_name)", "def get_valid_locations(location_list, grid, shape):", "def describe_locations():\n pass", "def __has_conflicting_node_names(self):\n # check length of sets to determine if overlap exists\n return len({node.get_name() for node in self.get_nodeset()}) != len(self.get_nodeset())", "def test_location(self, all_metars):\n expected = [\"KIAH\", 'KGNV', 'KNID', 'KTPA', 'KP60']\n for metar, expected_val in zip(all_metars, expected):\n parser = Parser(metar)\n actual = parser.parse()\n assert expected_val == actual['location']", "def test_metadata_fonts_no_dupes(self):\n fonts = {}\n for x in self.metadata.get('fonts', None):\n self.assertFalse(x.get('fullName', '') in fonts)\n fonts[x.get('fullName', '')] = x\n\n self.assertEqual(len(set(fonts.keys())),\n len(self.metadata.get('fonts', None)))", "def fountain_on_location(game, loc):\n my_fountains = game.get_my_mana_fountains()\n for fountain in my_fountains:\n if fountain.location.equals(loc):\n return True\n return False", "def locations_name_array(space_array, locations):\n locations_array = [locations.find_one({\"_id\": dic['location']}, {\"_id\": 0, \"archive_name\": 1})[\"archive_name\"] for dic in space_array]\n return locations_array", "def match_info(info_dict):\n try:\n return info_dict['name']==\"Dragons' Den\"\n except KeyError:\n return False", "def __contains__(self, name):\n\n return name in self._wdict" ]
[ "0.6193882", "0.6053967", "0.5882687", "0.56852156", "0.5643407", "0.5642069", "0.56396973", "0.56266916", "0.5610263", "0.55818754", "0.55551094", "0.5553078", "0.55146766", "0.54917747", "0.54744285", "0.5434682", "0.54326814", "0.54243636", "0.54195035", "0.5419454", "0.5304511", "0.52933586", "0.52504027", "0.5239671", "0.5233694", "0.5225354", "0.52018213", "0.5193483", "0.5174762", "0.5172931" ]
0.68714684
0
Forwards data migration. Remove all the external accounts for GitHub, GTalk, Verbatim and Locamotion.
def forwards(apps, schema_editor):
    ExternalAccount = apps.get_model('users', 'ExternalAccount')
    ExternalAccount.objects.filter(type='GITHUB').delete()
    ExternalAccount.objects.filter(type='GTALK').delete()
    ExternalAccount.objects.filter(type='MOZILLALOCAMOTION').delete()
    ExternalAccount.objects.filter(type='MOZILLAVERBATIM').delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.all():\n referral.users.clear()\n referral.save()", "def remove_accounts(self):\n current_creds = self._accounts.copy()\n for creds in current_creds:\n self.remove_account(current_creds[creds].credentials.token,\n current_creds[creds].credentials.url)", "def unlink(self):\n if self._context.get('is_landlord_rent'):\n rent_ids = []\n for tenancy_rec in self:\n analytic_ids = self.env['account.analytic.line'].search(\n [('account_id', '=', tenancy_rec.id)])\n if analytic_ids and analytic_ids.ids:\n analytic_ids.unlink()\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', tenancy_rec.id)])\n post_rent = [x.id for x in rent_ids if x.move_check is True]\n if post_rent:\n raise Warning(\n _('''You cannot delete Tenancy record, if any related Rent'''\n '''Schedule entries are in posted.'''))\n else:\n rent_ids.unlink()\n return super(AccountAnalyticAccount, self).unlink()", "def unlink(self):\n analytic_accounts_to_delete = self.env['account.analytic.account']\n for project in self:\n if project.analytic_account_id and not project.analytic_account_id.line_ids:\n analytic_accounts_to_delete |= project.analytic_account_id\n result = super(Project, self).unlink()\n analytic_accounts_to_delete.unlink()\n return result", "def _clean_database(self):\n # noinspection PyUnresolvedReferences\n env = self.env\n cr = env.cr\n modules_to_resolve = [\n 'ch_vendor_info',\n 'API_PDA_receiver',\n 'delivery_report_custom',\n 'myevo_base',\n 'myevo_nobutton_sending_email',\n 'myevo_web',\n 'purchase_order_custom']\n\n # Rename model module ch_vendor_info\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'ch_vendor_info'\"\"\")\n # Delete module soupese_base models that exists in old models\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_res_users'\"\"\")\n # Rename\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'API_PDA_receiver'\"\"\")\n\n # Rename module\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'delivery_report_custom'\"\"\")\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_base'\"\"\")\n cr.execute(\n \"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_nobutton_sending_email'\"\"\")\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'myevo_web'\"\"\")\n\n # Delete module soupese_base models that exists in old models\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_measure_scale'\"\"\")\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_pda_operation'\"\"\")\n cr.execute(\"\"\"DELETE FROM ir_model_data WHERE module = 'soupese_base' AND name = 'model_res_partner'\"\"\")\n\n # Rename module\n cr.execute(\"\"\"UPDATE ir_model_data SET module = 'soupese_base' WHERE module = 'purchase_order_custom'\"\"\")\n\n # Rename module_ in base\n for x in modules_to_resolve:\n cr.execute(\"\"\"\n DELETE FROM ir_model_data\n WHERE name = 'module_%s' AND module = 'base' AND model = 'ir.module.module'\"\"\", (x,))\n\n # Uninstall modules\n cr.execute(\"\"\"UPDATE ir_module_module SET state = 'uninstalled' WHERE name = '%s'\"\"\", (x,))\n\n # Remove vendor.information.scale table\n cr.execute(\"DROP TABLE vendor_information_scale\")\n\n # Commit 
finally\n cr.commit()", "def rollback(migrator, database, fake=False, **kwargs):\n\n migrator.remove_model('tea_teas_types')\n migrator.remove_model('tea_types')\n migrator.remove_model('tea_lists_items')\n migrator.remove_model('tea_lists')\n migrator.remove_model('tea_teas')\n migrator.remove_model('tea_vendors')", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def upgrade():\n try:\n op.drop_table(\"ggrc_gdrive_integration_alembic_version\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in a new DB with no trace of the removed chain\n pass\n else:\n raise\n\n # The following duplicates a part of a gdrive-related migration,\n # since a bunch of old migrations in ggrc refer to meetings table.\n # This part is relevant only for db_reset (new databases), so we\n # shouldn't recreate this table in downgrade.\n try:\n op.drop_table(\"meetings\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in an old DB where meetings has been dropped in the removed chain\n pass\n else:\n raise", "def _clean_amm_swaps(cursor: 'DBCursor') -> None:\n log.debug('Enter _clean_amm_swaps')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"sushiswap_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_trades%\";')\n cursor.execute('DROP VIEW IF EXISTS combined_trades_view;')\n cursor.execute('DROP TABLE IF EXISTS amm_swaps;')\n log.debug('Exit _clean_amm_swaps')", "def upgrade():\n with op.batch_alter_table(\"users\") as batch_op:\n batch_op.drop_column(\"registered_date\")\n batch_op.drop_column(\"registered_age\")\n batch_op.drop_column(\"cell\")\n batch_op.drop_column(\"portrait_id\")\n batch_op.drop_column(\"street_number\")\n batch_op.drop_column(\"id_value\")\n batch_op.drop_column(\"nat\")\n batch_op.drop_column(\"id_name\")\n batch_op.drop_column(\"md5\")\n batch_op.drop_column(\"date_of_birth\")\n batch_op.drop_column(\"sha256\")\n batch_op.drop_column(\"username\")\n batch_op.drop_column(\"salt\")\n batch_op.drop_column(\"timezone_offset\")\n batch_op.drop_column(\"uuid\")\n batch_op.drop_column(\"title\")\n batch_op.drop_column(\"age\")\n batch_op.drop_column(\"longitude\")\n batch_op.drop_column(\"sha1\")\n batch_op.drop_column(\"timezone_description\")\n batch_op.drop_column(\"password\")\n batch_op.drop_column(\"latitude\")", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def plone4_cleanup(context):\n portal = api.portal.get()\n qi_tool = api.portal.get_tool('portal_quickinstaller')\n\n # first, make sure all our new un-installers are installed :-)\n installed = [p['id'] for p in qi_tool.listInstalledProducts()]\n reinstall = [p for p in REMOVE_PRODUCTS if p not in installed]\n for p in reinstall:\n log.info(\"Reinstalling first: %s\", 
p)\n qi_tool.installProducts([p])\n\n # nuke content that we will not support after upgrade\n catalog = api.portal.get_tool('portal_catalog')\n for (p, ctype) in REMOVE_CONTENT.items():\n if p not in REMOVE_PRODUCTS:\n continue\n log.info(\"Removing content: %s\", p)\n for brain in catalog(portal_type=ctype):\n api.content.delete(brain.getObject())\n\n # sjeez TTW persistent interface references\n pvc = portal.portal_view_customizations\n log.info(\"Removing portal_view_customizations TTW cruft\")\n pvc.manage_delObjects(pvc.objectIds())\n\n # remove all the unwanted cruft packages themselves\n for p in REMOVE_PRODUCTS:\n log.info(\"Uninstalling: %s\", p)\n qi_tool.uninstallProducts([p])\n\n # clear the archetypes reference catalog\n log.info(\"Clearing reference catalog\")\n portal.reference_catalog.manage_catalogClear()\n\n # clean cruft from catalog\n log.info(\"Rebuilding portal_catalog. This will take a loooong time.\")\n ctool = api.portal.get_tool('portal_catalog')\n ctool.clearFindAndRebuild()\n\n # migrate to plone.app.contenttypes\n log.info(\"Enabling plone.app.contenttypes\")\n qi_tool.installProducts(['plone.app.contenttypes'])\n\n log.info(\"Committing changes.\")\n transaction.commit()", "async def unlink(self, ctx):\n # Remove all link tokens and spotify details for this user\n remove_tokens(ctx.author.id)\n remove_spotify_details(ctx.author.id)\n await ctx.reply(\"All your linked accounts were removed, if you had any!\")", "def reset_dbs():\n db.answering_users.remove({})\n db.answered_users.remove({})", "def tearDown(self):\n account_models.User.objects.all().delete()", "def drop_unlinked(data):\n data['exchanges'] = [exc\n for exc in data.get('exchanges', [])\n if exc['activity_code'] and exc['flow_code']\n ]\n return data", "def update_user_backward(apps, schema_editor):\n Group.objects.all().delete()", "def cleanup(self):\n\n # uninstall sourcedata\n if self.conversion.install_dataset_path.exists():\n # without the ChangeWorkingDir the command does not operate inside\n # of dataset_path\n with utils.ChangeWorkingDir(self.dataset_path):\n datalad.uninstall(\n path=self.conversion.install_dataset_name,\n dataset=self.dataset_path,\n recursive=True\n )\n\n # remove bids conversion\n bids_dir = self._get_bids_dir()\n if bids_dir.exists():\n self.log.info(\"Remove %s\", bids_dir)\n shutil.rmtree(bids_dir)", "def clean_up():\n drop_all_tables()\n create_all()", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def tearDown(self):\n self.cleanup_tenants()", "def migrate(self):\n\tpass", "def reset_db_danger():\n from flask.ext.migrate import init, migrate\n # Remove the migration folder if exist\n if os.path.exists('migrations'):\n shutil.rmtree('migrations')\n\n # Remove the sqlite database files if exist\n for fl in glob.glob('*.sqlite'):\n os.remove(fl)\n\n # Reset Migration Database\n init()\n\n # migrate database to latest revision\n migrate(message='init')", "def refresh(self):\n self._accounts = None", "def _cleanup_incomplete_migrations(self, context):\n LOG.debug('Cleaning up deleted instances with incomplete migration ')\n migration_filters = {'host': CONF.host,\n 'status': 'error'}\n migrations = objects.MigrationList.get_by_filters(context,\n migration_filters)\n\n if not migrations:\n return\n\n inst_uuid_from_migrations = set([migration.instance_uuid for migration\n in migrations])\n\n inst_filters = {'deleted': True, 'soft_deleted': False,\n 'uuid': inst_uuid_from_migrations}\n attrs = ['info_cache', 
'security_groups', 'system_metadata']\n with utils.temporary_mutation(context, read_deleted='yes'):\n instances = objects.InstanceList.get_by_filters(\n context, inst_filters, expected_attrs=attrs, use_slave=True)\n\n for instance in instances:\n if instance.host != CONF.host:\n for migration in migrations:\n if instance.uuid == migration.instance_uuid:\n # Delete instance files if not cleanup properly either\n # from the source or destination cloud nodes when\n # the instance is deleted during resizing.\n self.driver.delete_instance_files(instance)\n try:\n migration.status = 'failed'\n with migration.obj_as_admin():\n migration.save()\n except exception.MigrationNotFound:\n LOG.warning(_LW(\"Migration %s is not found.\"),\n migration.id, context=context,\n instance=instance)\n break", "def _pre_deploy_legacy_ltm_cleanup(self):\n\n # Detect legacy names (nodes do not include the route domain)\n self._bigip.refresh_ltm()\n existing_nodes = self._bigip.get_nodes()\n node_list = list(existing_nodes.keys())\n for node_name in node_list:\n route_domain = split_ip_with_route_domain(node_name)[1]\n if route_domain is None:\n break\n else:\n return\n\n existing_iapps = self._bigip.get_app_svcs()\n existing_virtuals = self._bigip.get_virtuals()\n existing_policies = self._bigip.get_l7policies()\n existing_irules = self._bigip.get_irules()\n existing_internal_data_groups = self._bigip.get_internal_data_groups()\n existing_pools = self._bigip.get_pools()\n\n delete_iapps = self._get_resource_tasks(existing_iapps, {})[2]\n delete_virtuals = self._get_resource_tasks(existing_virtuals, {})[2]\n delete_policies = self._get_resource_tasks(existing_policies, {})[2]\n delete_irules = self._get_resource_tasks(existing_irules, {})[2]\n delete_internal_data_groups = self._get_resource_tasks(\n existing_internal_data_groups, {})[2]\n delete_pools = self._get_resource_tasks(existing_pools, {})[2]\n delete_monitors = self._get_monitor_tasks({})[2]\n delete_nodes = self._get_resource_tasks(existing_nodes, {})[2]\n\n delete_tasks = delete_iapps + delete_virtuals + delete_policies + \\\n delete_irules + delete_internal_data_groups + delete_pools + \\\n delete_monitors + delete_nodes\n taskq_len = len(delete_tasks)\n\n finished = False\n LOGGER.debug(\"Removing legacy resources...\")\n while not finished:\n LOGGER.debug(\"Legacy cleanup service task queue length: %d\",\n taskq_len)\n\n # Must remove all resources that depend on nodes (vs, pools, ???)\n delete_tasks = self._delete_resources(delete_tasks)\n\n tasks_remaining = len(delete_tasks)\n\n # Did the task queue shrink?\n if tasks_remaining >= taskq_len or tasks_remaining == 0:\n # No, we have stopped making progress.\n finished = True\n\n # Reset the taskq length.\n taskq_len = tasks_remaining", "def _clean_up(self):", "def tearDown(self):\n Author.objects.all().delete()\n User.objects.all().delete()\n c.credentials()" ]
[ "0.5843353", "0.5682453", "0.56551605", "0.56420624", "0.56205434", "0.56109786", "0.5578443", "0.55584943", "0.5549693", "0.5548861", "0.5527124", "0.55008894", "0.5484804", "0.544788", "0.54415023", "0.5434805", "0.54217434", "0.5392795", "0.53572905", "0.5342952", "0.53320086", "0.53230494", "0.53210753", "0.53135073", "0.53041744", "0.5302397", "0.52877855", "0.52735764", "0.52619606", "0.5245723" ]
0.70499516
0
Copy the static resources.
def copy_static_resources(self):
    if not hasattr(settings, 'STATIC_ROOT'):
        raise MissingStaticRoot()
    destination = os.path.join(STORAGE_PATH, 'static')
    if os.path.exists(destination):
        shutil.rmtree(destination)
    shutil.copytree(settings.STATIC_ROOT, destination)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_static(self, outdir):\n pass", "def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)", "def cp_static_files(self,inpath,outpath): \n if inpath==self.static_dir:\n dest=os.path.join(outpath,os.path.basename(inpath))\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(inpath,dest))\n copyfiles(inpath,dest) \n else:\n for folder in os.listdir(inpath):\n if folder == 'static':\n logger.info('found static folder, copy all...')\n dest=os.path.join(outpath,folder)\n src=os.path.join(inpath,folder)\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(src,dest))\n copyfiles(src,dest)\n return 0", "def copyFiles(self, package):\n styleFiles = [self.stylesDir/'..'/'base.css']\n\tstyleFiles += [self.stylesDir/'..'/'popup_bg.gif']\n styleFiles += self.stylesDir.files(\"*.css\")\n if \"nav.css\" in styleFiles:\n styleFiles.remove(\"nav.css\")\n styleFiles += self.stylesDir.files(\"*.jpg\")\n styleFiles += self.stylesDir.files(\"*.gif\")\n styleFiles += self.stylesDir.files(\"*.png\")\n styleFiles += self.stylesDir.files(\"*.js\")\n styleFiles += self.stylesDir.files(\"*.html\")\n self.stylesDir.copylist(styleFiles, self.outputDir)\n package.resourceDir.copyfiles(self.outputDir)\n self.scriptsDir.copylist(('libot_drag.js', 'common.js'), \n self.outputDir)\n self.templatesDir.copylist(('videoContainer.swf', 'magnifier.swf',\n 'xspf_player.swf'),self.outputDir)\n (self.templatesDir/'fdl.html').copyfile(self.outputDir/'fdl.html')", "def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))", "def process_images():\n image_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/img/')\n static_images = os.path.join(settings.BASE_DIR, 'static/CMESH/img/')\n\n copy_files(image_path, static_images)", "def copy_images(repositories, static_dir):\n for repository in repositories:\n if repository.has_key('branch'):\n branch = repository['branch']\n else:\n branch = retrieve_current_branch(repository_directory=os.curdir, fix_environment=True)\n dir = fetch_repository(repository['url'], workdir=os.curdir, branch=branch)\n package_static_dir = os.path.join(dir, repository['package_name'], 'static')\n if os.path.exists(package_static_dir):\n copytree(package_static_dir, os.path.join(static_dir, repository['package_name']))", "def copy_support_files() -> None:\n # root folder files\n filelist = {\"favicon128.png\",\n \"favicon96.png\",\n \"favicon72.png\",\n \"favicon48.png\",\n \"favicon32.png\",\n \"favicon24.png\",\n \"favicon16.png\",\n \"favicon.ico\",\n \"apple-touch-icon.png\",\n \"apple-touch-icon-precomposed.png\",\n \"apple-touch-icon-72x72.png\",\n \"apple-touch-icon-72x72-precomposed.png\",\n \"apple-touch-icon-114x114.png\",\n 
\"apple-touch-icon-114x114-precomposed.png\",\n \"apple-touch-icon-144x144.png\",\n \"apple-touch-icon-144x144-precomposed.png\",\n \"uca_style.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/\" + filename, WEBOUT_PATH)\n except FileNotFoundError:\n report_error(\"Missing file: resources/\" + filename)\n # image folder files\n filelist = {\"film.png\",\n \"stylifera75.png\",\n \"DOI_logo.svg\",\n \"size_hist.png\",\n \"size_ind.png\",\n \"size_mean.png\",\n \"size_range.png\",\n \"size_summary.png\",\n \"double_clawed.jpg\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/images/\" + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/images/\" + filename)\n filelist = {\"specific_word_cloud.png\",\n \"binomial_word_cloud.png\"}\n for filename in filelist:\n try:\n shutil.copy2(TMP_PATH + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: \" + TMP_PATH + filename)\n # font-awesome files\n filelist = {\"fontawesome.min.js\",\n \"brands.min.js\",\n \"regular.min.js\",\n \"solid.min.js\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/font-awesome/js/\" + filename, WEBOUT_PATH + \"js/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/font-awesome/js/\" + TMP_PATH + filename)\n # flag-icon files\n filelist = {\"flag-icons.min.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/css/\" + filename, WEBOUT_PATH + \"images/flag-icon-css/css/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/css/\" + TMP_PATH + filename)\n filelist = {\"de.svg\", # Germany\n \"es.svg\", # Spain\n \"ru.svg\", # Russia\n \"fr.svg\", # France\n \"pt.svg\", # Portugal\n \"dk.svg\", # Denmark\n \"nl.svg\", # Netherlands\n \"jp.svg\", # Japan\n \"cn.svg\", # China\n \"us.svg\", # USA\n \"th.svg\", # Thailand\n \"va.svg\", # Vatican\n \"it.svg\", # Italy\n \"kr.svg\", # South Korea\n \"pl.svg\", # Poland\n \"mm.svg\", # Myanamar (Burma)\n \"sa.svg\", # Saudi Arabia (best option for Arabic of those available)\n \"id.svg\", # Indonesia\n \"za.svg\", # South Africa (best option for Afrikaans)\n \"my.svg\", # Malaysia (for Malay)\n \"mg.svg\", # Madagascar (for Malagasy)\n \"ir.svg\", # Iran (for Persian)\n \"vn.svg\"} # Vietnam\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/flags/4x3/\" + filename, WEBOUT_PATH +\n \"images/flag-icon-css/flags/4x3/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/flags/4x3/\" + TMP_PATH + filename)", "def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()", "def copy_assets(test_files):\n for path in test_files:\n shutil.copy(path, HOST_ASSETS_PATH)", "def copy_files(self):\n if settings.USE_S3_STORAGE:\n self.copy_to_s3()\n else:\n self.copy_to_local()", "def collectstatic():\n puts(yellow(\"Collect statics\"))\n django_manage('collectstatic', '-l', '--noinput')", "def copy_web_resources(output_dir):\n mypath = os.path.dirname(os.path.realpath(__file__))\n web_path = os.path.join(mypath, 'web')\n\n for (dirpath, dirnames, filenames) in os.walk(web_path):\n relpath = os.path.relpath(dirpath, web_path)\n tgtpath = os.path.join(output_dir, relpath)\n if not os.path.exists(tgtpath):\n os.makedirs(tgtpath)\n\n for f in [os.path.join(dirpath, 
filename) for filename in filenames]:\n shutil.copy(f, tgtpath)", "def copy_js(self):\n # Compiled JS files for copying\n js_dist_dir = os.path.join(node_root, 'dist', 'pydeck_embeddable')\n # Uncompiled JS files for copying\n # See https://github.com/jupyter-widgets/widget-ts-cookiecutter/blob/master/%7B%7Bcookiecutter.github_project_name%7D%7D/%7B%7Bcookiecutter.python_package_name%7D%7D/nbextension/static/extension.js\n js_src_dir = os.path.join(node_root, 'src')\n js_files = [\n os.path.join(js_src_dir, 'extension.js'),\n os.path.join(js_dist_dir, 'index.js'),\n os.path.join(js_dist_dir, 'index.js.map')\n ]\n static_folder = os.path.join(here, 'pydeck', 'nbextension', 'static')\n for js_file in js_files:\n log.debug('Copying %s to %s' % (js_file, static_folder))\n copy(js_file, static_folder)", "def collect_static():\n\n check_promt = (\n not env.prompt or\n console.confirm(\n \"Collect static files and copy them to collect_static?\",\n default=True,\n )\n )\n\n if check_promt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py collectstatic\"\n \" --noinput\"\n )", "def make_static_assets(opts):\n\n css_filename = do_css(opts['css_source_dir'], opts['out_dir'])\n js_filename = do_js(opts['js_source_dir'], opts['out_dir'])\n return {\n 'primary_css': css_filename,\n 'js': js_filename\n }", "def collect_static_files():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('python rnacentral/manage.py collectstatic --noinput')", "def init_static_data(log_to_console=False):\n # These are annoyingly necessary to live in the DB, currently. \n # Really this should be app logic, I think.\n load_report_types()\n load_roles()\n loc_file = getattr(settings, \"STATIC_LOCATIONS\")\n if loc_file:\n load_locations(loc_file, log_to_console=log_to_console)\n product_file = getattr(settings, \"STATIC_PRODUCTS\")\n if product_file:\n load_products(product_file, log_to_console=log_to_console)", "def CopyFiles(self):\n pass", "def __copyFiles(self):\n if os.path.isdir(self.__sourcePath):\n shutil.copytree(self.__sourcePath, self.__targetPath)\n else:\n shutil.copy2(self.__sourcePath, self.__targetPath)", "def assets():", "def copydir(self):\n pass", "def copy_files(self):\n files = ['LICENSE.md', 'CONTRIBUTING.md']\n this_dir = sh.pwd().strip()\n for _file in files:\n sh.cp(\n '{0}/templates/{1}'.format(this_dir, _file),\n '{0}/'.format(self.book.textdir)\n )", "def collectstatic():\n sudo(env.activate)\n sudo('cd %s' % env.whole_path_symlinked + '/aurora; python manage.py collectstatic;')", "def copy_files(self):\n for (source_name, target_name) in self.FILES_TO_LINK:\n src = os.path.expanduser(source_name)\n tgt = os.path.expanduser(target_name)\n cmd = 'cp -rf {src} {tgt}'.format(src=src, tgt=tgt)\n\n print(cmd)\n if not self.dry_run:\n run(cmd)", "def save_resources(self, save_directory):\n for name, file_name in self.resource_files_names.items():\n save_path = os.path.join(save_directory, file_name)\n shutil.copyfile(getattr(self, \"_%s\" % name), save_path)", "def resources(self):", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / 
filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def create_assets():\n assets = {}\n\n # Load all static files\n for root, dirs, files in os.walk(STATIC_DIR):\n for fname in files:\n filename = os.path.join(root, fname)\n with open(filename, \"rb\") as f:\n assets[os.path.relpath(filename, STATIC_DIR)] = f.read()\n\n # Collect pages\n pages = {}\n for fname in os.listdir(PAGES_DIR):\n if fname.lower().endswith(\".md\"):\n name = fname.split(\".\")[0].lower()\n with open(os.path.join(PAGES_DIR, fname), \"rb\") as f:\n md = f.read().decode()\n pages[name] = Page(name, md)\n\n # todo: Collect blog posts\n\n # Get template\n with open(os.path.join(THIS_DIR, \"template.html\"), \"rb\") as f:\n html_template = f.read().decode()\n\n with open(os.path.join(THIS_DIR, \"style.css\"), \"rb\") as f:\n css = f.read().decode()\n css += \"/* Pygments CSS */\\n\" + HtmlFormatter(style=\"vs\").get_style_defs(\n \".highlight\"\n )\n\n # Generate pages\n year = datetime.now().year\n for page in pages.values():\n page.prepare(pages.keys())\n title = TITLE if page.name == \"index\" else TITLE + \" - \" + page.name\n menu = create_menu(page)\n html = html_template.format(\n title=title, style=css, body=page.to_html(), menu=menu, year=year\n )\n print(\"generating\", page.name + \".html\")\n assets[page.name + \".html\"] = html.encode()\n\n # Fix backslashes on Windows\n for key in list(assets.keys()):\n if \"\\\\\" in key:\n assets[key.replace(\"\\\\\", \"/\")] = assets.pop(key)\n\n return assets", "def assets(self):\n static = self.static\n if static is None:\n return None\n\n assets = os.path.join(static, 'assets')\n if not os.path.isdir(assets):\n return None\n\n return assets" ]
[ "0.7872584", "0.7281044", "0.69176215", "0.63669395", "0.6302897", "0.62768185", "0.6248703", "0.6242949", "0.62096953", "0.6190306", "0.6170092", "0.6154144", "0.61512166", "0.61467654", "0.6134342", "0.60023135", "0.5979792", "0.59327734", "0.5929581", "0.59114957", "0.58955276", "0.58667696", "0.5786932", "0.57863617", "0.57669973", "0.5729245", "0.57163095", "0.5714037", "0.57016575", "0.569508" ]
0.8223046
0
Test for privacy policy view
def test_1_privacy(self):
    response = self.client.get(reverse('privacy-policy'), follow=True)
    self.assertEqual(response.status_code, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_viewPrivacyPolicyPage(self):\r\n print('========================================================================')\r\n print('Test for check redirect on PrivacyPolicy page after link PrivacyPolicy click')\r\n #Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n #cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n self.reg_page.click_privacyPolicy_lnk()\r\n ppolicy_page = page_PrivacyPolicy.Page_PrivacyPolicy(driver)\r\n\r\n driver.get(ppolicy_page.PPOLICY_URL)\r\n wait = WebDriverWait(driver, 20)\r\n element = wait.until(EC.title_contains('Privacy Policy'))\r\n assert ppolicy_page.get_ppolicy_title().find(\"Privacy Policy\") != -1, \"Privacy Policy title page doesn't match\"\r\n\r\n print('--------- SUCCESS test_viewPrivacyPolicyPage-----------')\r\n driver.quit()", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def is_in_privacy_mode(self) -> bool:\n return self.data[Attribute.CAMERA_PRIVACY]", "def can_view(self, user):\r\n return True", "def test_public_user(self):\n set_permission(Permission.SHARE, self.user1, self.collection)\n\n data = {\"public\": \"view\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"none\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n\n data = {\"public\": \"edit\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"share\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n data = {\"public\": \"owner\"}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def has_permission(self, request, view):\n return False", "def has_permission(self, request, view):\n return 
request.user.group != 'patient'", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")", "def has_permission_to_view(page, user):\n if page.permissions.count() == 0:\n return True\n for perm in page.permissions.all():\n perm_label = '%s.%s' % (perm.content_type.app_label, perm.codename)\n if user.has_perm(perm_label):\n return True\n return False", "def test_func(self):\n member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self", "def has_permission(self, request, view):\n return True", "def test_published_story_must_be_visible_for_everyone_but_blocked(self):\n self.assertEqual(self.ps.is_visible_for(self.au), True)\n\n \"\"\" Published story must be visible for another.\"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u2), True)\n\n \"\"\" Publsihed story must be visible for owner. \"\"\"\n self.assertEqual(self.ps.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def test_edit_shelf_privacy(self, *_):\n view = views.Shelf.as_view()\n shelf = self.local_user.shelf_set.get(identifier=\"to-read\")\n self.assertEqual(shelf.privacy, \"public\")\n\n request = self.factory.post(\n \"\",\n {\n \"privacy\": \"unlisted\",\n \"user\": self.local_user.id,\n \"name\": \"To Read\",\n },\n )\n request.user = self.local_user\n view(request, self.local_user.username, shelf.identifier)\n shelf.refresh_from_db()\n\n self.assertEqual(shelf.privacy, \"unlisted\")", "def test_private(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_public\": False, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"private\"})", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)", "def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)", "def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def can_be_viewed_by(self,user):\n return True", "def test_perm_is_defined_on(self):\n perm = 'forums_forum.view_in_forum'\n assert access.perm_is_defined_on(perm, Forum.objects.get(pk=3))\n assert not access.perm_is_defined_on(perm, Forum.objects.get(pk=2))", "def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True" ]
[ "0.65826833", "0.65698117", "0.64647794", "0.6331653", "0.6287771", "0.623985", "0.6236848", "0.61535543", "0.61415833", "0.61177397", "0.61074257", "0.61072975", "0.60979134", "0.6084352", "0.6076933", "0.6038657", "0.60216844", "0.60195696", "0.60150224", "0.6006013", "0.59944177", "0.59120756", "0.59120756", "0.58651805", "0.5843682", "0.58381593", "0.58047706", "0.5803293", "0.57889736", "0.5785677" ]
0.7331623
0
Test case for group required decorator
def test_5_group_required(self):
    user = User.objects.get(email=data_user['email'])
    self.factory = RequestFactory()

    @group_required('default')
    def test(request):
        return 200

    request = self.factory.get('/foo')
    request.user = user
    response = test(request)
    self.assertEqual(response.status_code, 302)

    group = Group.objects.create(
        name='default',
    )
    user.groups.add(group)

    request = self.factory.get('/foo')
    request.user = user
    response = test(request)
    self.assertEqual(response, 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def group_required(*groups):\n\n def decorator(func):\n @wraps(func)\n def check_auth(*args, **kwargs):\n check_user_group(*groups)\n return func (*args, **kwargs)\n return check_auth\n return decorator", "def test_2_group_required(self):\n func = base_views.BlogDetailView\n anonymous_required(func)", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def test_3_group_required_none(self):\n func = base_views.BlogDetailView\n group_required(func)", "def group_required(self, group):\n\n def decorator(view):\n @functools.wraps(view)\n def decorated(*args, **kwargs):\n log.info(\"Trying to get access to resource: %s protected by group: %s\", view.__name__, group)\n if request.method == 'POST':\n token = request.form['token']\n if self.development or self.group_authenticated(token, group):\n return view(*args, **kwargs)\n else:\n log.warning(\"User has not been authorized to get access to resource: %s\", view.__name__)\n else:\n log.error(\"Bad request type! Expected 'POST', actual '%s'\", request.method)\n\n return abort(403)\n\n return decorated\n return decorator", "def test_get_group(self):\n pass", "def test_add_group(self):\n pass", "def test_stepregistry_module_should_have_global_step_decorators():\n # given & when\n from radish.stepregistry import given, when, then, step\n\n # then\n assert callable(given)\n assert callable(when)\n assert callable(then)\n assert callable(step)", "def test_stepregistry_should_create_one_step_decorator_per_keyword():\n # given\n registry = StepRegistry()\n context = {}\n\n # when\n registry.create_step_decorators(context)\n\n # then\n assert len(context) == 4\n assert \"given\" in context\n assert \"when\" in context\n assert \"then\" in context\n assert \"step\" in context", "def test_wraps():\n print('func')", "def test_stepregitry_register_func_with_multiple_decorators():\n # given\n registry = StepRegistry()\n context = {}\n registry.create_step_decorators(context)\n\n # when\n def test_step():\n ...\n\n test_step = context[\"given\"](\"pattern\")(test_step)\n test_step = context[\"when\"](\"pattern\")(test_step)\n\n # then\n assert registry.step_implementations(\"Given\") == [\n StepImpl(\"Given\", \"pattern\", test_step)\n ]\n assert registry.step_implementations(\"When\") == [\n StepImpl(\"When\", \"pattern\", test_step)\n ]", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_test_group_parameters(self):\n pass", "def test_decorator(f):\n return f", "def test_trivial(self):\n group = Group()", "def test_read_group(self):\n pass", "def test_limit_as_runs_with_spawn_raises() -> None:\n with pytest.raises(ValueError):\n\n @restricted(name=\"hello\", context=\"spawn\")\n def limited_func_with_decorator_spawn() -> None:\n \"\"\"A restricted function\"\"\"\n pass", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_stepregitry_step_decorators_for_all_keywords():\n # given\n registry = StepRegistry()\n context = {}\n registry.create_step_decorators(context)\n\n # when\n def test_step():\n ...\n\n test_step = context[\"step\"](\"pattern\")(test_step)\n\n # then\n assert registry.step_implementations(\"Given\") == [\n StepImpl(\"Step\", \"pattern\", test_step)\n ]", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def test_verify_that_you_can_create_a_new_group():", "def decorator_group(decorators):\n def group(f):\n for decorator in decorators:\n f = 
decorator(f)\n return f\n return group", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_hookregistry_module_should_have_global_hook_decorators():\n # given & when\n from radish.hookregistry import before, after\n\n # then\n assert callable(before.all)\n assert callable(before.each_feature)\n assert callable(before.each_rule)\n assert callable(before.each_scenario)\n assert callable(before.each_step)\n assert callable(after.all)\n assert callable(after.each_feature)\n assert callable(after.each_rule)\n assert callable(after.each_scenario)\n assert callable(after.each_step)", "def test_break_security_group_usual_case():", "def test_require_in_call_silently_succeeds_for_available_tests(self, test_generator):\n # pylint: disable=function-redefined\n\n with self.subTest(\"direct decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_call\n def decorated():\n pass\n\n check.assert_not_called()\n decorated()\n check.assert_called_once()\n\n with self.subTest(\"named decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_call(\"sentinel name\")\n def decorated():\n pass\n\n check.assert_not_called()\n decorated()\n check.assert_called_once()" ]
[ "0.6848307", "0.6808511", "0.67380023", "0.67380023", "0.66136533", "0.6525565", "0.64716965", "0.64529186", "0.642777", "0.63977575", "0.63896275", "0.6355148", "0.6331094", "0.62882537", "0.62863505", "0.625996", "0.62579274", "0.6232457", "0.6173068", "0.6173068", "0.6169589", "0.61084795", "0.61084795", "0.6098721", "0.6097893", "0.6057163", "0.6057163", "0.6053693", "0.60273063", "0.600479" ]
0.683177
1
metaDataInputAvailable(inputType, inputKey) Return true if inputType with inputKey is available.
def metaDataInputAvailable(inputType, inputKey):
    # Check if it is on metadata:
    # FIXME How can I do that using objKeyStore??
    flag = False
    from RecExConfig.InputFilePeeker import inputFileSummary
    metaItemList = inputFileSummary.get('metadata_itemsList')
    if ('%s#%s' % (inputType, inputKey)) in metaItemList:
        flag = True
        mlog.verbose(("metaItemList does have ContainerType input %s with "
                      "key %s."), inputType, inputKey)
    else:
        mlog.verbose(("metaItemList does NOT have ContainerType input %s with "
                      "key %s."), inputType, inputKey)
    return flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_key_input(self, name: str) -> bool:\n return (\n self.allow_dynamic\n and self.dynamic_inputs\n and bool(self.dynamic_inputs.get(name, False))\n )", "def has_input(self, input_ref):\n inputs = self.get_recipe_inputs()\n for (input_role_name, input_role) in inputs.items():\n for item in input_role.get(\"items\", []):\n if item.get(\"ref\", None) == input_ref:\n return True\n return False", "def inputReceived(self, inPars, queryMeta):\n\t\tif not self._isActive(inPars):\n\t\t\treturn False\n\t\tkeysFound, keysMissing = [], []\n\t\tfor f in self.inputKeys:\n\t\t\tif inPars.get(f.name) is None:\n\t\t\t\tkeysMissing.append(f)\n\t\t\telse:\n\t\t\t\tif f.value!=inPars.get(f.name): # non-defaulted\n\t\t\t\t\tkeysFound.append(f)\n\t\tif not keysMissing:\n\t\t\treturn True\n\n\t\t# keys are missing. That's ok if none were found and we're not required\n\t\tif not self.required and not keysFound:\n\t\t\treturn False\n\t\tif self.required:\n\t\t\traise base.ValidationError(\"is mandatory but was not provided.\", \n\t\t\t\tcolName=keysMissing[0].name)\n\n\t\t# we're optional, but a value was given and others are missing\n\t\tif not self.combining:\n\t\t\traise base.ValidationError(\"When you give a value for %s,\"\n\t\t\t\t\" you must give value(s) for %s, too\"%(keysFound[0].getLabel(), \n\t\t\t\t\t\t\", \".join(k.name for k in keysMissing)),\n\t\t\t\t\tcolName=keysMissing[0].name)\n\t\treturn True", "def has_input_names(self) -> bool:\n return self.inputs and self.inputs[0].name is not None", "def is_dynamic_input(self, name: str) -> bool:\n return self.get_input_type(name) == IN_OPTIONAL", "def has_input(self, name: str) -> bool:\n return self.get_input_type(name) != IN_INVALID", "def validateInputType(self, inputType):\n raise NotImplementedError()", "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "def has(self, name):\n try:\n if self.input(name) is None:\n return False\n except KeyError:\n return False\n\n return True", "def can_handle(self, handler_input):\n return is_request_type(\"LaunchRequest\")(handler_input)", "def is_required_data(self, typename):\r\n return typename in self.required_data_products", "def is_required_data(self, typename):\n return typename in self.required_data_products", "def meta_available(self):\n return self._meta_available", "def has_user_data(self, key):\n return isinstance(self._user_data, dict) and key in self._user_data", "def GetNativeInputInfo(is_optional):\r\n raise Exception(\"Abstract method\")", "def has(self, input_):\n name = None\n if isinstance(input_, str):\n name = input_\n elif isinstance(input_, Item):\n name = input_.name\n\n for item in self.items:\n if name == item.name:\n if isinstance(input_, Item):\n return item.amount >= input_.amount\n return True\n return False", "def _supports(self, item):\n return type(item) in Result.SUPPORTED_DATA", "def has(self, key):\r\n # handle any special cases\r\n if key.scope == Scope.content:\r\n self._load_definition()\r\n elif key.scope == Scope.parent:\r\n return True\r\n\r\n # it's not clear whether inherited values should return True. 
Right now they don't\r\n # if someone changes it so that they do, then change any tests of field.name in xx._field_data\r\n return key.field_name in self._fields", "def isSupportedData(self, data, info):\n return True", "def _check_image_is_supported(self):\n\t\tSUPPORTED = {}\n\t\tSUPPORTED['RECORD_TYPE'] = 'FIXED_LENGTH',\n\t\tSUPPORTED['SAMPLE_BITS'] = 8, 16\n\t\tSUPPORTED['SAMPLE_TYPE'] = ( 'UNSIGNED_INTEGER',\n\t\t\t\t'MSB_UNSIGNED_INTEGER',\n\t\t\t\t'LSB_INTEGER',\n\t\t\t\t'MSB_INTEGER'\n\t\t\t\t)\n\n\t\timageIsSupported = True\n\n\t\tif not self.labels.has_key('IMAGE'):\n\t\t\tif self.log: self.log.warn(\"No image data found\")\n\t\t\timageIsSupported = False\n\n\t\trecordType = self.labels['RECORD_TYPE']\n\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\n\t\tif recordType not in SUPPORTED['RECORD_TYPE']:\n\t\t\terrorMessage = (\"RECORD_TYPE '%s' is not supported\") % (recordType)\n\t\t\tif self.raisesImageNotSupportedError:\n\t\t\t\traise ImageNotSupportedError(errorMessage)\n\t\t\timageIsSupported = False\n\t\tif imageSampleBits not in SUPPORTED['SAMPLE_BITS']:\n\t\t\terrorMessage = (\"SAMPLE_BITS '%s' is not supported\") % (imageSampleBits)\n\t\t\tif self.raisesImageNotSupportedError:\n\t\t\t\traise ImageNotSupportedError(errorMessage)\n\t\t\timageIsSupported = False\n\t\tif imageSampleType not in SUPPORTED['SAMPLE_TYPE']:\n\t\t\terrorMessage = (\"SAMPLE_TYPE '%s' is not supported\") % (imageSampleType)\n\t\t\tif self.raisesImageNotSupportedError:\n\t\t\t\traise ImageNotSupportedError(errorMessage)\n\t\t\timageIsSupported = False\n\n\t\treturn imageIsSupported", "def is_valid(user_input, card_type=None, skip=False):\n \n i = user_input.upper()\n if i == 'Q':\n exit(\"\\nExiting program. Thanks for using Clue Detective!\\n\")\n if skip:\n if i == 'X':\n return True\n if card_type:\n key_list = [key for key in Board.input_decoder \n if Board.input_decoder[key].type == card_type]\n if i in key_list:\n return True\n elif not card_type:\n if i in Board.input_decoder:\n return True \n else:\n return False", "def has_metadata(self):\n if self.mimetype in Config.mimes_metadata:\n return True\n return False", "def hasCustomData( self, key ):\n return str(key) in self._customData", "def has_meta(self, meta_name):\n return self.has_meta_class(meta_name) or \\\n self.has_meta_function(meta_name)", "def external_input_ready(self):\n return True", "def add_input(self, name: str, is_key: bool = False) -> None:\n if not self.allow_dynamic:\n raise TypeError(\"Dynamic inputs are not allowed\")\n name = sys.intern(name)\n if self.has_input(name):\n return\n if self.dynamic_inputs is None:\n self.dynamic_inputs = {name: is_key}\n else:\n self.dynamic_inputs[name] = is_key", "def exists(self, key_name: str) -> bool:\n pass", "def has_data(self) -> bool:\n raise NotImplementedError", "def isValidInputOutputData(self, inputVolumeNode):\n if not inputVolumeNode:\n logging.debug('isValidInputOutputData failed: no input volume node defined')\n return False\n return True", "def meta_available(self, meta_available):\n self._meta_available = meta_available" ]
[ "0.60552776", "0.5587217", "0.55770856", "0.54691964", "0.5453386", "0.54281676", "0.53389907", "0.5317259", "0.5309723", "0.5181332", "0.5121032", "0.51012117", "0.50994945", "0.5076581", "0.5036891", "0.50252545", "0.49921212", "0.49809375", "0.4978585", "0.49596685", "0.49595946", "0.49481493", "0.49271315", "0.49164417", "0.49112746", "0.49023333", "0.48824215", "0.48771876", "0.48674345", "0.48657936" ]
0.8558229
0
Test creation and deletion of tables.
def test_create(self):
    cursor = connection.cursor()
    # It needs to take at least 2 args
    self.assertRaises(TypeError, db.create_table)
    self.assertRaises(TypeError, db.create_table, "test1")
    # Empty tables (i.e. no columns) are not fine, so make at least 1
    db.create_table("test1", [('email_confirmed', models.BooleanField(default=False))])
    db.start_transaction()
    # And should exist
    cursor.execute("SELECT * FROM test1")
    # Make sure we can't do the same query on an empty table
    try:
        cursor.execute("SELECT * FROM nottheretest1")
        self.fail("Non-existent table could be selected!")
    except:
        pass
    # Clear the dirty transaction
    db.rollback_transaction()
    db.start_transaction()
    # Remove the table
    db.delete_table("test1")
    # Make sure it went
    try:
        cursor.execute("SELECT * FROM test1")
        self.fail("Just-deleted table could be selected!")
    except:
        pass
    # Clear the dirty transaction
    db.rollback_transaction()
    db.start_transaction()
    # Try deleting a nonexistent one
    try:
        db.delete_table("nottheretest1")
        self.fail("Non-existent table could be deleted!")
    except:
        pass
    db.rollback_transaction()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_tables(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n result = [x[2] for x in result]\n self.assertCountEqual(result,\n ['bioms', 'counts', 'networks',\n 'taxonomy', 'edges', 'samples', 'meta']\n )\n cur.close()\n conn.close()\n conn_object.delete_tables()", "def test_table_definition(self):\n create_table(LowercaseKeyModel)\n create_table(CapitalizedKeyModel)\n\n delete_table(LowercaseKeyModel)\n delete_table(CapitalizedKeyModel)", "def test_table_definition(self):\r\n create_table(LowercaseKeyModel)\r\n create_table(CapitalizedKeyModel)\r\n\r\n delete_table(LowercaseKeyModel)\r\n delete_table(CapitalizedKeyModel)", "def test_create_tables_cmd_success(self):\n self.runner.invoke(cli,\n args=['create-tables'],\n env={'OS_ELASTICSEARCH_ADDRESS': LOCAL_ELASTICSEARCH})\n engine = config.get_engine()\n inspector = Inspector.from_engine(engine)\n self.assertTrue('models' not in inspector.get_table_names())", "def test_delete_tables(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn_object.delete_tables()\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n self.assertEqual(len(result), 0)\n cur.close()\n conn.close()", "def test_create_table(self):\n self.assertEqual(\n ['CREATE', 'TABLE', 'T1', '(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n)'],\n grammar._CREATE_TABLE.parseString(\n 'CREATE TABLE IF NOT EXISTS `T1`(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n);'\n ).asList()\n )", "def tearDown(self):\n drop_all_tables()\n create_all()", "def test_create_table_successfully (self):\n\n new_table = self.wrapper.create_table(self.table, [self.bob, self.jane])\n self.assertIsNone(new_table)", "def test_db_schema(client):\n table_names = [\"user\", \"house\", \"user_role\", \"model_param\"]\n with db.engine.connect() as connexion:\n for table_name in table_names:\n assert db.engine.dialect.has_table(connexion, table_name) == True", "def test_drop_tables(self):\n self.assertEqual(Manager.table_exists().run_sync(), True)\n self.assertEqual(Band.table_exists().run_sync(), True)\n\n drop_tables(Manager, Band)\n\n self.assertEqual(Manager.table_exists().run_sync(), False)\n self.assertEqual(Band.table_exists().run_sync(), False)", "def test_db_table_creation_check(self):\n mock_cursor = Mock()\n mock_cursor.configure_mock(**{\"cursor.return_value.fetchone.return_value\": (\"vnf_table_2\")})\n status = misshtbtd.db_table_creation_check(mock_cursor, \"vnf_table_2\")\n self.assertEqual(status, True)", "def create_tables(self):\n if self.mock:\n mock_dynamodb2(self._create_tables())\n else:\n self._create_tables()", "def test_table_drop(app, runner):\n result = runner.invoke(drop_tables, input=\"y\")\n\n with app.app_context():\n assert not db.engine.has_table('link')\n assert not db.engine.has_table('user')", "def test_create_table(self):\r\n function_name = sys._getframe().f_code.co_name\r\n db_name = \"{}_{}\".format(function_name, 
\"db\")\r\n db_name_illegal_by_rdb = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n db_name_illegal_by_this_program = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n table_name = \"{}_{}\".format(function_name, \"table\")\r\n table_name_illegal_by_rdb = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n table_name_illegal_by_this_program = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n\r\n test_list_1 = [\r\n db_name,\r\n table_name,\r\n None,\r\n None,\r\n None,\r\n None\r\n ]\r\n test_list_2 = [\r\n db_name,\r\n table_name_illegal_by_rdb,\r\n None\r\n ]\r\n test_list_3 = [\r\n db_name,\r\n table_name_illegal_by_this_program,\r\n None\r\n ]\r\n test_list_4 = [\r\n db_name_illegal_by_rdb,\r\n table_name,\r\n None\r\n ]\r\n test_list_5 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_rdb,\r\n None\r\n ]\r\n test_list_6 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_this_program,\r\n None\r\n ]\r\n test_list_7 = [\r\n db_name_illegal_by_this_program,\r\n table_name,\r\n None\r\n ]\r\n test_list_8 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_rdb,\r\n None\r\n ]\r\n test_list_9 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_this_program,\r\n None\r\n ]\r\n\r\n crd(self.c, test_list_1[0])\r\n test_list_1[len(test_list_1) - 1] = isinstance(\r\n crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0],\r\n True\r\n ),\r\n r.ast.TableCreate\r\n )\r\n test_list_1[len(test_list_1) - 2] = crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0]\r\n )\r\n test_list_1[len(test_list_1) - 3] = crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0]\r\n )\r\n test_list_1[len(test_list_1) - 4] = isinstance(\r\n crt(\r\n self.c,\r\n test_list_1[1],\r\n test_list_1[0],\r\n True\r\n ),\r\n r.ast.TableCreate\r\n )\r\n dd(self.c, test_list_1[0])\r\n\r\n crd(self.c, test_list_2[0])\r\n \"\"\"Test 1.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_2[len(test_list_2) - 1] = crt(\r\n self.c,\r\n test_list_2[1],\r\n test_list_2[0]\r\n )\r\n dd(self.c, test_list_2[0])\r\n\r\n crd(self.c, test_list_3[0])\r\n \"\"\"Test 2.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_3[len(test_list_3) - 1] = crt(\r\n self.c,\r\n test_list_3[1],\r\n test_list_3[0]\r\n )\r\n dd(self.c, test_list_3[0])\r\n\r\n \"\"\"Test 3.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_4[len(test_list_4) - 1] = crt(\r\n self.c,\r\n test_list_4[1],\r\n test_list_4[0]\r\n )\r\n\r\n \"\"\"Test 4.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_5[len(test_list_5) - 1] = crt(\r\n self.c,\r\n test_list_5[1],\r\n test_list_5[0]\r\n )\r\n\r\n \"\"\"Test 5.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_6[len(test_list_6) - 1] = crt(\r\n self.c,\r\n test_list_6[1],\r\n test_list_6[0]\r\n )\r\n\r\n r.db_create(test_list_7[0]).run(self.c)\r\n \"\"\"Test 6.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_7[len(test_list_7) - 1] = crt(\r\n self.c,\r\n test_list_7[1],\r\n test_list_7[0]\r\n )\r\n r.db_drop(test_list_7[0]).run(self.c)\r\n\r\n r.db_create(test_list_8[0]).run(self.c)\r\n \"\"\"Test 7.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_8[len(test_list_8) - 1] = crt(\r\n self.c,\r\n test_list_8[1],\r\n test_list_8[0]\r\n )\r\n r.db_drop(test_list_8[0]).run(self.c)\r\n\r\n r.db_create(test_list_9[0]).run(self.c)\r\n \"\"\"Test 8.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_9[len(test_list_9) - 1] = crt(\r\n self.c,\r\n test_list_9[1],\r\n test_list_9[0]\r\n )\r\n 
r.db_drop(test_list_9[0]).run(self.c)\r\n\r\n self.assertTrue(test_list_1[len(test_list_1) - 1]) # Test 9.\r\n self.assertIsNotNone(test_list_1[len(test_list_1) - 2]) # Test 10.\r\n self.assertIsNone(test_list_1[len(test_list_1) - 3]) # Test 11.\r\n self.assertFalse(test_list_1[len(test_list_1) - 4]) # Test 12.\r\n self.assertIsNone(test_list_2[len(test_list_2) - 1]) # Test 13.\r\n self.assertIsNone(test_list_3[len(test_list_3) - 1]) # Test 14.\r\n self.assertIsNone(test_list_4[len(test_list_4) - 1]) # Test 15.\r\n self.assertIsNone(test_list_5[len(test_list_5) - 1]) # Test 16.\r\n self.assertIsNone(test_list_6[len(test_list_6) - 1]) # Test 17.\r\n self.assertIsNone(test_list_7[len(test_list_7) - 1]) # Test 18.\r\n self.assertIsNone(test_list_8[len(test_list_8) - 1]) # Test 19.\r\n self.assertIsNone(test_list_9[len(test_list_9) - 1]) # Test 20.\r", "def create_tables():\n db.create_all()", "def delete_tables(self):\n if self.mock:\n mock_dynamodb2(self._delete_tables())\n else:\n self._delete_tables()", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables", "def test_dummydb_new_table(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)", "def setUp(self):\n self.conn = seed.connect_to_db(\"testing\")\n self.cur = self.conn.cursor()\n\n seed.cur = self.conn.cursor()\n seed.conn = self.conn\n\n self.tables = [\n {\n \"name\": \"people\", \n \"schema\": [(\"firstname\", \"10\", \"VARCHAR\"), (\"lastname\", \"10\", \"VARCHAR\"), (\"age\", \"3\", \"INTEGER\"), (\"active\", \"1\", \"BOOLEAN\")]\n },\n {\n \"name\": \"animals\",\n \"schema\": [(\"animal_id\", \"7\", \"INTEGER\"), (\"name\", \"10\", \"VARCHAR\"), (\"species\", \"20\", \"VARCHAR\")]\n },\n {\n \"name\":\"testformat1\",\n \"schema\": [(\"name\", \"10\", \"VARCHAR\"), (\"valid\", \"1\", \"BOOLEAN\"), (\"count\", \"3\", \"INTEGER\")]\n }\n ]\n for table in self.tables:\n seed.create_table(table[\"name\"], table[\"schema\"])", "def create_all_tables(self):\n pass", "def create_example_test_table(conn):\n execute_sql_script(conn, \"06_create_example_test_table.sql\")", "def test_create(self):\n Base = declarative_base()\n my_conn = MySQL(*self.conn_params)\n\n # table creation can be done via execute() + raw SQL or using this:\n class Table2(Base):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'table2'\n\n column_int = Column(Integer)\n column_string = Column(String(20))\n column_float = Column(Float)\n column_datetime = Column(DateTime)\n column_boolean = Column(Boolean)\n id = Column(Integer, primary_key=True)\n\n Table2.__table__.create(bind=my_conn.engine)\n table2 = my_conn.get_table('table2')\n self.assertEqual(table2.c.column_datetime.name, 'column_datetime')\n self.assertEqual(len(table2.c), 6)\n my_conn.drop('table2')", "def test_drop_table(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # make sure table exists\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n\n tracker.drop_table()\n\n with pytest.raises(ProgrammingError):\n tracker.drop_table()", "def testTable(self):\n self.assertGreater(len(self.auth.table(self.dataset, self.table)), 0)", "def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def 
create_table(self):\n pass", "def _do_action_tables_create(self):\n\n schema_shell = os.path.join(self.bento_home, \"schema-shell\", \"bin\", \"kiji-schema-shell\")\n assert os.path.isfile(schema_shell), schema_shell\n\n # Delete the table first!\n cmd = (\n \"kiji delete --target={kiji_uri} --interactive=false; \" +\n \"kiji install --kiji={kiji_uri}\" ).format(kiji_uri=self.kiji_uri)\n self._run_kiji_job(cmd)\n\n for ddl in self.ddls:\n ddl_full_path = os.path.join(self.movie_advisor_home, ddl)\n assert os.path.isfile(ddl_full_path)\n cmd = \"{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}\".format(\n schema_shell=schema_shell,\n kiji_uri=self.kiji_uri,\n ddl_full_path=ddl_full_path)\n self._run_kiji_job(cmd)", "def test_delete_non_existent_table(self):\n\n non_existing_table = self.wrapper.delete_table(self.table_delete)\n self.assertWarns(Warning, non_existing_table)" ]
[ "0.79065996", "0.76564527", "0.76478964", "0.7574287", "0.75223595", "0.7512179", "0.750431", "0.7431251", "0.73941386", "0.73464495", "0.72949237", "0.7286949", "0.7275599", "0.71517336", "0.70496744", "0.7047443", "0.7015091", "0.7015091", "0.70136803", "0.6993084", "0.6986021", "0.6973394", "0.6949851", "0.68969923", "0.68898714", "0.6881164", "0.6879565", "0.6873365", "0.6863624", "0.685597" ]
0.802963
0
Span/RBW ratio {1, 10000}
def span_rbw_ratio(self):
    res = self._visa.query(f"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?")
    return 1 / float(res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def golden_ratio():\n return 1.61803398875", "def rbw_vbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:VIDEO:RATIO?\")\r\n return 1 / float(res)", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def strm_bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if StatsRouter.global_strm_mean == 0.0: return 0\r\n else: return (1.0*bw)/StatsRouter.global_strm_mean", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def calculate_1rm_brzycki(w: float, r: int):\n\n if r == 0:\n return 0\n\n if w == 0:\n w = 1\n\n return round(w*36/(37 - r), 2)", "def ratio2weights(ratio):\n if ratio <= 1.0:\n lweight = ratio / (1.0 + ratio)\n else:\n lweight = 1.0 / (1.0 + 1.0 / ratio)\n return lweight, 1.0 - lweight", "def golden_ratio():\n print((1+math.sqrt(5))/2)", "def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)", "def question_18():\n rbf = RadialBiasFunction()\n wins = 0.0\n for i in range(100):\n rbf.fit(1.5, 9)\n rbf_error = rbf.error()\n if rbf_error == 0:\n wins += 1\n rbf.resample()\n return wins / 100", "def robbins(counts):\n return float(singles(counts))/counts.sum()", "def bmi(weight, height):\n return weight / height ** 2", "def frame_rate():\n def r(x):\n return 6E7/x\n\n def w(x):\n return int(6E7/x)\n return r, w", "def calculate_br_up_metric(br_up):\n if br_up < 1:\n br_up = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_up) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def _compute_bn(self, lvl):\n bn = [0] # number of samples crossing the left/right boundary\n for n in range(lvl):\n # 1. down-sampling of N samples by the factor scl gives (N-1)//scl + 1 samples\n # 2. 
bn[-1]+M-1 is the number of samples acrossing the left/right boundary, with M being the number of freqeuncies\n # => hence after the downsampling the number of boundary crossing samples is:\n bn.append((bn[-1]+self.nfreq-2)//self.scaling+1)\n bn.append(bn[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return bn[1:][::-1]", "def sampling_ratio(self):\n return self.coincidences / self.n", "def ratio_func(a, b):\n return a / b", "def brate(self):\n try:\n return self.pos / self.runtime\n except ZeroDivisionError:\n return 0", "def smooth(self, numerator , denominator): \n return (numerator+1)/(denominator+len(self.vocab))", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def ratio_calc(first_strandI, second_strandI):\n if first_strandI + second_strandI != 0:\n Ratio = first_strandI / float(first_strandI + second_strandI)\n return Ratio\n else:\n return np.nan", "def calculate_br_down_metric(br_down):\n if br_down < 1:\n br_down = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_down) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def dd_vtr_duration_ratio_map_nb(record):\n return dd_vtr_duration_map_nb(record) / dd_duration_map_nb(record)", "def calculate_1rm_epley(w: float, r: int):\n\n if r == 1:\n return w\n if r == 0:\n return 0\n\n if w == 0:\n w = 1\n\n return round(w*(1 + r/30), 2)", "def _bleu_score_compute(preds_len: Tensor, target_len: Tensor, numerator: Tensor, denominator: Tensor, n_gram: int, weights: Sequence[float], smooth: bool) ->Tensor:\n device = numerator.device\n if min(numerator) == 0.0:\n return tensor(0.0, device=device)\n if smooth:\n precision_scores = torch.div(torch.add(numerator, torch.ones(n_gram, device=device)), torch.add(denominator, torch.ones(n_gram, device=device)))\n precision_scores[0] = numerator[0] / denominator[0]\n else:\n precision_scores = numerator / denominator\n log_precision_scores = tensor(weights, device=device) * torch.log(precision_scores)\n geometric_mean = torch.exp(torch.sum(log_precision_scores))\n brevity_penalty = tensor(1.0, device=device) if preds_len > target_len else torch.exp(1 - target_len / preds_len)\n bleu = brevity_penalty * geometric_mean\n return bleu", "def TCMB(rs):\n\n return 0.235e-3 * rs", "def ds_ratio(group):\n nix_count = (group=='nix').sum()\n top_count = (group=='top').sum()\n ratio = nix_count/(nix_count+top_count) #could smooth this\n return ratio", "def compute_strand_balance(record):\n try:\n info = record.info\n except:\n info = record.INFO\n\n strand_bal = [strand_ratio(info[\"SAF\"][i], info[\"SAR\"][i]) for i in range(len(info[\"SAF\"]))]\n\n return strand_bal" ]
[ "0.67297596", "0.65892833", "0.6565979", "0.64761853", "0.64374167", "0.62121385", "0.61042845", "0.6101944", "0.60885817", "0.6046705", "0.595288", "0.58912903", "0.5881694", "0.5870998", "0.5861205", "0.5819621", "0.58036536", "0.57961994", "0.57926303", "0.5792179", "0.5784649", "0.57787675", "0.57731855", "0.57547414", "0.5748325", "0.57263225", "0.5716414", "0.5695576", "0.5649142", "0.56491196" ]
0.7564076
0
RBW/Video BW ratio {0.001, 100}
def rbw_vbw_ratio(self):
    res = self._visa.query(f"SENSE{self._screen()}:BANDWIDTH:VIDEO:RATIO?")
    return 1 / float(res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / float(res)", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def strm_bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if StatsRouter.global_strm_mean == 0.0: return 0\r\n else: return (1.0*bw)/StatsRouter.global_strm_mean", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def frame_rate():\n def r(x):\n return 6E7/x\n\n def w(x):\n return int(6E7/x)\n return r, w", "def set_bw(self, mode):\n self.bandwidth = 0\n if (mode=='a'):\n self.bandwidth = 54\n elif(mode=='b'):\n self.bandwidth = 11\n elif(mode=='g'):\n self.bandwidth = 54 \n elif(mode=='n'):\n self.bandwidth = 600\n elif(mode=='ac'):\n self.bandwidth = 6777 \n \n return self.bandwidth", "def brate(self):\n try:\n return self.pos / self.runtime\n except ZeroDivisionError:\n return 0", "def get_iperf_bw(self, filename):\n #last line has avg values\n for line in open(filename, 'r'):\n pass\n bw = line.split(',')[-1].strip()\n return int(bw)/1000 # bw in kbps", "def ratio(self):\n return float(self.max_width) / self.max_height", "def vratio(self):\n return self.run_command('vratio')[0]", "def subbandwidth(self):", "def bw_calc(start_time, end_time, packet_bit_len):\n delta_time = end_time - start_time\n delta_time = delta_time.seconds + delta_time.microseconds/1E6\n #Bandwidth b/s\n bw = packet_bit_len/delta_time\n return", "def _dBmTomW(dBm):\n return math.pow(10.0, dBm / 10.0)", "def bands(self) -> int:\n ...", "def aspect_ratio(self):\n ar = (self._block[1] >> 2) & 0x03\n asp_ratio = ''\n\n if ar == 0x00:\n asp_ratio = '4:3 AR'\n elif ar == 0x01:\n asp_ratio = '16:9 AR'\n elif ar == 0x02:\n asp_ratio = '16:10 AR'\n elif ar == 0x03:\n asp_ratio = '15:9 AR'\n\n return asp_ratio", "def calculate_br_up_metric(br_up):\n if br_up < 1:\n br_up = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_up) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def Buffer(height=16, width=16, profondeur=1,rvb=255 ): \n\tp=[rvb] \n\tmyb=height*width*profondeur\n\tprint\"Memory : %ikB\" % (myb/1024)\n\tb=p*myb\n\treturn b", "def _get_bandwidth(self, report):\n match = re.search(\"bw\\=\\s*(\\d+\\.{0,1}\\d*)\\s*(\\w+)\\/s\",\n report)\n if match:\n bandwidth = float(match.group(1))\n unit = match.group(2)\n if unit.lower() == 'b':\n bandwidth = round(bandwidth / 1024 / 1024, 2)\n elif \"kb\" in unit.lower():\n bandwidth = round(bandwidth / 1024, 2)\n elif \"gb\" in unit.lower():\n bandwidth = round(bandwidth * 1024, 2)\n\n return bandwidth", "def golden_ratio():\n return 1.61803398875", "def calculate_br_down_metric(br_down):\n if br_down < 1:\n br_down = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_down) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def getIFBW(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SENS:BWID?\")\n ret = int(self.myFieldFox.read())\n else:\n ret = 1000000\n return ret", "def hern_bulge_mass(r,b):\n rb = r/b\n return ((rb*rb)/(2*(1+rb)**2.))", "def bitrate(self) -> float:\n msb = self._read_u8(_REG_BITRATE_MSB)\n lsb = self._read_u8(_REG_BITRATE_LSB)\n return _FXOSC / ((msb << 8) | lsb)", "def video_bitrate(self):\n # type: () -> 
int\n return self._video_bitrate", "def test_mixing_ratio():\n p = 998. * units.mbar\n e = 73.75 * units.mbar\n assert_almost_equal(mixing_ratio(e, p), 0.04963, 2)", "def test_visualize_wfe_budget():\n nrc = webbpsf.NIRCam()\n nrc.visualize_wfe_budget()", "def calc_wmr(BMR, PAL):\n return BMR * (PAL - 1)", "def bandwidth(self):\n return self.stop_hz - self.start_hz", "def bmi(weight, height):\n return weight / height ** 2" ]
[ "0.78411186", "0.74527633", "0.7150638", "0.7007005", "0.6494074", "0.6219985", "0.6060733", "0.6012118", "0.59977496", "0.5925012", "0.58851236", "0.57970375", "0.5781342", "0.57797515", "0.5775451", "0.57096577", "0.56438977", "0.56283444", "0.56279147", "0.5622818", "0.5614004", "0.56019056", "0.55715585", "0.556367", "0.5555887", "0.5546182", "0.5527419", "0.55139005", "0.54993933", "0.5498099" ]
0.825476
0
Test if create_knxipframe of base class raises an exception.
async def test_create_knxipframe_err(self):
    xknx = XKNX()
    udp_client = UDPClient(xknx, ("192.168.1.1", 0), ("192.168.1.2", 1234))
    request_response = RequestResponse(xknx, udp_client, DisconnectResponse)
    request_response.timeout_in_seconds = 0
    with self.assertRaises(NotImplementedError):
        await request_response.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with pytest.raises(AttributeError):\n knxipframe.init(23)\n\n with pytest.raises(CouldNotParseKNXIP):\n # this is not yet implemented in xknx\n knxipframe.init(KNXIPServiceType.SEARCH_REQUEST_EXTENDED)", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with self.assertRaises(TypeError):\n knxipframe.init(23)", "def test_base_class_expection():\n with pytest.raises(TypeError):\n cardinal.CardinalPoints()", "def test_tolerate_dumb_signature(self, exception_class):\n\n try:\n i_live_but_why = exception_class(616)\n except Exception as exc:\n pytest.fail(str(exc))\n\n assert isinstance(i_live_but_why, exception_class)", "def test_construct_frame_tag_error(attributes, exception, error_msg):\n with pytest.raises(exception) as exc:\n Frame(**attributes)\n\n assert error_msg in str(exc)", "def unexpectedException(self):", "def test_not_h5py_group(self):\n with self.assertRaises(TypeError):\n self.map_digis(None)", "def test_parsing_too_long_knxip(self):\n raw = (\n 0x06,\n 0x10,\n 0x02,\n 0x07,\n 0x00,\n 0x10,\n 0x15,\n 0x00,\n 0x08,\n 0x01,\n 0xC0,\n 0xA8,\n 0xC8,\n 0x0C,\n 0xC3,\n 0xB4,\n 0x00,\n )\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with self.assertRaises(CouldNotParseKNXIP):\n knxipframe.from_knx(raw)", "def test_controller_status_from_knx_wrong_code(self):\n with pytest.raises(CouldNotParseKNXIP):\n DPTControllerStatus.from_knx((0x00,))", "def unexpected_error(self, exception):", "def test_class_errored(self, cls, exception):", "def test_parsing_too_long_knxip(self):\n raw = (\n 0x06,\n 0x10,\n 0x02,\n 0x07,\n 0x00,\n 0x10,\n 0x15,\n 0x00,\n 0x08,\n 0x01,\n 0xC0,\n 0xA8,\n 0xC8,\n 0x0C,\n 0xC3,\n 0xB4,\n 0x00,\n )\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with pytest.raises(CouldNotParseKNXIP):\n knxipframe.from_knx(raw)", "def test_verify_dicom_instance_exception(\n mock_engine: DicomImagePiiVerifyEngine,\n get_mock_dicom_instance: pydicom.dataset.FileDataset,\n):\n with pytest.raises(Exception) as exc_info:\n # Arrange\n padding_width = 25\n test_instance = deepcopy(get_mock_dicom_instance)\n del test_instance.PixelData\n expected_error_type = AttributeError\n\n # Act\n _, _, _ = mock_engine.verify_dicom_instance(test_instance, padding_width)\n\n # Assert\n assert expected_error_type == exc_info.typename", "def _check_exc(self):\n if self._exc is not None:\n raise self._exc", "def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def test_get_frame_no_source():\n frame_ingestor = FrameIngestor()\n with pytest.raises(RuntimeError):\n frame_ingestor.get_frame()", "def test_init_throws_missing_argument_exception(self):\n with self.assertRaises(Exception) as ex:\n MarkerId() # trying to create MarketId objectand waits for Exception\n\n self.getLogger().warning(\"Exception: %s\", ex.exception)", "def test_cannot_be_instantiated(self):\n with self.assertRaises(NotImplementedError):\n ClassicalIOChannel(0)", "def testUndefinedPlaybackRaisesException(self):\n\t\tx = BaseAction('x')\n\t\ttry:\n\t\t\tpass\n\t\texcept PlayackException, e:\n\t\t\tpass", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def test__init__raise_exception(self):\n self.assertRaises(TypeError, MasterNodeInterface)", "def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def test_init_throws_excessive_argument_exception(self):\n with 
self.assertRaises(Exception) as ex:\n # trying to create MarketId objectand waits for Exception\n MarkerId('test-name', 'arg2')\n\n self.getLogger().warning(\"Exception: %s\", ex.exception)", "def test_make_plot_invalid_plot_type(self):\n print(sys._getframe().f_code.co_name)\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n self.assertRaises(Exception,pp.make_plot,x,y,plot_type='wrong',msg='Invalid plot type')", "def test_can_instantiate(self):\n\n exc_thrown = False\n\n try:\n self.klass(*self.instantiate_args)\n except Exception:\n exc_thrown = True\n\n self.assertFalse(exc_thrown)", "def test_exc_on_missing_brack(self):\n with self.assertRaises(ExecutionException):\n pyint = Interpreter(limit=1)\n pyint.run(code=BF_MISSING_BRACK)", "def test_attempting_to_save_abstract_model_fails(self):\r\n with self.assertRaises(CQLEngineException):\r\n AbstractModelWithFullCols.create(pkey=1, data=2)", "def test_cannot_be_instantiated(self):\n with self.assertRaises(NotImplementedError):\n Channel(0)", "def testRaisesException(self):\n\t\tx = BaseAction('x')\n\t\tx.throws = Exception()\n\t\tself.failUnlessRaises(Exception, x.playback)", "def test_base_exception(self) -> None:\n with pytest.raises(BaseException) as e:\n 1 / 0\n assert isinstance(e.value, ZeroDivisionError)" ]
[ "0.7100994", "0.7027083", "0.62036103", "0.5987994", "0.592371", "0.58385265", "0.58301014", "0.5805744", "0.5727015", "0.57116526", "0.5697945", "0.567965", "0.56382775", "0.563067", "0.55982566", "0.5557917", "0.5549329", "0.55488443", "0.5522948", "0.552107", "0.5519002", "0.55177796", "0.551555", "0.54967403", "0.5482909", "0.54809076", "0.5476638", "0.5470366", "0.54648584", "0.54505014" ]
0.71374136
0
Create an event on the given group.
def create_event(self, event):
    body = event['body']
    body = json.loads(body)

    # Check all required fields are here
    required_fields = ['group_id', 'event_timestamp', 'location']
    for f in required_fields:
        if f not in body:
            return get_bad_request('POST body missing field {}'.format(f))

    group_id = body['group_id']
    event_timestamp = body['event_timestamp']
    event_timestamp = int(event_timestamp)
    location = body['location']
    recipe_name = None
    if 'recipe_name' in body:
        recipe_name = body['recipe_name']
    event_name = None
    if 'event_name' in body:
        event_name = body['event_name']

    user = self.mealShareUsers.get_user_cognito_data(event)
    current_user = user['user_id']

    # Requesting user must already be a member
    if not self.mealShareGroups.is_user_in_group(current_user, str(group_id)):
        return {
            'statusCode': 401,
            'statusMessage': 'User {} is not a member of the group ID {} and cannot create an event'.format(current_user, group_id),
            'group_id': group_id,
            'user_id': current_user
        }

    event_name = self.mealShareGroups.create_event(group_id, event_timestamp, location, recipe_name, event_name)
    if not event_name:
        return {
            'statusCode': 500,
            'statusMessage': 'FAILED to create event for group {} by user {}'.format(group_id, current_user),
            'event_name': None
        }
    else:
        return {
            'statusCode': 200,
            'statusMessage': 'Successfully created {} by {} for {}'.format(event_name, current_user, group_id),
            'group_id': group_id,
            'user_id': current_user,
            'event_name': event_name
        }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_group(self, group):\n for obj in self.event_json['events']:\n event_id = obj['id']\n event = self.world.events[event_id]\n group.add(event)", "def create_new_event(self):\n pass", "async def createEvent(self, event: Event) -> None:", "def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }", "def create_google_calendar_event(self, data):\n # TODO Refactor to celery\n gcalendar = services.GoogleCalendarService()\n gcalendar.initialize()\n\n event = {\n 'summary': f'{data.get(\"group\")} - {data.get(\"title\")}',\n 'location': data.get('location'),\n 'description': data.get('description'),\n 'start': {\n 'dateTime': data.get('start').isoformat(),\n },\n 'end': {\n 'dateTime': data.get('end').isoformat(),\n },\n }\n\n created_event = gcalendar.create_event(event)\n event_id = created_event.get('id')\n event_htmllink = created_event.get('htmlLink')\n\n return event_id, event_htmllink", "def _create_event(\n project,\n creator_id,\n datetime_start,\n datetime_end,\n description=\"Test Event\",\n location=\"test_location\",\n is_public=False,\n event_type=\"MN\",\n coordinator=None\n):\n event = Event(\n project=project,\n description=description,\n location=location,\n is_public=is_public,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n coordinator=coordinator,\n creator_id=creator_id\n )\n event.save()\n return event", "def create_group(self, tenant_id, group_id):\n maas_client = self._get_maas_client()\n d = maas_client.add_notification_and_plan()\n\n def create_group_in_db((notification, notification_plan)):\n return cass.create_group(\n self._db, tenant_id, group_id, notification, notification_plan)\n d.addCallback(create_group_in_db)\n\n return d", "def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)", "def create_event(self, **kwargs):\n events = self.variables['events']\n events.append(kwargs)\n self.variables['events'] = events", "def create_event(klass, form, creator):\n\n if form.is_recurring.data:\n # Series\n return klass.create_series(form, creator)\n # Single event\n return klass.create_single_event(form, creator)", "def create_event(self):\n self.driver.get(f'{self.base_url}/event')\n\n enter_event_name = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'eventName')))\n enter_event_name.send_keys(self.random_string)\n\n # self.driver.find_element_by_xpath('//*[@id=\"root\"]/div/div[3]/div/div[2]/div/div/div[1]/div/div[1]/div[1]/label[2]/span[1]').click()", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = 
ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_event(organizer, description, location, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Event.objects.create(event_organizer=organizer, event_desctiption=description, event_location=loaction, event_date = time)", "def fusion_api_create_events(self, body, api=None, headers=None):\n return self.event.create(body, api, headers)", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def create_event(self, campaign, relative_to, offset, unit, delivery_hour, message=None, flow=None):\n payload = self._build_params(campaign_uuid=campaign, relative_to=relative_to, offset=offset, unit=unit,\n delivery_hour=delivery_hour, message=message, flow_uuid=flow)\n return Event.deserialize(self._post('events', None, payload))", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def __on_group_created(self, logger, *args):", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def create_event(self, name, date):\n user = User.objects.create(username='userdemo')\n user.set_password('calnote24')\n user.save()\n Event.objects.create(name=name, date=date, user_id=user.id)", "def create_and_add_event(self, event_data):\n event = event_from_dict(event_data)\n self.add_event(event)", "def test_create_event(self):\n event_type = 'SERVICE NOTIFICATION'\n fields = EVENT_FIELDS.get(event_type, None)\n parts = [\n 'nagiosadmin',\n 'nagios4',\n 'Root Partition',\n 'CRITICAL',\n 'notify-service-by-email',\n 'DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]\n event = create_event(\n timestamp=1603813628, event_type=event_type, hostname='docker-desktop', fields=fields._make(parts)\n )\n\n assert event['timestamp'] == 1603813628\n assert event['event_type'] == 'SERVICE NOTIFICATION'\n assert event[\"msg_title\"] == 'Root Partition'\n assert event[\"source_type_name\"] == 'SERVICE NOTIFICATION'\n assert event[\"msg_text\"] == 'CRITICAL'\n assert event['tags'] == [\n 'contact:nagiosadmin',\n 'host:nagios4',\n 'check_name:Root Partition',\n 'event_state:CRITICAL',\n 'notification_type:notify-service-by-email',\n 'payload:DISK CRITICAL - free space: / 1499 MB (2.46% inode=77%):'\n ]", "def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to 
create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group", "def create_event() -> abc.Event:\n return get_asynclib().Event()", "def event_create(tenant_id, user_id=None):", "def createEvent(event):\n event = {\n 'summary': event.description,\n 'location': \"\",\n 'description': \"\",\n 'start': {\n 'dateTime': event.datetime_start,\n 'timeZone': \"America/Los_Angeles\"\n },\n 'end': {\n 'dateTime': event.datetime_end,\n 'timeZone': \"America/Los_Angeles\"\n },\n }\n\n event = service.events().insert(calendarId=SF_FUNCHEAP_CAL_ID, body=event).execute()", "def CreateNewEvent(arguments: List[Tuple[str, type]] = [], event_name: str = '') -> Event:\n pass", "def create_group_scene(self, name, group):\n data = {\n \"name\": name,\n \"group\": group,\n \"recycle\": True,\n \"type\": \"GroupScene\"\n }\n return self.bridge.bridge.post('/scenes', data)" ]
[ "0.6895084", "0.66228145", "0.6554237", "0.6434908", "0.6398888", "0.63672704", "0.6261991", "0.6242804", "0.61714673", "0.6137939", "0.6128037", "0.6087527", "0.6084018", "0.60555893", "0.6049443", "0.6049443", "0.6021393", "0.602137", "0.60209566", "0.6020377", "0.6008337", "0.59776086", "0.5956299", "0.5946207", "0.5916681", "0.5909057", "0.5862521", "0.5860794", "0.5852746", "0.5848599" ]
0.67467535
1
Get the email addresses collected between startdate and enddate.
def get_email_addresses(survey, startdatetime, enddatetime): token = settings.SURVEYGIZMO_API_TOKEN secret = settings.SURVEYGIZMO_API_TOKEN_SECRET emails = [] page = 1 more_pages = True survey_id = SURVEYS[survey]["email_collection_survey_id"] dtfmt = "%Y-%m-%d+%H:%M:%S" # Can't do anything without credentials. if token is None or secret is None: return emails while more_pages: response = requests.get( "https://restapi.surveygizmo.com/v2/survey/{survey}" "/surveyresponse?" "filter[field][0]=datesubmitted" "&filter[operator][0]=>=&filter[value][0]={start}" "filter[field][1]=datesubmitted" "&filter[operator][1]=<&filter[value][1]={end}" "&filter[field][2]=status&filter[operator][2]==" "&filter[value][2]=Complete" "&resultsperpage=500" "&page={page}" "&api_token={token}" "&api_token_secret={secret}".format( survey=survey_id, start=startdatetime.strftime(dtfmt), end=enddatetime.strftime(dtfmt), page=page, token=token, secret=secret, ), timeout=300, ) results = json.loads(response.content) total_pages = results.get("total_pages", 1) more_pages = page < total_pages emails = emails + [r["[question(13)]"] for r in results["data"]] page += 1 valid_emails = [] for email in emails: try: validate_email(email) except ValidationError: pass else: valid_emails.append(email) return valid_emails
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email_addresses(startdate, enddate, user, password):\n emails = []\n page = 1\n more_pages = True\n\n while more_pages:\n response = requests.get(\n 'https://restapi.surveygizmo.com/v2/survey/{survey}'\n '/surveyresponse?'\n 'filter[field][0]=datesubmitted'\n '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0'\n '&filter[operator][1]=<&filter[value][1]={end}+0:0:0'\n '&filter[field][1]=status&filter[operator][1]=='\n '&filter[value][1]=Complete'\n '&resultsperpage=500'\n '&page={page}'\n '&user:pass={user}:{password}'.format(\n survey=EMAIL_COLLECTION_SURVEY_ID, start=startdate,\n end=enddate, page=page, user=user, password=password))\n\n results = json.loads(response.content)\n total_pages = results['total_pages']\n more_pages = page < total_pages\n emails = emails + [r['[question(13)]'] for r in results['data']]\n\n return emails", "def get_events(self, start_date: datetime, end_date: datetime):\n\n events = []\n # Iterate through all events over the given\n for event_string in self._calendar.date_search(start_date, end_date):\n events.append(Event(event_string))\n return events", "def get_emails(self):\n email_ids = self.get_email_ids()\n Email = get_email_class()\n return [email for email in Email.objects.filter(pk__in=email_ids)]", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def filter_meetings_by_date(self, start_date, end_date):\n db_connection = DbConnection()\n\n try:\n connection = db_connection.get_connection()\n\n cursor = connection.cursor()\n cursor.execute(self.select_sql, (start_date, end_date))\n rows = cursor.fetchall()\n\n cursor.close()\n db_connection.close_connection()\n except Exception:\n raise\n\n else:\n\n return rows", "def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list", "def get_emails(self, is_verified=True, include_primary=True):\n if include_primary:\n emails = self.associated_emails.filter(is_verified=is_verified)\n else:\n emails = self.associated_emails.filter(is_verified=is_verified,\n is_primary_email=False)\n return [ae.email for ae in emails]", "def getEmail(self, data):\r\n\t\tprint('test')\r\n\t\t# Empty array to hold unique emails\r\n\t\tno_dp_email = 
[]\r\n\r\n\t\t# Loop through each row in the dataframe...\r\n\t\tfor row in data.itertuples():\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# Parse through the row's keywords string for emails...\r\n\t\t\temails = re.findall(\"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\", row.keywords)\r\n\t\t\tprint(emails)\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# For each email in the array...\r\n\t\t\tfor email in emails:\r\n\t\t\t\tprint('test')\r\n\r\n\t\t\t\temail = str(email)\r\n\r\n\t\t\t\t# Append this email onto the array if it is not a repeat\r\n\t\t\t\tif email not in no_dp_email:\r\n\t\t\t\t\tprint('test')\r\n\r\n\t\t\t\t\tno_dp_email.append(email)\r\n\t\t\r\n\t\t# return array of unique emails\r\n\t\treturn no_dp_email", "def get_emails():\n\n # generate the gmail api service\n service = build_gmail_api_v1()\n\n # compute date for one year ago\n today = date.today()\n one_year_ago = today - timedelta(days=365.25)\n start = one_year_ago - timedelta(days=1)\n end = one_year_ago + timedelta(days=1)\n start_string = start.strftime(\"%Y/%m/%d\")\n end_string = end.strftime(\"%Y/%m/%d\")\n query_string = f'after:{start_string} before:{end_string}'\n\n # generate the gmail api request (get list of messages from one year ago)\n request = service.users().messages().list(userId='me', q=query_string)\n\n # try to get the api response\n try:\n response = request.execute()\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n return []\n\n # get list of message ids from the api response\n messages = list(response[\"messages\"])\n ids = [message[\"id\"] for message in messages]\n\n # store all emails in a list\n data_to_display = []\n\n # loop through each message id\n for id in ids:\n\n try:\n # store email data in a dict\n email = {}\n\n # get message data by querying gmail api using message id\n request = service.users().messages().get(userId='me', id=id)\n response = request.execute()\n\n # get date, subject, from, to, etc from message header\n headers = list(response[\"payload\"][\"headers\"])\n looking_for = [\"Date\", \"Subject\", \"From\", \"To\"]\n for header in headers:\n if header[\"name\"] in looking_for:\n email[header[\"name\"]] = header[\"value\"]\n\n # try to get message body (base64) from response\n # the json structure varies a lot so that is why there are no many try/except\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][1][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n base64_message = \"Ti9B\"\n\n # decode the email body\n email[\"body\"] = base64.urlsafe_b64decode(\n base64_message).decode('utf-8')\n\n # populate list with email\n data_to_display.append(email)\n\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n\n return data_to_display", "def on_call_email_addresses(self):\n if self._on_call_email_addresses is not None:\n return self._on_call_email_addresses\n\n url = 'https://{}.pagerduty.com/api/v1/users/on_call'.format(self.pager_duty_domain_prefix)\n on_call = self._make_request(url, headers={'Authorization': 'Token token=' + 
self.pager_duty_token})\n users = set() # users can be in multiple schedule, this will de-dupe\n\n for user in on_call['users']:\n for schedule in user['on_call']:\n if schedule['level'] <= self.escalation_level:\n users.add(user['email'])\n\n log.info('Found %d users on-call', len(users))\n self._on_call_email_addresses = users\n return users", "def get_email_addresses(r):\n email_match = re.findall(r'[\\w.-]+@[\\w.-]+.\\w+', r)\n email_list = []\n if email_match:\n for match in email_match:\n if match not in email_list:\n email_list.append(match)\n email_list = set(email_list)\n return email_list", "def subscriber_email_addresses(self) -> Sequence[str]:\n return pulumi.get(self, \"subscriber_email_addresses\")", "def get_dates(self, candidates=None, start=None, end=None):\n if candidates is not None:\n return [date for date in candidates if date in self.data]\n if start is None:\n start = self.first_date\n if end is None:\n end = self.last_date\n return [date for date in self.data if start <= date <= end]", "def get_emails_from_addressbook(self, id, limit=0, offset=0):\n logger.info(\"Function call: get_emails_from_addressbook: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def _date_range(start: str, end: str) -> List[str]:\n start_dt = _parse_ISO8601_date(start)\n end_dt = _parse_ISO8601_date(end)\n if start_dt > end_dt:\n raise ValidationError(\n \"Start date needs to be greater than or equal end date.\"\n )\n if (\n start_dt < _parse_ISO8601_date('1900') or\n end_dt > datetime.datetime.now().astimezone()\n ):\n raise ValidationError(\n \"Start date needs to be less than 1900-01-01T00:00:00Z and end\"\n \" date can't be from the feature.\"\n )\n return map(lambda date: date.isoformat(), rrule(\n freq=DAILY,\n dtstart=start_dt,\n until=end_dt,\n cache=True\n ))", "def dates_inbetween(self, start, end):\n\n return [start + timedelta(days=i) for i in xrange((end - start).days + 1)]", "def temp_daterange(start_date,end_date):\r\n # Query\r\n mam_temp_dr_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for date range with specific start_date and end_date\r\n mam_temp_start_end = list(np.ravel(mam_temp_dr_results))\r\n return jsonify(mam_temp_start_end)", "def get_emails(params, start_response):\n custodian = params.getfirst('custodian')\n date = params.getfirst('date')\n tfidf = params.getfirst('tfidf')\n out = json.dumps(documents_out(custodian, date, tfidf))\n status = '200 OK'\n response_headers = [('Content-type', 'application/json'),\n ('Access-Control-Allow-Origin', '*'),\n ('Content-Length', str(len(out)))]\n start_response(status, response_headers)\n return [out]", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def get_from_email(column_names, start_dates, end_dates, mail_server,\n account, sender, password):\n time_flag = None\n dfs = 
{test: pd.DataFrame(columns=column_names[test]) \\\n for test in [\"covid_ag\", \"flu_ag\"]}\n start_date = compare_dates(start_dates[\"covid_ag\"],\n start_dates[\"flu_ag\"], \"s\")\n end_date = compare_dates(end_dates[\"covid_ag\"],\n end_dates[\"flu_ag\"], \"l\")\n\n with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:\n for search_date in [start_date + timedelta(days=x)\n for x in range((end_date - start_date).days + 1)]:\n for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):\n for att in message.attachments:\n name = att.filename\n\n # Check the test type\n if \"Sars\" in name:\n test = \"covid_ag\"\n elif \"Flu\" in name:\n test = \"flu_ag\"\n else:\n continue\n\n # Check whether we pull the data from a valid time range\n whether_in_range = check_whether_date_in_range(\n search_date, start_dates[test], end_dates[test])\n if not whether_in_range:\n continue\n\n print(f\"Pulling {test} data received on %s\"%search_date.date())\n toread = io.BytesIO()\n toread.write(att.payload)\n toread.seek(0) # reset the pointer\n newdf = pd.read_excel(toread) # now read to dataframe\n newdf = regulate_column_names(newdf, test)\n dfs[test] = dfs[test].append(newdf)\n time_flag = search_date\n return dfs, time_flag", "def recipients(self) -> ty.List[str]:", "def getMatchesInDateRange(self, startDate=None, endDate=None):\n return None", "def get_emails(parsed_data):\n result = []\n known_values = []\n contacts = {'registrant_contact': [], 'administrative_contact': [], 'technical_contact': [],\n 'domain_registrar': []}\n if 'registrant_contact' in parsed_data:\n contacts['registrant_contact'].append(parsed_data['registrant_contact'])\n if 'administrative_contact' in parsed_data:\n contacts['administrative_contact'].append(parsed_data['administrative_contact'])\n if 'technical_contact' in parsed_data:\n contacts['technical_contact'].append(parsed_data['technical_contact'])\n if 'domain_registrar' in parsed_data:\n contacts['domain_registrar'].append(parsed_data['domain_registrar'])\n # parsing email address from contact block\n\n for contact, info in contacts.items():\n if info is not None:\n d = {'type': 2, 'data': '', 'properties': {}, 'special_properties': {}, 'is_valid': False, 'ref': {}}\n # properties dictionary\n is_valid = {}\n owner = {'owner': '', 'type': 11}\n organization = {'organization': '', 'type': 11}\n local_address = {'local_address': '', 'type': 5}\n domain_name = {'domain_name': '', 'type': 12}\n properties_list = []\n special_properties_list = []\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': ''}})\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n\n for name in info:\n if \"email_address\" in name:\n if name['email_address'] in known_values:\n break\n for feature in name.keys():\n if feature == \"email_address\":\n d['data'] = name['email_address']\n known_values.append(name['email_address'])\n\n if feature == \"full_name\":\n owner['owner'] = name['full_name']\n properties_list.append(owner)\n\n if feature == \"city_name\":\n organization['organization'] = name['city_name']\n properties_list.append(organization)\n\n d['is_valid'] = ''\n is_valid = {'isvalid': '', 'type': 0}\n\n # prevent from create result if phone number of contact is not available\n if d['data'] == '':\n continue\n try:\n domain_name['domain_name'] = d['data'].split('@')[1]\n 
local_address['local_address'] = d['data'].split('@')[0]\n properties_list.append(domain_name)\n properties_list.append(local_address)\n except:\n\n domain_name['domain_name'] = ''\n local_address['local_address'] = d['data']\n properties_list.append(domain_name)\n properties_list.append(local_address)\n\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': '', 'label': ''}})\n d['ref']['label'] = \"%s_name\" % contact\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n d['properties'] = properties_list\n special_properties_list.append(is_valid)\n d['special_properties'] = special_properties_list\n result.append(d)\n\n return result", "def email_all():\n\tSubscribtion = session.query(email).all()\n\treturn subscribtion_object", "def email_list(self) -> Sequence[str]:\n return pulumi.get(self, \"email_list\")", "def emails(self):\r\n return emails.Emails(self)", "def select_by_sent_date(begin_date, end_date):\n sql = \"SELECT * FROM dostawy.przesylki WHERE przesylka_dataNadania > %s AND przesylka_dataNadania < %s;\"\n val = (begin_date, end_date)\n rows = DBconnector.fetch_query_parameters(sql, val)\n return _wrap_in_parcel_list(rows)", "def emails(self):\r\n url = api_base + 'emails/'\r\n return json.loads(self.load_url(url))", "def get_email_addresses(user_ids: Set[UserID]) -> Set[Tuple[UserID, str]]:\n return db.session \\\n .query(\n DbUser.id,\n DbUser.email_address,\n ) \\\n .filter(DbUser.id.in_(user_ids)) \\\n .all()", "def get_events(self, from_date=None, to_date=None, owner=None):\n kwargs = {}\n\n if from_date and to_date:\n kwargs['start_datetime__range'] = [from_date, to_date]\n\n if owner:\n if isinstance(owner, Iterable):\n kwargs['owner__in'] = owner\n else:\n kwargs['owner'] = owner\n\n return self.model.objects.filter(**kwargs)" ]
[ "0.77688205", "0.63319296", "0.6218386", "0.6051792", "0.6028005", "0.59827083", "0.59140414", "0.5900727", "0.58886105", "0.5846171", "0.5838072", "0.5797754", "0.57617724", "0.57513386", "0.5745859", "0.5739336", "0.5732516", "0.5727422", "0.5705582", "0.5703198", "0.5688061", "0.5674965", "0.5653088", "0.56501764", "0.56480503", "0.5635583", "0.56189716", "0.5614192", "0.55814433", "0.55555946" ]
0.73141956
1
Add email to the exit survey campaign.
def add_email_to_campaign(survey, email): token = settings.SURVEYGIZMO_API_TOKEN secret = settings.SURVEYGIZMO_API_TOKEN_SECRET if token is None or secret is None: return survey_id = SURVEYS[survey]["exit_survey_id"] campaign_id = SURVEYS[survey]["exit_survey_campaign_id"] try: requests.put( "https://restapi.surveygizmo.com/v2/survey/{survey}" "/surveycampaign/{campaign}/contact?" "semailaddress={email}" "&api_token={token}" "&api_token_secret={secret}".format( survey=survey_id, campaign=campaign_id, email=email, token=token, secret=secret ), timeout=30, ) except requests.exceptions.Timeout: print("Timedout adding: %s" % email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reminder_emails_opt_out(self, reminder_emails_opt_out):\n\n self._reminder_emails_opt_out = reminder_emails_opt_out", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def Add_attendee(self, email):\n if (email not in self.Attendees) and (email not in self.Waitlist):\n self.Attendees[email] = Attendee(email)\n else:\n if email in self.Waitlist:\n print(\"Call Promote_from_waitlist() to move an applicant from \"\n \"the waitlist.\")\n raise PreexistingAddressException(email)", "def save(self, **kwargs):\n\t\tif (self.answer != \"\"):\n\t\t\tself.answered = True\n\t\t\tsuper().save(**kwargs)\n\t\t\tmail_subject = \"Your Question has been answered.\"\n\t\t\t# Do not break the following string or the email will get cut off\n\t\t\tmessage = f\"Hi {self.user.first_name},\\n\\n You're receiving this email because you asked a question about {self.hall.name}. The HonestHalls team has now answered your question! Revisit the {self.hall.name} hall page to see their response.\\n\\n\"\n\n\t\t\temail = EmailMessage(\n\t\t\t\tmail_subject, message, to=[self.user.email]\n\t\t\t)\n\t\t\temail.send()\n\t\telse:\n\t\t\tsuper().save(**kwargs)", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def eap_email():\n\n # skip authorization\n data = request.get_json()\n email_address = data['email_address']\n\n # email address validation\n if not check_valid_email_address(email_address):\n return json.dumps({\"success\": False}), 403\n\n # create object\n subject = '[EAP] - New Inquiry from %s' % email_address\n body = '''<html><head></head><body>%s</body></html>''' % EAP_INQUIRY_BODY.format(email_address)\n email = {\n 'email_from': settings.EMAIL_AUTHOR_PROTECTED,\n 'email_to': settings.EMAIL_AUTHOR_PROTECTED,\n 'subject': subject,\n 'body': body,\n 'cc': [],\n 'sent': False,\n 'num_failures': 0,\n 'errors': []\n }\n\n # insert into mongodb\n email_conn = app.data.driver.db['email']\n email_conn.insert(email)\n\n return json.dumps({\"success\": True}), 201", "def add_submission_email(request, remote_ip, name, rev, submission_pk, message, by, msgtype):\n\n #in_reply_to = form.cleaned_data['in_reply_to']\n # create Message\n parts = pyzmail.parse.get_mail_parts(message)\n body=''\n for part in parts:\n if part.is_body == 'text/plain' and part.disposition == None:\n payload, used_charset = pyzmail.decode_text(part.get_payload(), part.charset, None)\n body = body + payload + '\\n'\n\n msg = submit_message_from_message(message, body, by)\n\n if (submission_pk != None):\n # Must exist - we're adding a message to an existing submission\n submission = Submission.objects.get(pk=submission_pk)\n else:\n # Must not exist\n submissions = Submission.objects.filter(name=name,rev=rev).exclude(state_id='cancel')\n if submissions.count() > 0:\n raise ValidationError(\"Submission {} already exists\".format(name))\n \n # create Submission using the name\n try:\n submission = Submission.objects.create(\n 
state_id=\"waiting-for-draft\",\n remote_ip=remote_ip,\n name=name,\n rev=rev,\n title=name,\n note=\"\",\n submission_date=datetime.date.today(),\n replaces=\"\",\n )\n from ietf.submit.utils import create_submission_event, docevent_from_submission\n desc = \"Submission created for rev {} in response to email\".format(rev)\n create_submission_event(request, \n submission,\n desc)\n docevent_from_submission(request,\n submission,\n desc)\n except Exception as e:\n log(\"Exception: %s\\n\" % e)\n raise\n\n if msgtype == 'msgin':\n rs = \"Received\"\n else:\n rs = \"Sent\"\n\n desc = \"{} message - manual post - {}-{}\".format(rs, name, rev)\n submission_email_event = SubmissionEmailEvent.objects.create(\n desc = desc,\n submission = submission,\n msgtype = msgtype,\n by = by,\n message = msg)\n #in_reply_to = in_reply_to\n\n save_submission_email_attachments(submission_email_event, parts)\n return submission, submission_email_event", "def test_email_after_contest_end(self):\n self.prep_consumer()\n temp_date = settings.CONTEST_END_DATE\n settings.CONTEST_END_DATE = str(\n datetime.today().date() - timedelta(days=1))\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n log = get_last_db_log(\n 'email_gateway.tasks.send_unqualified_emails', 'EMAIL')\n if log:\n self.fail('Performed task even though contest ended.')\n settings.CONTEST_END_DATE = temp_date", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def set_email_notification(self, hit_type, email, event_types=None):\r\n return self._set_notification(hit_type, 'Email', email, event_types)", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)", "def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):\r\n # Get information from current task's request:\r\n task_id = subtask_status.task_id\r\n\r\n try:\r\n course_email = CourseEmail.objects.get(id=email_id)\r\n except CourseEmail.DoesNotExist as exc:\r\n log.exception(\"Task %s: could not find email id:%s to send.\", task_id, email_id)\r\n raise\r\n\r\n # Exclude optouts (if not a retry):\r\n # Note that we don't have to do the optout logic at all if this is a retry,\r\n # because we have presumably already performed the optout logic on the first\r\n # attempt. 
Anyone on the to_list on a retry has already passed the filter\r\n # that existed at that time, and we don't need to keep checking for changes\r\n # in the Optout list.\r\n if subtask_status.get_retry_count() == 0:\r\n to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)\r\n subtask_status.increment(skipped=num_optout)\r\n\r\n course_title = global_email_context['course_title']\r\n subject = \"[\" + course_title + \"] \" + course_email.subject\r\n from_addr = _get_source_address(course_email.course_id, course_title)\r\n\r\n course_email_template = CourseEmailTemplate.get_template()\r\n try:\r\n connection = get_connection()\r\n connection.open()\r\n\r\n # Define context values to use in all course emails:\r\n email_context = {'name': '', 'email': ''}\r\n email_context.update(global_email_context)\r\n\r\n while to_list:\r\n # Update context with user-specific values from the user at the end of the list.\r\n # At the end of processing this user, they will be popped off of the to_list.\r\n # That way, the to_list will always contain the recipients remaining to be emailed.\r\n # This is convenient for retries, which will need to send to those who haven't\r\n # yet been emailed, but not send to those who have already been sent to.\r\n current_recipient = to_list[-1]\r\n email = current_recipient['email']\r\n email_context['email'] = email\r\n email_context['name'] = current_recipient['profile__name']\r\n\r\n # Construct message content using templates and context:\r\n plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)\r\n html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)\r\n\r\n # Create email:\r\n email_msg = EmailMultiAlternatives(\r\n subject,\r\n plaintext_msg,\r\n from_addr,\r\n [email],\r\n connection=connection\r\n )\r\n email_msg.attach_alternative(html_msg, 'text/html')\r\n\r\n # Throttle if we have gotten the rate limiter. This is not very high-tech,\r\n # but if a task has been retried for rate-limiting reasons, then we sleep\r\n # for a period of time between all emails within this task. Choice of\r\n # the value depends on the number of workers that might be sending email in\r\n # parallel, and what the SES throttle rate is.\r\n if subtask_status.retried_nomax > 0:\r\n sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)\r\n\r\n try:\r\n log.debug('Email with id %s to be sent to %s', email_id, email)\r\n\r\n with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):\r\n connection.send_messages([email_msg])\r\n\r\n except SMTPDataError as exc:\r\n # According to SMTP spec, we'll retry error codes in the 4xx range. 
5xx range indicates hard failure.\r\n if exc.smtp_code >= 400 and exc.smtp_code < 500:\r\n # This will cause the outer handler to catch the exception and retry the entire task.\r\n raise exc\r\n else:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n except SINGLE_EMAIL_FAILURE_ERRORS as exc:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n else:\r\n dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])\r\n if settings.BULK_EMAIL_LOG_SENT_EMAILS:\r\n log.info('Email with id %s sent to %s', email_id, email)\r\n else:\r\n log.debug('Email with id %s sent to %s', email_id, email)\r\n subtask_status.increment(succeeded=1)\r\n\r\n # Pop the user that was emailed off the end of the list only once they have\r\n # successfully been processed. (That way, if there were a failure that\r\n # needed to be retried, the user is still on the list.)\r\n to_list.pop()\r\n\r\n except INFINITE_RETRY_ERRORS as exc:\r\n dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_nomax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_nomax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True\r\n )\r\n\r\n except LIMITED_RETRY_ERRORS as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # Errors caught are those that indicate a temporary condition that might succeed on retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n except BULK_EMAIL_FAILURE_ERRORS as exc:\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n num_pending = len(to_list)\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with \"fatal\" exception. %d emails unsent.',\r\n task_id, email_id, num_pending)\r\n # Update counters with progress to date, counting unsent emails as failures,\r\n # and set the state to FAILURE:\r\n subtask_status.increment(failed=num_pending, state=FAILURE)\r\n return subtask_status, exc\r\n\r\n except Exception as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # These are unexpected errors. 
Since they might be due to a temporary condition that might\r\n # succeed on retry, we give them a retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',\r\n task_id, email_id)\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n else:\r\n # All went well. Update counters with progress to date,\r\n # and set the state to SUCCESS:\r\n subtask_status.increment(state=SUCCESS)\r\n # Successful completion is marked by an exception value of None.\r\n return subtask_status, None\r\n finally:\r\n # Clean up at the end.\r\n connection.close()", "async def add_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list.keys():\n await ctx.send(\n \"There is already an email address configured, \"\n \"Please use update command to update it..!\")\n return\n else:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been configured successfully..!\")", "def action_invite(self):\n self.ensure_one()\n\n if not self.env.user.email:\n raise UserError(_(\"Unable to post message, please configure the sender's email address.\"))\n\n mail_values = []\n for partner_id in self.partner_ids:\n slide_channel_partner = self.channel_id._action_add_members(partner_id)\n if slide_channel_partner:\n mail_values.append(self._prepare_mail_values(slide_channel_partner))\n\n # TODO awa: change me to create multi when mail.mail supports it\n for mail_value in mail_values:\n self.env['mail.mail'].sudo().create(mail_value)\n\n return {'type': 'ir.actions.act_window_close'}", "def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)", "def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")", "def test_send_email_on_invite(self):\n\n league = self.create_league()\n\n season = self.create_season(league)\n team = self.create_team(season)\n\n player = self.create_player()\n\n send_user_email_on_join(player, team.id)\n\n self.assertEqual(len(mail.outbox), 1)\n\n # if testing manually:\n # import pathlib\n # pathlib.Path(\"test_email.html\").write_text(last_sent.body)", "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(blank_contact)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n 
daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)" ]
[ "0.5716118", "0.5541047", "0.5525604", "0.5408878", "0.540882", "0.53971654", "0.53807753", "0.5330207", "0.525575", "0.525575", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5244311", "0.5234358", "0.5233025", "0.52278787", "0.5195473", "0.51805544", "0.51565224", "0.51437163", "0.5138165", "0.5125941", "0.5115815" ]
0.65830237
0
Collect and aggregate the exit survey results for the date.
def get_exit_survey_results(survey, date): token = settings.SURVEYGIZMO_API_TOKEN secret = settings.SURVEYGIZMO_API_TOKEN_SECRET answers = [] page = 1 more_pages = True survey_id = SURVEYS[survey]["exit_survey_id"] # Aggregate results. summary = { "yes": 0, "no": 0, "dont-know": 0, } # Can't do anything without credentials. if token is None or secret is None: return summary while more_pages: response = requests.get( "https://restapi.surveygizmo.com/v2/survey/{survey}" "/surveyresponse?" "filter[field][0]=datesubmitted" "&filter[operator][0]=>=&filter[value][0]={start}+0:0:0" "&filter[field][1]=datesubmitted" "&filter[operator][1]=<&filter[value][1]={end}+0:0:0" "&filter[field][2]=status&filter[operator][2]==" "&filter[value][2]=Complete" "&resultsperpage=500" "&page={page}" "&api_token={token}" "&api_token_secret={secret}".format( survey=survey_id, start=date, end=date + timedelta(days=1), page=page, token=token, secret=secret, ), timeout=300, ) results = json.loads(response.content) total_pages = results.get("total_pages", 0) more_pages = page < total_pages answers = answers + [r.get("[question(2)]") for r in results.get("data", [])] page += 1 for answer in answers: lower_stripped = answer.lower().strip() if lower_stripped in ["no", "yes"]: summary[lower_stripped] += 1 else: summary["dont-know"] += 1 return summary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregate_results(self):\n\n raise NotImplementedError", "def clean_cases(data):\n newdata=[]\n #Add up Bucks Data\n bucks=defaultdict(list)\n for i in data:\n if i['areaName'] in ['Chiltern','Aylesbury Vale','South Bucks','Wycombe']:\n bucks[i['date']].append(i)\n else:\n newdata.append(i)\n log.debug(bucks)\n for _date,_all in bucks.items():\n item={'areaName': 'Buckinghamshire','areaCode':'E06000060','specimenDate':_date}\n item['newCasesBySpecimenDate']=sum([x['newCasesBySpecimenDate'] for x in _all])\n item['cumCasesBySpecimenDate']=sum([x['cumCasesBySpecimenDate'] for x in _all])\n newdata.append(item)\n\n return newdata", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def processReports(self):\n count = 0\n for r in self.reports:\n #need to change the next two lines so that the fields are not hard-coded\n self.currentCase = r.id\n self.currentText = r.impression.lower()\n self.analyzeReport(self.currentText,\n \"disease\",\n modFilters=['indication','probable_existence',\n 'definite_existence',\n 'historical','future','pseudoneg',\n 'definite_negated_existence',\n 'probable_negated_existence'])\n\n self.recordResults()", "def dataExtract(queryResults):\n days = ['MondayCollect',\n 'TuesdayCollect',\n 'WednesdayCollect',\n 'ThursdayCollect',\n 
'FridayCollect',\n 'SaturdayCollect',\n 'SundayCollect']\n\n #counting the instances of bin collections\n parkCount = 0\n roadingCount = 0\n otherCount = 0\n\n #output totals of bin collections\n parkOutput = []\n roadingOutput = []\n otherOutput = []\n \n #iterate over each day\n for day in days:\n \n #iterate over the number of bins\n for i in range(len(queryResults)):\n \n #check if the bin was collected on the day...\n if str(queryResults[i]['attributes'][day]).strip().lower() == 'yes':\n \n #unknown formatting issue with the data, these lines fix it\n strResult = str(queryResults[i]['attributes']['Owner'])\n strResultForm = strResult.lower().strip()\n \n #update the counts if True\n if strResultForm == 'roading':\n roadingCount += 1\n elif strResultForm == 'parks':\n parkCount += 1\n elif strResultForm == 'private':\n otherCount += 1\n else:\n otherCount +=1\n\n #print \"Day: {} \\nparkCount: {} \\nroadingCount: {} \\notherCount: {} \\n\\n\".format(day,parkCount,roadingCount,otherCount)\n \n parkOutput.append(parkCount)\n roadingOutput.append(roadingCount)\n otherOutput.append(otherCount)\n \n parkCount = 0\n roadingCount =0\n otherCount =0\n \n return parkOutput,roadingOutput,otherOutput", "def last_days_results(self, days):\n return self.security['Date', 'Close', 'FinalDecision'][-days:]", "def aggregate_results(observers):\n return None", "def count_occurrences_per_day(measurehours=[8,15,23], untiltoday=False, savedatafile=True, savetype='excel', overwrite=False, verbose=True,\n filename='lungemedLPR3dataframe', datafileext=None):\n savepath = 'O:\\Administration\\\\02 - Økonomi og PDK\\Medarbejdermapper\\Kasper\\Focus1 - Ad hoc opgaver\\Lungemed sengedage og visitationer\\plots\\\\'\n if os.path.isfile(savepath+filename) and savedatafile and not overwrite:\n sys.exit(' Was asked to store data but overwrite=False and file already exists... 
hence exiting')\n\n if verbose: print(' - Getting the data to look at ')\n dataframe_days, dataframe_vis = lbv.getdata(verbose=verbose, filenameext=datafileext)\n outdic = {}\n\n for measurehour in measurehours:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if ('SLA' in datafileext) or ('SUH' in datafileext):\n start_day = datetime.datetime.strptime(\"09-03-2022 \" + str(measurehour) + \":00:00\", \"%d-%m-%Y %H:%M:%S\")\n else:\n start_day = datetime.datetime.strptime(\"02-02-2019 \"+str(measurehour)+\":00:00\", \"%d-%m-%Y %H:%M:%S\")\n\n if untiltoday:\n end_day = datetime.datetime.strptime(str(datetime.datetime.today()).split(' ')[0]+' '+str(measurehour)+\":00:00\", \"%Y-%m-%d %H:%M:%S\")\n elif ('SLA' in datafileext) or ('SUH' in datafileext):\n end_day = datetime.datetime.strptime(np.str(dataframe_days['INDTIDSPUNKT_DRGKONTAKT'].max()+datetime.timedelta(days=2)).split(' ')[0]+' '+str(measurehour)+\":00:00\", \"%Y-%m-%d %H:%M:%S\")\n else:\n end_day = datetime.datetime.strptime(\"02-05-2019 \" + str(measurehour) + \":00:00\", \"%d-%m-%Y %H:%M:%S\")\n date_list = [start_day + datetime.timedelta(days=x) for x in range(0, (end_day - start_day).days)]\n\n if verbose: print(' - Will count how many patients are in beds at any given day between '+\n start_day.strftime(\"%d-%m-%Y\")+' and '+end_day.strftime(\"%d-%m-%Y\")+' at '+str(measurehour)+\" o'clock\")\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print('---- \"Counting parameters from \"bed days data frame\" ----')\n count_cpr = [0] * len(date_list)\n occupancy_available = [0] * len(date_list)\n occupancy_actual = [0] * len(date_list)\n\n for pp, patient in enumerate(dataframe_days['CPR']):\n intime = dataframe_days['INDTIDSPUNKT_DRGKONTAKT'][pp]\n outtime = dataframe_days['UDTIDSPUNKT_DRGKONTAKT'][pp]\n\n for dd, datecheck in enumerate(np.asarray(date_list)):\n if verbose:\n infostr = ' Checking the date '+datecheck.strftime(\"%d-%m-%Y\")+' for patient number '+str(pp+1)\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n\n if (intime <= datecheck) and (datecheck <= outtime):\n count_cpr[dd] = count_cpr[dd] + 1\n\n if verbose: print('\\n - Estimating the occupancy in the available and actual beds ')\n for dd, datecheck in enumerate(np.asarray(date_list)):\n if 'SUH' in datafileext:\n if (datecheck > datetime.datetime.strptime(\"10-03-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) & \\\n (datecheck < datetime.datetime.strptime(\"27-06-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")):\n NbedsSUH = 18\n elif (datecheck > datetime.datetime.strptime(\"27-06-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) &\\\n (datecheck < datetime.datetime.strptime(\"08-08-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")): # sommer lavaktivitet\n NbedsSUH = 14\n elif (datecheck > datetime.datetime.strptime(\"24-12-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) &\\\n (datecheck < datetime.datetime.strptime(\"02-01-2023 00:00:00\", \"%d-%m-%Y %H:%M:%S\")): # jul lavaktivitet\n NbedsSUH = 14\n elif (datecheck > datetime.datetime.strptime(\"02-01-2023 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) &\\\n (datecheck < datetime.datetime.strptime(\"01-01-2024 00:00:00\", \"%d-%m-%Y %H:%M:%S\")):\n NbedsSUH = 20\n else:\n NbedsSUH = 18\n\n occupancy_available[dd] = count_cpr[dd] / NbedsSUH * 100\n elif 'SLA' in datafileext:\n occupancy_available[dd] = count_cpr[dd] / 24. 
* 100\n else:\n if datecheck < datetime.datetime.strptime(\"10-06-2021 00:00:00\", \"%d-%m-%Y %H:%M:%S\"):\n occupancy_available[dd] = count_cpr[dd] / 24. * 100\n else:\n occupancy_available[dd] = count_cpr[dd] / 16. * 100\n\n if datecheck < datetime.datetime.strptime(\"01-03-2021 00:00:00\", \"%d-%m-%Y %H:%M:%S\"):\n occupancy_actual[dd] = count_cpr[dd] / 24. * 100\n else:\n occupancy_actual[dd] = count_cpr[dd] / 16. * 100\n\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print('---- \"Counting parameters from \"visitations data frame\" ----')\n count_vis_aka = [0] * len(date_list)\n count_vis_lungNAE = [0] * len(date_list)\n count_vis_lungSLA = [0] * len(date_list)\n count_vis_other = [0] * len(date_list)\n\n for pp, patient in enumerate(dataframe_vis['CPR']):\n intime = dataframe_vis['INDTIDSPUNKT_DRGKONTAKT'][pp]\n outtime = dataframe_vis['UDTIDSPUNKT_DRGKONTAKT'][pp]\n\n for dd, datecheck in enumerate(np.asarray(date_list)):\n if verbose:\n infostr = ' Checking the date ' + datecheck.strftime(\n \"%d-%m-%Y\") + ' for patient number ' + str(pp + 1)\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n\n if intime.strftime(\"%d-%m-%Y\") == datecheck.strftime(\"%d-%m-%Y\"):\n if ('Akut Afd. 1.sal, Sengeafs., SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()) or \\\n ('Akut Afd., Skadestue, SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()) or \\\n ('Akut Afd.stuen, Sengeafs., SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()):\n count_vis_aka[dd] = count_vis_aka[dd] + 1\n elif ('Lungemed. Sengeafs., NAE'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()) or \\\n ('Med. Lunge Sengeafs., NAE'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()):\n count_vis_lungNAE[dd] = count_vis_lungNAE[dd] + 1\n elif ('Med. 
Lunge Sengeafs., SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()):\n count_vis_lungSLA[dd] = count_vis_lungSLA[dd] + 1\n else:\n count_vis_other[dd] = count_vis_other[dd] + 1\n\n if verbose: print(' - Adding results to output dictionary')\n outdic['dates_'+str(measurehour)] = date_list\n outdic['count_cpr_'+str(measurehour)] = count_cpr\n outdic['occupancy_available_'+str(measurehour)] = occupancy_available\n outdic['occupancy_actual_'+str(measurehour)] = occupancy_actual\n outdic['count_vis_aka_'+str(measurehour)] = count_vis_aka\n outdic['count_vis_lungNAE_'+str(measurehour)] = count_vis_lungNAE\n outdic['count_vis_lungSLA_'+str(measurehour)] = count_vis_lungSLA\n outdic['count_vis_other_'+str(measurehour)] = count_vis_other\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print('\\n - Building data frame and returning count of patients and stats')\n df_results = pd.DataFrame(outdic)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - calculating moving average (30 days window)')\n Ndaysavg = 30\n for measurehour in measurehours:\n df_results['occupancy_available_movingavg_' + str(measurehour)] = \\\n df_results['occupancy_available_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n df_results['occupancy_actual_movingavg_' + str(measurehour)] = \\\n df_results['occupancy_actual_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n\n if verbose: print(' - calculating moving average (5 days window)')\n Ndaysavg = 5\n for measurehour in measurehours:\n df_results['occupancy_available_movingavg5days_' + str(measurehour)] = \\\n df_results['occupancy_available_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n df_results['occupancy_actual_movingavg5days_' + str(measurehour)] = \\\n df_results['occupancy_actual_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n\n if savedatafile:\n if savetype == 'excel':\n gdf.savefile(df_results, savepath + filename, format='excel', overwrite=overwrite, verbose=verbose)\n else:\n gdf.savefile(df_results, savepath + filename, format='csv', overwrite=overwrite, verbose=verbose)\n\n return df_results", "def collect():\n datadir = 'data'\n if 'OUTPUT_DATA_DIR' in os.environ:\n datadir = os.environ['OUTPUT_DATA_DIR']\n\n scraper_dir = os.path.join(os.getcwd(), 'scrapers')\n scrapers = get_scraper_list(scraper_dir)\n now = datetime.now()\n total_deals = []\n for scr_instance in scrapers:\n deals = scr_instance.get_deals()\n\n # Map a timestamp on each deal\n for item in deals:\n item.update({'timestamp': now.strftime('%Y-%m-%d')})\n\n print(\"\\n Collected {0} deals for {1} \\n\\n\".format(len(deals), scr))\n\n total_deals += deals\n\n filename = '{0}_resultset.json'.format(now.strftime('%Y%m%d_%H%I%S'))\n\n fh = open(os.path.join(datadir, filename), 'w+')\n fh.write(json.dumps(total_deals))\n fh.close()", "def finalize_survey(self, **kwargs):", "def export_aggregated_events(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out, not_paired_obs_list = \"\", []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n if out:\n self.results = 
dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n return\n\n parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n\n # check for grouping results\n flag_group = True\n if len(selectedObservations) > 1:\n flag_group = dialog.MessageDialog(programName, \"Group events from selected observations in one file?\",\n [YES, NO]) == YES\n\n extended_file_formats = [\"Tab Separated Values (*.tsv)\",\n \"Comma Separated Values (*.csv)\",\n \"Open Document Spreadsheet ODS (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\",\n \"SDIS (*.sds)\",\n \"SQL dump file (*.sql)\"]\n\n if flag_group:\n file_formats = [\"tsv\", \"csv\", \"ods\", \"xlsx\", \"xls\", \"html\", \"sds\",\n \"sql\"] # must be in same order than extended_file_formats\n\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self,\n \"Export aggregated events\",\n \"\", \";;\".join(extended_file_formats))\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Export aggregated events\", \"\",\n \";;\".join(extended_file_formats))\n\n if not fileName:\n return\n\n outputFormat = file_formats[extended_file_formats.index(filter_)]\n if pathlib.Path(fileName).suffix != \".\" + outputFormat:\n fileName = str(pathlib.Path(fileName)) + \".\" + outputFormat\n\n else: # not grouping\n\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma Separated values (*.csv)\",\n \"Open Document Spreadsheet (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\")\n item, ok = QInputDialog.getItem(self, \"Export events format\", \"Available formats\", items, 0, False)\n if not ok:\n return\n outputFormat = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to export events\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if outputFormat == \"sql\":\n _, _, conn = db_functions.load_aggregated_events_in_db(self.pj,\n parameters[\"selected subjects\"],\n selectedObservations,\n parameters[\"selected behaviors\"])\n try:\n with open(fileName, \"w\") as f:\n for line in conn.iterdump():\n f.write(\"{}\\n\".format(line))\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n data_header = tablib.Dataset()\n data_header.title = \"Aggregated events\"\n header = [\"Observation id\", \"Observation date\", \"Media file\", \"Total length\", \"FPS\"]\n if INDEPENDENT_VARIABLES in self.pj:\n for idx in sorted_keys(self.pj[INDEPENDENT_VARIABLES]):\n header.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n header.extend([\"Subject\", \"Behavior\"])\n header.extend([\"Modifiers\"])\n header.extend([\"Behavior type\", \"Start (s)\", \"Stop (s)\", \"Duration (s)\", \"Comment start\", \"Comment stop\"])\n data_header.append(header)\n\n data = copy.deepcopy(data_header)\n for obsId in selectedObservations:\n d = 
export_observation.export_aggregated_events(self.pj, parameters, obsId)\n data.extend(d)\n\n if not flag_group:\n fileName = str(\n pathlib.Path(pathlib.Path(exportDir) / safeFileName(obsId)).with_suffix(\".\" + outputFormat))\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n data = copy.deepcopy(data_header)\n\n if outputFormat == \"sds\": # SDIS format\n out = \"% SDIS file created by eMOC (www.eMOC.unito.it) at {}\\nTimed <seconds>;\\n\".format(\n datetime_iso8601())\n for obsId in selectedObservations:\n # observation id\n out += \"\\n<{}>\\n\".format(obsId)\n dataList = list(data[1:])\n for event in sorted(dataList, key=lambda x: x[-4]): # sort events by start time\n if event[0] == obsId:\n behavior = event[-7]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n behavior = behavior.replace(char, \"_\")\n subject = event[-8]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n subject = subject.replace(char, \"_\")\n event_start = \"{0:.3f}\".format(\n round(event[-4], 3)) # start event (from end for independent variables)\n if not event[-3]: # stop event (from end)\n event_stop = \"{0:.3f}\".format(round(event[-4] + 0.001, 3))\n else:\n event_stop = \"{0:.3f}\".format(round(event[-3], 3))\n out += \"{subject}_{behavior},{start}-{stop} \".format(subject=subject, behavior=behavior,\n start=event_start, stop=event_stop)\n out += \"/\\n\\n\"\n with open(fileName, \"wb\") as f:\n f.write(str.encode(out))\n return\n\n if flag_group:\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def cleanup_queries(results):\n data = []\n for result in results:\n result.__dict__.pop('_sa_instance_state', 'None')\n item = result.__dict__\n if 'date' in item and item['date']:\n t = item['date']\n formatted_date = t.strftime('%m/%d/%Y')\n item['date'] = formatted_date\n data.append(item)\n return data", "def validation_report(self):\n\n day_related_checks = [InvCode.FLAG_DAY_SHORT_SLEEP, InvCode.FLAG_DAY_LONG_SLEEP, InvCode.FLAG_DAY_WITHOUT_DIARY,\n InvCode.FLAG_DAY_NON_WEARING, InvCode.FLAG_DAY_NOT_ENOUGH_VALID_EPOCHS,\n InvCode.FLAG_DAY_NOT_ENOUGH_CONSECUTIVE_DAYS]\n\n total_days = 0\n for check in day_related_checks:\n n_days_check_failed = 0\n for wearable in self.wearables.values():\n wearable.data[\"_tmp_flag_\"] = self._flag_list_OR(wearable, [check])\n n_days_check_failed += wearable.data.groupby([wearable.experiment_day_col])[\"_tmp_flag_\"].all().sum()\n total_days += n_days_check_failed\n print(\"Number of days removed due to %s: %d\" % (check, n_days_check_failed))\n\n print(\"Total number of potential days to remove (may have overlaps): %d\" % total_days)", "def case_diagnostics_onaccept(form):\n\n # Get record ID\n form_vars = form.vars\n if \"id\" in form_vars:\n record_id = form_vars.id\n elif hasattr(form, \"record_id\"):\n record_id = form.record_id\n else:\n return\n\n db = current.db\n s3db = current.s3db\n\n # Get the record\n table = s3db.disease_case_diagnostics\n query = (table.id == record_id)\n record = db(query).select(table.site_id,\n table.disease_id,\n table.result_date,\n limitby = (0, 1),\n ).first()\n if not record:\n return\n\n site_id = record.site_id\n disease_id = record.disease_id\n result_date = record.result_date\n\n if site_id and disease_id and 
result_date:\n\n # Count records grouped by result\n query = (table.site_id == site_id) & \\\n (table.disease_id == disease_id) & \\\n (table.result_date == result_date) & \\\n (table.deleted == False)\n cnt = table.id.count()\n rows = db(query).select(table.result,\n cnt,\n groupby = table.result,\n )\n total = positive = 0\n for row in rows:\n num = row[cnt]\n total += num\n if row.disease_case_diagnostics.result == \"POS\":\n positive += num\n\n # Look up the daily report\n rtable = s3db.disease_testing_report\n query = (rtable.site_id == site_id) & \\\n (rtable.disease_id == disease_id) & \\\n (rtable.date == result_date) & \\\n (rtable.deleted == False)\n report = db(query).select(rtable.id,\n rtable.tests_total,\n rtable.tests_positive,\n limitby = (0, 1),\n ).first()\n\n if report:\n # Update report if actual numbers are greater\n if report.tests_total < total or report.tests_positive < positive:\n report.update_record(tests_total = total,\n tests_positive = positive,\n )\n else:\n # Create report\n report = {\"site_id\": site_id,\n \"disease_id\": disease_id,\n \"date\": result_date,\n \"tests_total\": total,\n \"tests_positive\": positive,\n }\n report_id = rtable.insert(**report)\n if report_id:\n current.auth.s3_set_record_owner(rtable, report_id)\n report[\"id\"] = report_id\n s3db.onaccept(rtable, report, method=\"create\")", "def get_exceptions_results(self, results, include_time_in_key=False):\n agent_exceptions = {}\n for result in results:\n exc_tag = result.get(3)\n if exc_tag not in agent_exceptions:\n agent_exceptions[exc_tag] = {}\n\n if include_time_in_key:\n # use hours and minutes only\n time = re.compile(\"([0-9]+:[0-9]+).+\").search(result.get(2))[1]\n key = \"{}_{}\".format(result.get(1), time)\n else:\n key = str(result.get(1))\n\n if key not in agent_exceptions[exc_tag]:\n agent_exceptions[exc_tag][key] = 0\n\n agent_exceptions[exc_tag][key] += 1\n\n if not agent_exceptions:\n return\n\n for exc_type in agent_exceptions:\n agent_exceptions_sorted = {}\n for k, v in sorted(agent_exceptions[exc_type].items(),\n key=lambda x: x[0]):\n agent_exceptions_sorted[k] = v\n\n agent_exceptions[exc_type] = agent_exceptions_sorted\n\n return agent_exceptions", "def process_employee_exit(self):\n if self.is_employee_serving():\n self._end_date.append(datetime.now().isoformat())\n\n print(f\"Successfully processed exit for employee {self.name} on\" \\\n f\"{self._end_date[-1]}\\nWe wish {self.name} for future endeavours\")\n return\n raise RejoiningException(\"Employee not in service. 
Cannot process exit.\")", "def endRound(self):\n # first copy\n resultsList = []\n try:\n # calculate research rolls\n researchRolls = anwp.func.funcs.getRandomD100Rolls(100)\n \n # Process for Each Empire\n for empireID, myEmpire in self.empires.iteritems():\n # build industry\n resultsList.append('%s(%s) - buildIndustry:%s' % (myEmpire.name, empireID, myEmpire.buildIndustry()))\n \n # generate income and research points\n resultsList.append('%s(%s) - processSystems:%s' % (myEmpire.name, empireID, myEmpire.processSystems()))\n \n # calculate research orders\n resultsList.append('%s(%s) - calcResearch:%s' % (myEmpire.name, empireID, myEmpire.calcResearch(researchRolls)))\n \n # calculate diplomacy\n resultsList.append('%s(%s) - checkDiplomacy:%s' % (myEmpire.name, empireID, myEmpire.checkDiplomacy()))\n \n # Process trade routes\n resultsList.append('Process Trade Routes:%s' % self.processTradeRoutes())\n \n # Process Market Orders\n resultsList.append('Process Market Orders:%s' % self.processMarketOrders())\n \n # Process all Ship Battles\n resultsList.append('Process Ship Battles:%s' % self.processShipBattles())\n \n # Process all Ground Battles\n resultsList.append('Process Ground Battles:%s' % self.processGroundBattles())\n \n # reset galaxy data\n resultsList.append('Reset galaxy data, increment round')\n self.printResults(resultsList)\n self.resetData()\n return 1\n except:\n return 'galaxy->endRound error'", "def build_date_result(self, result, statistics, key):\n import dateutil.parser\n\n # Append result for each date\n for statistic in statistics:\n date = dateutil.parser.parse(str(statistic['date']))\n date = date.strftime('%Y-%m-%d')\n if date not in result:\n result[date] = []\n result[date].append({key: statistic})\n\n return result", "def collect_data(self,\n day_selection,\n exposure_schedule=[1.0],\n year_selection=[0],\n units=[\"SED\"],\n bin_width=None):\n\n # this subroutine handles keyword inputs (monthly, seasonal, etc)\n self.day_selection, self.day_input_flt, self.day_nonstring = str2daysofyear(day_selection)\n\n self.exposure_schedule = exposure_schedule\n\n self.year_selection = year_selection\n\n if units is not None :\n self.units = units\n \n if bin_width is not None :\n self.bin_width = bin_width\n\n self = self.interpret_parameters()\n\n ############################################################################\n\n lengths = {'day_selection' : len(self.day_selection),\n 'exposure_schedule' : len(self.exposure_schedule),\n 'year_selection' : len(self.year_selection),\n 'units' : len(self.units),\n 'bin_width' : len(self.bin_width)}\n\n self.num_hists = max(lengths.items(), key=lambda x: x[1])[1]\n assert all(x == self.num_hists or x == 1 for x in lengths.values()), (\n \"Inputs must be lists of length 1 or num_hists\")\n \n self.iterators = [x[0] for x in lengths.items() if x[1]==self.num_hists]\n\n\n self.hist_specs = []\n\n for i in range(self.num_hists) :\n hist_spec = {\n 'day_selection' : self.day_selection[0],\n 'exposure_schedule' : self.exposure_schedule[0],\n 'year_selection' : self.year_selection[0],\n 'units' : self.units[0],\n 'bin_width' : self.bin_width[0]}\n for x in self.iterators :\n hist_spec[x] = self.__dict__[x][i]\n self.hist_specs = self.hist_specs + [hist_spec]\n \n \n # find unique years to be loaded (probably all years but have to check)\n unique_years = set(self.year_selection[0])\n if len(self.year_selection) > 1 :\n for i in range(1,len(self.year_selection)) :\n unique_years.update(self.year_selection[i])\n unique_years = 
sorted(unique_years)\n\n # declare empty hists\n self.hists = [None for x in range(self.num_hists)]\n\n for i in range(len(unique_years)) :\n year = unique_years[i]\n print(\"Processing year \"+str(year)) #should use logging, don't yet know how\n dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year))) \n dataset.set_auto_mask(False) #to get normal arrays (faster than default masked arrays)\n\n if i == 0 :\n # TODO: this should also be done by some initial dataset analysis, but that's a drastic\n # design overhaul\n self.lat = dataset['lat'][:]\n self.lon = dataset['lon'][:]\n\n # now to determine the unique days for the specific year\n unique_days = set()\n for j in range(self.num_hists) :\n if year in self.hist_specs[j]['year_selection'] :\n unique_days.update(self.hist_specs[j]['day_selection'])\n unique_days = sorted(unique_days)\n\n # TODO: when metadata fixed, update this to actually interpret dates (cftime)\n # reformat to index for netCDF\n nc_day_sel = [False for i in range(365*24)] \n # reshape false array to have first dimension 24 (hours in day)\n nc_day_sel = assert_data_shape_24(nc_day_sel) \n # set the appropriate days as true\n nc_day_sel[:,np.array(unique_days)-1] = True \n # correct for leap years (skip feb 29)\n if year % 4 == 0 :\n nc_day_sel = np.concatenate(\n (nc_day_sel[:,0:59],np.full((24,1),False),nc_day_sel[:,59:]),axis=1)\n # flatten time_subset array back to one dimension\n nc_day_sel = nc_day_sel.flatten(order='F')\n\n #load data\n data_year = assert_data_shape_24(dataset['UV_AS'][nc_day_sel,:,:])\n\n #sort data into histograms\n for j in range(self.num_hists) :\n if year in self.hist_specs[j]['year_selection'] :\n sub_day_sel = [ True if x in self.hist_specs[j]['day_selection'] \n else False for x in unique_days ]\n temp_data = data_year[:,sub_day_sel,:,:]\n\n # Apply the exposure schedule, differently for doses vs intensity\n if self.hist_specs[j]['units'] in [\"SED\",\"J m-2\",\"UVIh\"] :\n # if calculating doses\n print(' Calculating doses')\n temp_data = np.sum(np.reshape(\n self.hist_specs[j]['exposure_schedule'],[24,1,1,1]) * temp_data,axis=0)\n # more complex when doing intensity\n else :\n # assume elsewise calculating intensity (i.e. 
UV-index) then limit data selection\n # to schedule (remembering that default schedule is just ones)\n print(' Slicing data with exposure schedule')\n # select only those hours with nonzero entry in exposure schedule\n temp_data = temp_data[self.hist_specs[j]['exposure_schedule'] != 0,:,:,:]\n # select nonzero values from exposure schedule\n exposure_schedule_nonzero = self.hist_specs[j]['exposure_schedule'][\n self.hist_specs[j]['exposure_schedule'] != 0]\n # if any nonzero entries aren't 1, multiply data accordingly\n if (exposure_schedule_nonzero != 1).any() :\n temp_data *= np.reshape(exposure_schedule_nonzero,[len(exposure_schedule_nonzero),1,1,1])\n # recombine first two dimensions (hour and day) back into time ready for histogram\n temp_data = assert_data_shape_24(temp_data,reverse=True) \n\n # now multiply data by conversion factor according to desired untis\n # TODO: Should expand upon this in reference files\n temp_data *= {\"SED\":0.9, \"J m-2\":90, \"UVIh\":1, \"UVI\":1, \"W m-2\":0.025, \"mW m-2\":25}[self.hist_specs[j]['units']]\n\n # if this is the first iteration, declare a hist\n if 'num_bins' not in self.hist_specs[j] :\n # seems like useful metadata to know bin n and edges\n self.hist_specs[j]['num_bins'] = int(np.nanmax(temp_data) // self.hist_specs[j]['bin_width'] ) + 2\n self.hist_specs[j]['bin_edges'] = (np.array(range(self.hist_specs[j]['num_bins']+1))\n - 0.5) * self.hist_specs[j]['bin_width'] \n # this form allows for weird custom bin edges, but probably will never use that\n self.hist_specs[j]['bin_centers'] = (self.hist_specs[j]['bin_edges'][:-1] \n + 0.5 * np.diff(self.hist_specs[j]['bin_edges']))\n\n # TODO: think about possible cases where dimensions could differ\n self.hists[j]=np.zeros([self.hist_specs[j]['num_bins'],\n np.shape(temp_data)[-2],np.shape(temp_data)[-1]], dtype=np.int16)\n\n else :\n new_num_bins = int(np.nanmax(temp_data) // self.hist_specs[j]['bin_width']) + 2 - self.hist_specs[j]['num_bins']\n # check if new data requires extra bins in pix_hist\n if new_num_bins > 0 :\n # append zeros to pix hist to make room for larger values\n self.hists[j] = np.concatenate((self.hists[j],np.zeros(\n [new_num_bins,np.shape(self.hists[j])[-2],np.shape(self.hists[j])[-1]],\n dtype=np.int16)),axis=0)\n # update bin information\n self.hist_specs[j]['num_bins'] = self.hist_specs[j]['num_bins'] + new_num_bins\n self.hist_specs[j]['bin_edges'] = (np.array(range(self.hist_specs[j]['num_bins']+1))\n - 0.5) * self.hist_specs[j]['bin_width'] \n self.hist_specs[j]['bin_centers'] = (self.hist_specs[j]['bin_edges'][:-1] \n + 0.5 * np.diff(self.hist_specs[j]['bin_edges']))\n\n # TODO: Add check in case bins get \"full\" (i.e. approach int16 max value)\n # now put data into hist using apply_along_axis to perform histogram for each pixel\n print(\" Calculating and adding to pixel histograms\")\n self.hists[j][:,:,:] += np.apply_along_axis(lambda x: \n np.histogram(x,bins=self.hist_specs[j]['bin_edges'])[0],0,temp_data)\n\n return self", "def find_by_date(self):\n clear_screen()\n while True:\n self.date = input(\"Which date would you like to look at, ex: MM/DD/\"\n \"YYYY? Or you can find all dates including and between two \"\n \"dates, ex: MM/DD/YYYY - MM/DD/YYYY. 
Or Q to quit to the main \"\n \"screen.: \")\n if self.date.strip().upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n break\n #if the user put a range of dates it will go into this option.\n elif re.search(r'[0-1][0-9]/[0-3][0-9]/[1-2][0-9]{3}\\s?[-]\\s?[0-1]'\n '[0-9]/[0-3][0-9]/[1-2][0-9]{3}',self.date):\n self.date_one = re.search(r'([0-1][0-9]/[0-3][0-9]/[1-2]'\n '[0-9]{3})\\s?[-]\\s?',self.date)\n self.date_two = re.search(r'\\s?[-]\\s?([0-1][0-9]/[0-3][0-9]/'\n '[1-2][0-9]{3})', self.date)\n clear_screen() \n self.dates_to_print = \"Results for dates including and between \"\n \"{} - {}.\".format(self.date_one.group(1), self.date_two.group(1))\n self.date_one = datetime.datetime.strptime(self.date_one.group(1),\n '%m/%d/%Y')\n self.date_two = datetime.datetime.strptime(self.date_two.group(1),\n '%m/%d/%Y')\n self.find_by_date_list = []\n a = 0\n #finds the dates that are in between the two entered dates.\n for i in self.dict_list:\n self.this_date = datetime.datetime.strptime(i[\"date\"], \n '%m/%d/%Y %H:%M')\n if self.date_one <= self.this_date <= self.date_two:\n self.find_by_date_list.append(i) \n a += 1\n if a == 0:\n print(\"{} was not listed.\".format(self.date))\n continue \n else:\n self.display_style(self.find_by_date_list, \n dates=self.dates_to_print)\n self.del_or_edit()\n break\n #if user entered a single date, this option will be triggered\n elif re.search(r'[0-1][0-9]/[0-3][0-9]/[1-2][0-9]{3}',self.date):\n print(\"Results for the date {}.\".format(self.date))\n self.find_by_date_list = []\n a = 0\n for i in self.dict_list:\n if re.search(self.date, i[\"date\"]):\n self.find_by_date_list.append(i)\n a += 1\n if a == 0:\n print(\"{} was not listed.\".format(self.date))\n continue \n else:\n self.display_style(self.find_by_date_list)\n self.del_or_edit()\n break\n else:\n print(\"{} is not an acceptable date.\".format(self.date))\n print(\"\")", "def query_serp(query:str, site:str, dates:list, num_results:int, paper_name:str) -> list:\n all_sites = []\n total_sites_count = 0\n\n for d in dates:\n try:\n # Get query dict and params dict\n query_r, params = make_params(query=query, site=site, date_start=d[0], date_end=d[1],\n num_results=num_results, paper=paper_name)\n # serpAPI query\n client = GoogleSearchResults(params)\n results = client.get_dict()\n news_results = results['news_results']\n\n count = 0\n sites_date = []\n # Loop through till end of search results or error encountered\n while (news_results and len(news_results)>0) or ('error' not in results):\n sites = [news['link'] for news in news_results]\n sites_date.extend(sites)\n count+=len(sites)\n\n params['start'] = count\n client = GoogleSearchResults(params)\n results = client.get_dict()\n news_results = results['news_results']\n\n print('Date Range: {}-{}\\tTotal Sites: {}'.format(d[0],d[1],len(sites_date)))\n\n # add list of sites to query dict\n query_r['sites'] = sites_date\n all_sites.append(query_r)\n total_sites_count += len(sites_date)\n except Exception as e:\n print(e)\n print(d)\n continue\n print('Total Sites: {}'.format(total_sites_count))\n return all_sites", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n 
current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def collect(self, day: datetime) -> Dict:\n LOG.info(f'Collecting stats for {self.name} on {day.isoformat()}')\n collected = self._collect(day)\n LOG.debug(f'Collection for {self.name} complete')\n return collected", "def run(self) -> list:\n logger.debug('Fetching date %s', self._day.strftime('%Y/%m/%d'))\n \n regions = [r() for r in regions_list]\n air_quality = list()\n \n # fetch air quality of each region\n for r in regions:\n r.fetch_air_quality(self._day)\n \n # gather results from all regions\n for r in regions:\n # wait until region has fetched his data\n r.wait_for_quality()\n logging.info('Fetched region:%s for day:%s', r.name, self._day)\n air_quality.append({\n 'name': r.name,\n 'provinces': [\n {'name': x.name, 'short': x.short_name, 'quality': x.quality.asdict()} \n for x in r.provinces]\n })\n\n self._fetcher.fetched_result(self._day, air_quality)", "def _compute_results(self):\n self.ensure_one()\n Result = self.env['pabi.common.supplier.payment.report.view']\n dom = [('invoice_id.source_document_type', 'in',\n ['advance', 'expense']),\n ('expense_id.pay_to', '!=', 'supplier')]\n if self.user_ids:\n dom += [('voucher_id.validate_user_id', 'in', self.user_ids.ids)]\n if self.source_document_type:\n dom += [('invoice_id.source_document_type', '=',\n self.source_document_type)]\n if self.fiscalyear_start_id:\n dom += [('voucher_id.date', '>=',\n self.fiscalyear_start_id.date_start)]\n if self.fiscalyear_end_id:\n dom += [('voucher_id.date', '<=',\n self.fiscalyear_end_id.date_stop)]\n if self.period_start_id:\n dom += [('voucher_id.date', '>=',\n self.period_start_id.date_start)]\n if self.period_end_id:\n dom += [('voucher_id.date', '<=',\n self.period_end_id.date_stop)]\n if self.date_start:\n dom += [('voucher_id.date', '>=', self.date_start)]\n if self.date_end:\n dom += [('voucher_id.date', '<=', self.date_end)]\n self.results = Result.search(\n dom, order=\"fiscalyear,voucher_number,invoice_number\")", "def get_single_day_information(self, single_date):\n self.end_date_ordinal = single_date.toordinal()\n self.correct_list_to_end_date()\n return self.all_students_dict", "def ingest(self,check=True):\n\t\tdata=self.data_all\n\t\tpubdate=time_utils.parseISO(self.api.last_update).date()\n\t\t\n\t\tcounter=0\n\t\tfor item in 
data:\n\t\t\tareacode=item['areaCode']\n\t\t\tdatestring=item['specimenDate']\n\t\t\t_date=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=_date,areacode=areacode)\n\t\t\trow.areaname=item['areaName']\n\t\t\tdaily=item['newCasesBySpecimenDate']\n\t\t\ttotal=item['cumCasesBySpecimenDate']\n\t\t\t\n\t\t\t#log.debug(f'{row.areaname}: {datestring}')\t\t\t\n\t\t\tif created:\n\t\t\t\trow.dailyLabConfirmedCases=daily\n\t\t\t\trow.totalLabConfirmedCases=total\n\t\t\t\trow.save()\n\t\t\t\t\n\t\t\t\tif daily:\n\t\t\t\t\tlag=(pubdate-_date.date()).days\n\t\t\t\t\tlog.debug(f'date:{_date} lag: {lag} daily:{daily}')\n\t\t\t\t\tdrow,dcreated=DailyReport.objects.get_or_create(specimenDate=_date,areacode=areacode,publag=lag)\n\t\t\t\t\tdrow.dailycases=daily\n\t\t\t\t\tdrow.add_cases=daily #if a new daily case, assume no prior report\n\t\t\t\t\tdrow.save()\n\t\t\t\n\t\t\tif not created:\n\t\t\t\texisting_daily=row.dailyLabConfirmedCases\n\t\t\t\texisting_total=row.totalLabConfirmedCases\n\t\t\t\tif daily is not None:\n\t\t\t\t\tif existing_daily !=daily or existing_total!=total:\n\t\t\t\t\t\trow.dailyLabConfirmedCases=daily\n\t\t\t\t\t\trow.totalLabConfirmedCases=total\n\t\t\t\t\t\trow.save()\n\t\t\t\t\t\tif existing_daily !=daily:\n\t\t\t\t\t\t\tlog.debug(f'Updating {row.areaname} on {datestring}: Daily: {existing_daily} to {daily} Total: {existing_total} to {total}')\n\t\t\t\t\t\t\tif existing_daily:\n\t\t\t\t\t\t\t\t_increase=daily-existing_daily\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t_increase=daily\n\t\t\t\t\t\t\tlag=(pubdate-_date.date()).days\n\t\t\t\t\t\t\tdrow,dcreated=DailyReport.objects.get_or_create(specimenDate=_date,areacode=areacode,publag=lag)\n\t\t\t\t\t\t\tdrow.dailycases=daily\n\t\t\t\t\t\t\tdrow.add_cases=_increase\n\t\t\t\t\t\t\tdrow.save()\n\t\t\t\t\t\n\t\t\tcounter+=1\n\t\t\tif counter%1000==0:\n\t\t\t\tlog.info(f'Processing row {counter}')\n\t\tlog.info(f'Processed: {counter} rows')\n\n\t\tif self.edition:\n\t\t\tconfigs.userconfig.update('PHE','latest_update',self.edition)\n\t\telse:\n\t\t\tlog.info('Latest update not updated')", "def collect_data_date(self, date=None):\n if date is None:\n date = self.date\n # TODO make it so it doenst re-collect all data and just adds historical's data\n self.collect_all_stock_data()", "def process_results(self, results):\n issues = {}\n for service in SERVICE_RESOURCES:\n for agent in SERVICE_RESOURCES[service]['daemons']:\n _results = results.find_by_tag(agent)\n ret = self.get_exceptions_results(_results)\n if ret:\n if service not in issues:\n issues[service] = {}\n\n issues[service][agent] = ret\n\n if issues:\n self._output['agent-exceptions'] = issues", "def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))" ]
[ "0.5402203", "0.5367766", "0.5359192", "0.524809", "0.5185131", "0.51527995", "0.51476175", "0.5144919", "0.51337653", "0.5064276", "0.50609916", "0.5050773", "0.49764916", "0.496732", "0.493797", "0.49316752", "0.49218276", "0.48943838", "0.48835677", "0.48830566", "0.48685098", "0.4854798", "0.48372862", "0.482055", "0.48114845", "0.48004153", "0.4792487", "0.47829798", "0.4780088", "0.47591257" ]
0.753436
0
1. Check each peer's genesis block
2. Generate new blocks on each peer
   2.1. 2 blocks on peer 1
   2.2. 4 blocks on peer 2
   2.3. 2 blocks on peer 3
3. Connect peers
   3.1. peer 1 with 2 (1->2)
   3.2. peer 1 with 3 (1->(2 and 3))
4. Generate new blocks
   4.1. 3 blocks on peer 1
   4.2. 5 blocks on peer 3
5. Stop all peers
def scenario(): LOCAL_HOST = "http://127.0.0.1" # import functions from . import genesis_block from . import create_block from . import connect_peer from . import stop_server from . import block_crosscheck total_cnt = 0 pass_cnt = 0 # 1. Check each peer's genesis block try: assert genesis_block.check(LOCAL_HOST, 3001) assert genesis_block.check(LOCAL_HOST, 3002) assert genesis_block.check(LOCAL_HOST, 3003) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/genesis_block") total_cnt += 1 # 2. Generate new blocks # 2.1. 2 blocks on peer #1 # 2.2. 4 blocks on peer #2 # 2.3. 2 blocks on peer #3 try: assert create_block.addBlocks(LOCAL_HOST, 3001, num=2) assert create_block.check(LOCAL_HOST, 3001, num=2) assert create_block.addBlocks(LOCAL_HOST, 3002, num=4) assert create_block.check(LOCAL_HOST, 3002, num=4) assert create_block.addBlocks(LOCAL_HOST, 3003, num=2) assert create_block.check(LOCAL_HOST, 3003, num=2) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/create_block") total_cnt += 1 # 3. Connect peers # 3.1. peer #1 with #2 (1->2) # 3.2. peer #1 with #3 (1->(2 and 3)) try: assert connect_peer.connectPeer(LOCAL_HOST, 3001, "ws://127.0.0.1:6002") assert connect_peer.connectPeer(LOCAL_HOST, 3001, "ws://127.0.0.1:6003") print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/connect_peer") total_cnt += 1 # 4. Generate new blocks # 4.1. 3 blocks on peer #1 # 4.2. 5 blocks on peer #3 try: isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3) assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3) isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5) assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/block_crosscheck") total_cnt += 1 # 5. Stop all peers try: assert stop_server.stopServer(LOCAL_HOST, 3001) assert stop_server.stopServer(LOCAL_HOST, 3002) assert stop_server.stopServer(LOCAL_HOST, 3003) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/stop_server") total_cnt += 1 # return pass_cnt_per_test and total_cnt_per_test return pass_cnt, total_cnt
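The scenario() document above drives helper modules (genesis_block, create_block, connect_peer, stop_server, block_crosscheck) whose bodies are not part of this record. Purely as an illustration, the sketch below shows what a minimal genesis_block.check could look like; the "/blocks" route and the genesis field names are assumptions, and only the check(host, port) call signature is taken from the scenario above.

```python
# Hypothetical sketch only: the real genesis_block module is not shown in this
# record. The "/blocks" route and the genesis field names are assumptions; the
# check(host, port) signature matches the calls made by scenario() above.
import requests


def check(host, port):
    """Return True if the peer at host:port serves a plausible genesis block."""
    try:
        resp = requests.get("{}:{}/blocks".format(host, port), timeout=5)
        resp.raise_for_status()
        chain = resp.json()
    except (requests.RequestException, ValueError):
        return False
    if not isinstance(chain, list) or not chain:
        return False
    genesis = chain[0]
    # The genesis block is expected at index 0 with no meaningful previous hash.
    return isinstance(genesis, dict) and genesis.get("index") == 0 and not genesis.get("previousHash")
```

Under these assumptions, genesis_block.check(LOCAL_HOST, 3001) in the scenario would simply confirm that peer 1 answers with a chain whose first entry looks like a genesis block.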
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_single_chain(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 100 blocks in a row with no transaction but the genesis\n blocks = add_new_blocks(manager, 100, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata(force_reload=True)\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 30, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 50 more blocks in a row with no transactions between them\n blocks = add_new_blocks(manager, 50)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n # Mine 15 more blocks with 10 transactions between each block\n for _ in range(15):\n txs = add_new_transactions(manager, 10, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n blocks = add_new_blocks(manager, 1)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n self.assertConsensusValid(manager)", "def test_single_fork_not_best(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 30 blocks in a row with no transactions\n blocks = add_new_blocks(manager, 30, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 5, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 1 blocks\n blocks = add_new_blocks(manager, 1, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Generate a block which will be a fork in the middle of the chain\n # Change the order of the transactions to change the hash\n fork_block1 = manager.generate_mining_block()\n fork_block1.parents = [fork_block1.parents[0]] + fork_block1.parents[:0:-1]\n fork_block1.resolve()\n fork_block1.verify()\n\n # Mine 8 blocks in a row\n blocks = add_new_blocks(manager, 8, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Fork block must have the same parents as blocks[0] as well as the same score\n self.assertEqual(set(blocks[0].parents), set(fork_block1.parents))\n\n # Propagate fork block.\n # This block belongs to case (ii).\n 
self.assertTrue(manager.propagate_tx(fork_block1))\n fork_meta1 = fork_block1.get_metadata()\n self.assertEqual(fork_meta1.voided_by, {fork_block1.hash})\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 5, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 5 blocks in a row\n # These blocks belong to case (i).\n blocks = add_new_blocks(manager, 5, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 2, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Propagate a block connected to the voided chain\n # These blocks belongs to case (iii).\n sidechain1 = add_new_blocks(manager, 3, parent_block_hash=fork_block1.hash)\n for block in sidechain1:\n meta = block.get_metadata(force_reload=True)\n self.assertEqual(meta.voided_by, {block.hash})\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 2, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Propagate a block connected to the voided chain\n # This block belongs to case (iv).\n fork_block3 = manager.generate_mining_block(parent_block_hash=fork_block1.hash)\n fork_block3.resolve()\n fork_block3.verify()\n self.assertTrue(manager.propagate_tx(fork_block3))\n fork_meta3 = fork_block3.get_metadata()\n self.assertEqual(fork_meta3.voided_by, {fork_block3.hash})\n\n self.assertConsensusValid(manager)", "def mine_blocks(self, count):\n\n # Clear out block announcements from each p2p listener\n [x.clear_block_announcements() for x in self.nodes[0].p2ps]\n self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)\n return int(self.nodes[0].getbestblockhash(), 16)", "def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()", "def test_accept_depth(self, nodeOneId, nodeTwoId):\n logging.info(\">>> Entered : test_accept_depth \\n\")\n try:\n self.nodes[nodeTwoId].setminingmaxblock(1000)\n self.nodes[nodeTwoId].setexcessiveblock(1010, 4)\n\n # Mine an excessive block. Node One should not accept it\n addr = self.nodes[nodeTwoId].getnewaddress()\n for i in range(0,10):\n self.nodes[nodeOneId].sendtoaddress(addr, 1.0)\n self.nodes[nodeOneId].generate(1)\n time.sleep(2) #give blocks a chance to fully propagate\n counts = [ x.getblockcount() for x in self.nodes[0:2] ]\n\n logging.info(\"Counts: Node1 = %d and Node2 = %d \" %(counts[0], counts[1]))\n assert_equal(counts[0]-counts[1], 1)\n # Mine a block on top. 
Node 1 should still not accept it\n self.nodes[nodeOneId].generate(1)\n time.sleep(2) #give blocks a chance to fully propagate\n counts = [ x.getblockcount() for x in self.nodes[0:2] ]\n logging.info(\"Counts: Node1 = %d and Node2 = %d \" %(counts[0], counts[1]))\n assert_equal(counts[0]-counts[1], 2)\n\n # Change node 1 to AD=2. The assertion will fail if it doesn't accept the chain now \n self.nodes[nodeTwoId].setexcessiveblock(1010, 2)\n self.nodes[nodeOneId].generate(1)\n time.sleep(2) #give blocks a chance to fully propagate !!!!\n\n counts = [ x.getblockcount() for x in self.nodes[0:2] ]\n logging.info(\"Counts: Node1 = %d and Node2 = %d \" %(counts[0], counts[1]))\n assert_equal(counts[0]-counts[1], 0)\n except (Exception, JSONRPCException) as e1:\n logging.info(e1)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n raise TestAssertionError({\"file_name\": fname, \"line_num\": exc_tb.tb_lineno, \\\n \"error_type\": exc_type.__name__, \"error_msg\": str( e1 ), \\\n \"n1\" : self.bins[nodeOneId], \"n2\" : self.bins[nodeTwoId], \"amount\" : \"N/A\", \"numsig\" : \"N/A\"})", "def test_chain_selection(self, blockchain, genesis, block1, block2, block3, block4, block5, block6):\n blockchain.add(block4)\n # gids:\n # 0 <- 1 <- 3\n # 0 <- 2 <- 4\n\n assert blockchain._leaves == {hash(block3), hash(block4)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block4)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1, hash(block3): 2}\n assert blockchain._longest_chain == {hash(genesis), hash(block1), hash(block3)}\n\n blockchain.add(block5)\n # gids:\n # 0 <- 1 <- 3\n # 0 <- 2 <- 4 <- 5\n\n assert blockchain._leaves == {hash(block3), hash(block5)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block4)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block5)][Blockchain._CHAIN_LENGTH_KEY] == 4\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block2): 1, hash(block4): 2, hash(block5): 3}\n assert blockchain._longest_chain == {hash(genesis), hash(block2), hash(block4), hash(block5)}\n\n blockchain.add(block6)\n # gids:\n # 0 <- 1 <- 3 <- 6\n # 0 <- 2 <- 4 <- 5\n\n assert blockchain._leaves == {hash(block5), hash(block6)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block4)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block5)][Blockchain._CHAIN_LENGTH_KEY] == 4\n assert blockchain._G.node[hash(block6)][Blockchain._CHAIN_LENGTH_KEY] == 4\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block2): 1, hash(block4): 2, hash(block5): 3}\n assert 
blockchain._longest_chain == {hash(genesis), hash(block2), hash(block4), hash(block5)}", "def _generate_genesis() -> None:\n logging.debug(\"Generating the genesis block\")\n new_recv_block(Block.genesis())", "def test_adding_multiple_blocks(self, blockchain, genesis, block1, block2, block3):\n assert blockchain.get_depth(hash(block1)) == -float('inf')\n assert blockchain.get_depth(hash(block2)) == -float('inf')\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block1)\n # graph should look like this:\n # 0 <- 1\n assert hash(block1) in blockchain\n assert blockchain[hash(block1)] == block1\n assert blockchain._leaves == {hash(block1)}\n assert blockchain.get_virtual_block_parents() == {hash(block1)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1}\n assert blockchain._longest_chain == {hash(genesis), hash(block1)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.get_depth(hash(genesis)) == 1\n assert blockchain.get_depth(hash(block1)) == 0\n assert blockchain.get_depth(hash(block2)) == -float('inf')\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block2)\n # graph should look like this:\n # 0 <- 1\n # 0 <- 2\n assert hash(block2) in blockchain\n assert blockchain[hash(block2)] == block2\n assert blockchain._leaves == {hash(block1), hash(block2)}\n assert blockchain.get_virtual_block_parents() == {min(hash(block1), hash(block2))}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1}\n assert blockchain._longest_chain == {hash(genesis), hash(block1)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.is_a_before_b(hash(block1), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block2)) is True\n assert blockchain.get_depth(hash(genesis)) == 1\n assert blockchain.get_depth(hash(block1)) == 0\n assert blockchain.get_depth(hash(block2)) == 0\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block3)\n # graph should look like this:\n # 0 <- 1 <- 3\n # 0 <- 2\n assert hash(block3) in blockchain\n assert blockchain[hash(block3)] == block3\n assert blockchain._leaves == {hash(block2), hash(block3)}\n assert blockchain.get_virtual_block_parents() == {hash(block3)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1, hash(block3): 2}\n assert blockchain._longest_chain == {hash(genesis), hash(block1), hash(block3)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.is_a_before_b(hash(block1), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(block3), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block3)) is True\n 
assert blockchain.get_depth(hash(genesis)) == 2\n assert blockchain.get_depth(hash(block1)) == 1\n assert blockchain.get_depth(hash(block2)) == 0\n assert blockchain.get_depth(hash(block3)) == 0", "def start_peers(self):\n for i in self.nodes:\n i.start()", "def consensus(self):\n total_peers = 0\n blocks = list()\n for peer in self.peers.values():\n assert len(peer.blocks) > 0\n if not peer.synched:\n continue\n total_peers += 1\n blocks.extend(peer.blocks)\n # blocks = [peer.blocks[-1] for peer in self.peers.values()\n # if len(peer.blocks) and peer.synched]\n\n counts = defaultdict(int)\n heights = dict()\n timestamps = dict()\n for block in blocks:\n heights[block[1]] = block[0]\n counts[block[1]] += 1\n # Retrieve newest timestamp for block\n if block[2] is not None:\n stamp = max([float(X[0]) for X in block[2]])\n assert stamp is not None\n timestamps[block[1]] = stamp\n else:\n timestamps[block[1]] = block[3]\n\n result = list()\n for block_hash, num in counts.items():\n block_height = heights[block_hash]\n consensus_pct = (num / float(total_peers)) * 100.0\n row = (ConsensusBlock(int(block_height), block_hash, timestamps[block_hash]), num, consensus_pct)\n result.append(row)\n\n results = sorted(result, lambda x, y: int(y[0].height - x[0].height))\n half_hour_ago = time.time() - (60*30)\n if len(results):\n # If there isn't enough data to get an accurate Difficulty rating\n # (requires 30 mins of data), then fill out with stuff from Ledger DB\n oldest_result = results[-1]\n oldest_time = min([X[0].stamp for X in results])\n if oldest_time < half_hour_ago:\n merge_rows = ResultsManager.history_fetch(half_hour_ago, oldest_result[0].height)\n for row in merge_rows:\n results.append((row, 0, 100))\n else:\n results = [(row, 0, 100) for row in ResultsManager.history_fetch(half_hour_ago)]\n\n # Verify consensus is above 50%, and notify result manager\n if results[0][2] >= 50:\n # LOG.warning('XXX adding new consensus peers:%r %r', total_peers, results)\n ResultsManager.on_consensus(results[0][0])\n\n return results", "def miner_controller(reward_address, peers, hashes_till_check, DB):\n def make_mint(pubkey, DB):\n address = tools.make_address([reward_address], 1)\n return {'type': 'mint',\n 'pubkeys': [pubkey],\n 'signatures': ['first_sig'],\n 'count': blockchain.count(address, DB)}\n\n def genesis(pubkey, DB):\n target = blockchain.target(DB)\n out = {'version': custom.version,\n 'length': 0,\n 'time': time.time(),\n 'target': target,\n 'diffLength': blockchain.hexInvert(target),\n 'txs': [make_mint(pubkey, DB)]}\n print('out: ' + str(out))\n out = tools.unpackage(tools.package(out))\n return out\n\n def make_block(prev_block, txs, pubkey, DB):\n leng = int(prev_block['length']) + 1\n target = blockchain.target(DB, leng)\n diffLength = blockchain.hexSum(prev_block['diffLength'],\n blockchain.hexInvert(target))\n out = {'version': custom.version,\n 'txs': txs + [make_mint(pubkey, DB)],\n 'length': leng,\n 'time': time.time(),\n 'diffLength': diffLength,\n 'target': target,\n 'prevHash': tools.det_hash(prev_block)}\n out = tools.unpackage(tools.package(out))\n return out\n def restart_workers():\n print(\"Possible solution found, restarting mining workers.\")\n for worker_mailbox in worker_mailboxes:\n worker_mailbox['restart'].set()\n\n def spawn_worker():\n print(\"Spawning worker\")\n restart_signal = multiprocessing.Event()\n work_queue = multiprocessing.Queue()\n worker_proc = multiprocessing.Process(target=miner,\n args=(submitted_blocks, work_queue,\n restart_signal))\n 
worker_proc.daemon = True\n worker_proc.start()\n return {'restart': restart_signal, 'worker': worker_proc,\n 'work_queue': work_queue}\n\n submitted_blocks = multiprocessing.Queue()\n num_cores = multiprocessing.cpu_count()\n print(\"Creating %d mining workers.\" % num_cores)\n worker_mailboxes = [spawn_worker() for _ in range(num_cores)]\n candidate_block = None\n length = None\n while True:\n length = DB['length']\n if length == -1:\n candidate_block = genesis(reward_address, DB)\n txs = []\n else:\n prev_block = blockchain.db_get(length, DB)\n txs = DB['txs']\n candidate_block = make_block(prev_block, txs, reward_address, DB)\n\n work = (candidate_block, hashes_till_check)\n\n for worker_mailbox in worker_mailboxes:\n worker_mailbox['work_queue'].put(copy.copy(work))\n\n # When block found, add to suggested blocks.\n solved_block = submitted_blocks.get() # TODO(roasbeef): size=1?\n if solved_block['length'] != length + 1:\n continue\n DB['suggested_blocks'].append(solved_block)\n restart_workers()", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def test_fork_different_genesis(self):\n bvh = self.BlockValidationHandler()\n\n # create a new valid chain 5 long from the current root\n new_head = self.btm.generate_chain(self.btm.chain_head, 5,\n {'add_to_store': True})\n self.btm.set_chain_head(new_head[-1])\n\n # generate candidate chain 5 long from it's own genesis\n new_block = self.btm.generate_chain(None, 5,\n {'add_to_cache': True})\n\n bv = self.create_block_validator(new_block[-1], bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block[-1].status == BlockStatus.Invalid)\n self.assertFalse(bvh.result[\"commit_new_block\"])", "async def _new_blocks(self) -> AsyncGenerator[Eth1Block, None]:\n while True:\n try:\n block = self._eth1_data_provider.get_block(\"latest\")\n except BlockNotFound:\n raise Eth1MonitorValidationError(\"Fail to get latest block\")\n target_block_number = BlockNumber(block.number - self._num_blocks_confirmed)\n from_block_number = self.highest_processed_block_number\n if target_block_number > from_block_number:\n # From `highest_processed_block_number` to `target_block_number`\n for block_number in range(\n from_block_number + 1, target_block_number + 1\n ):\n try:\n block = self._eth1_data_provider.get_block(\n BlockNumber(block_number)\n )\n except BlockNotFound:\n raise Eth1MonitorValidationError(\n f\"Block does not exist for block number={block_number}\"\n )\n yield block\n await trio.sleep(self._polling_period)", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}, \\\n\t\t\t\t{'ip': '100.100.100.100', \\\n\t\t\t\t\t'peer id': 'test2', 'port': 1000}])", "def consensus():\n global blockchain\n\n longest_chain = None\n current_len = len(blockchain.chain)\n\n for node in peers:\n response = requests.get('{}chain'.format(node))\n length = response.json()['length']\n chain = response.json()['chain']\n if length > current_len and blockchain.check_chain_validity(chain):\n current_len = length\n longest_chain = chain\n\n if longest_chain:\n blockchain = longest_chain\n return True\n\n return False", "def consensus():\n global blockchain\n\n longest_chain = None\n current_len = len(blockchain.chain)\n\n for node in peers:\n 
response = requests.get('{}chain'.format(node))\n length = response.json()['length']\n chain = response.json()['chain']\n if length > current_len and blockchain.check_chain_validity(chain):\n current_len = length\n longest_chain = chain\n\n if longest_chain:\n blockchain = longest_chain\n return True\n\n return False", "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(block.__dict__, sort_keys=True),\n headers=headers)", "def geth_run_private_blockchain(\n web3: Web3,\n accounts_to_fund: typing.List[bytes],\n geth_nodes: typing.List[GethNodeDescription],\n base_datadir: str,\n chain_id: int,\n verbosity: str,\n random_marker: str,\n):\n # pylint: disable=too-many-locals,too-many-statements,too-many-arguments,too-many-branches\n nodes_configuration = []\n seal_account = None\n for pos, node in enumerate(geth_nodes):\n if pos == 0:\n unlock = True # make the first node miner\n seal_account = privatekey_to_address(node.private_key)\n\n config = geth_node_config(\n node.private_key,\n node.p2p_port,\n node.rpc_port,\n unlock,\n )\n nodes_configuration.append(config)\n\n geth_node_config_set_bootnodes(nodes_configuration)\n\n genesis_path = os.path.join(base_datadir, 'custom_genesis.json')\n geth_generate_poa_genesis(\n genesis_path,\n accounts_to_fund,\n seal_account,\n random_marker,\n )\n logdir = os.path.join(base_datadir, 'logs')\n\n # check that the test is running on non-capture mode, and if it is save\n # current term settings before running geth\n if isinstance(sys.stdin, io.IOBase):\n term_settings = termios.tcgetattr(sys.stdin)\n\n processes_list = geth_run_nodes(\n geth_nodes,\n nodes_configuration,\n base_datadir,\n genesis_path,\n chain_id,\n verbosity,\n logdir,\n )\n\n try:\n geth_wait_and_check(web3, accounts_to_fund, random_marker)\n\n for process in processes_list:\n process.poll()\n\n if process.returncode is not None:\n raise ValueError('geth failed to start')\n\n except (ValueError, RuntimeError) as e:\n # If geth_wait_and_check or the above loop throw an exception make sure\n # we don't end up with a rogue geth process running in the background\n for process in processes_list:\n process.terminate()\n raise e\n\n finally:\n # reenter echo mode (disabled by geth pasphrase prompt)\n if isinstance(sys.stdin, io.IOBase):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, term_settings)\n\n return processes_list", "def get_blocks():\n query = iroha.blocks_query()\n IrohaCrypto.sign_query(query, ADMIN_PRIVATE_KEY)\n for block in net.send_blocks_stream_query(query):\n print('\\nThe next block arrived:', block)", "def generate_graph(self):\n temp_graph = [[] for i in xrange(Parameters.num_peers)]\n unconnected = set([i for i in xrange(Parameters.num_peers)])\n while len(unconnected) > 1:\n node1 = random.sample(unconnected, 1)[0]\n unconnected.remove(node1)\n node2 = random.sample(unconnected, 1)[0]\n temp_graph[node2].append(self.nodes[node1])\n temp_graph[node1].append(self.nodes[node2])\n unconnected = set([i for i in xrange(Parameters.num_peers)])\n i = 0\n for i in xrange(Parameters.num_peers*Parameters.num_neighbours/2-Parameters.num_peers):\n a = random.sample(unconnected, 1)[0]\n b = random.sample(unconnected, 1)[0]\n while b == a:\n b = random.sample(unconnected, 1)[0]\n temp_graph[a].append(self.nodes[b])\n temp_graph[b].append(self.nodes[a])\n graph = {}\n for i in xrange(len(self.nodes)):\n graph[\"P_\" + str(i)] = list(set(temp_graph[i]))\n 
return graph", "def announce_new_block(block):\n for peer in peers:\n url = \"{}/add_block\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(block.__dict__, sort_keys=True),\n headers=headers)", "def inner_start_mining(self):\n print(\"Mining a new block\")\n blockchain = self.get_blockchain()\n self.request_transactions(blockchain)\n last_block_hash = blockchain.last_block().header\n complete_hash, nonce = self.proof_of_work(last_block_hash)\n new_block = self.create_block(complete_hash, nonce)\n self.send_block(new_block)\n self.reset_transaction()", "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))", "def verify_chain():\n for (index,block) in enumerate(blockchain):\n if index ==0:\n continue\n if block['previous_hash'] != hash_block(blockchain[index-1]):\n return False\n if not valid_proof(block['transactions'][:-1],block['previous_hash'],block['proof']):\n print('Proof of Work is Invalid')\n return False\n return True", "def mine(self):\n print(\"Mining\")\n\n prev_hash = self.r.get(PREV_HASH_KEY)\n if prev_hash:\n prev_hash = prev_hash.decode('utf-8')\n\n block = Block(prev_hash)\n\n\n # wait to fill the block with transactions\n while not block.full():\n # in between mining\n if self.stop_mining():\n print(\"Someone mined the coins\")\n l = len(block.transactions)\n left = TRANSACTIONS_IN_BLOCK - l\n for _ in range(left):\n self.r.blpop(TRANSACTION_QUEUE_KEY)\n return None\n\n print(\"Searching for transactions to fill the block\")\n # blocking pop from transaction key\n transaction = Transaction.from_redis(self.r, json.loads(self.r.blpop(TRANSACTION_QUEUE_KEY)[1].decode('utf-8')))\n print(\"found a transaction, adding it to block\")\n block.add_transaction(transaction)\n\n # create a new transaction that creates a lazycoin and gives it to the user\n print(\"Block is full, now add a create transaction\")\n print(\"Prev hash = \", prev_hash)\n create = Transaction(\n prev_hash=prev_hash,\n transaction_type='CREATE',\n sender=self.user.pub,\n receiver=self.user.pub,\n )\n\n # sign this transaction and add the signature to the transaction\n print(\"signing transaction\")\n msg, sign = self.user.sign(create)\n create.add_signature(sign)\n\n print(\"adding transaction\")\n block.add_transaction(create)\n\n print(\"finding nonce\")\n nonce = self.solve_puzzle(block)\n\n block.add_nonce(nonce)\n print(\"block done\")\n\n if self.stop_mining():\n print(\"stopping mining\")\n return None\n\n return block", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8dddd\\x03\\xe8\")", "def mine(self):\n new_block = Block(self.block['timestamp'], self.block['car'],\n self.block['id'])\n # link the block to the previous block\n new_block.previous_hash = self._get_previous_hash()\n while True:\n # get a hash\n new_hash = new_block.get_hash()\n # check hash rules, in our case check if the hash starts with\n # self.difficulty number of zeroes\n if new_hash[0] != self.difficulty * \"0\":\n if self.new_block[\"block\"] is None:\n # the hash hasn't been found yet by any other process,\n # therefore increase the nonce and continue\n # miners will use a different mining mechanism in order\n # to increase the probability of finding a hash by\n # a different miner\n 
new_block.increment_nonce(self.id + 1)\n continue\n break\n break\n\n # NOTE: May happen that two processes find the hash at the same time,\n # because there is not a big difficulty, however, it's not a problem,\n # for sake of the demo it's fine\n\n if self.new_block[\"block\"] is None:\n # this process has found the hash first\n print(self.id, \" - the winner hash\", new_hash)\n new_block.hash = new_hash\n self.new_block[\"block\"] = new_block\n print(self.id, \" - mined the block\")\n else:\n # validate the block found by other process (miner)\n if self.new_block[\"validated\"] is not False:\n print(self.id, \" - validating\")\n # check block's validity\n valid = False\n if self.new_block[\"block\"].is_block_valid():\n # check blockchain's validity when we apply the newly\n # mined block\n if self.is_blockchain_valid(self.new_block[\"block\"]):\n valid = True\n self.new_block[\"validated\"] = valid\n else:\n # NOTE: this demo doesn't take into account the number of\n # miners who approved the block, the block will be rejected\n # if any of them rejected it\n # but usually just more than 50% of miners must approve\n print(self.id, \" - the block has been rejected by other miner\")", "def mine_reorg(self, length):\n\n # make sure all invalidated blocks are node0's\n self.generatetoaddress(self.nodes[0], length, self.nodes[0].get_deterministic_priv_key().address)\n for x in self.nodes[0].p2ps:\n x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))\n x.clear_block_announcements()\n\n tip_height = self.nodes[1].getblockcount()\n hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))\n self.nodes[1].invalidateblock(hash_to_invalidate)\n all_hashes = self.generatetoaddress(self.nodes[1], length + 1, self.nodes[1].get_deterministic_priv_key().address) # Must be longer than the orig chain\n return [int(x, 16) for x in all_hashes]", "def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1" ]
[ "0.6776859", "0.6663315", "0.6528965", "0.62487274", "0.61456776", "0.6103467", "0.6063264", "0.59577507", "0.5954555", "0.59345937", "0.587696", "0.57966447", "0.5755227", "0.56951505", "0.5683918", "0.5678262", "0.5678262", "0.5674126", "0.5667841", "0.5650196", "0.56408125", "0.5621133", "0.5607785", "0.5598323", "0.5585601", "0.55741316", "0.55513513", "0.5548078", "0.5501472", "0.54925865" ]
0.7098296
0
It's common to forget to initialize your variables to the same values, or (less commonly) if you update them in some other way than adam, to get them out of sync. This function checks that variables on all MPI workers are the same, and raises an AssertionError otherwise
def check_synced(localval, comm=None): comm = comm or MPI.COMM_WORLD vals = comm.gather(localval) if comm.rank == 0: assert all(val==vals[0] for val in vals[1:]),\ 'MpiAdamOptimizer detected that different workers have different weights: {}'.format(vals)
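check_synced gathers one value per MPI worker and lets rank 0 assert that all workers report the same value. As a rough usage sketch, not part of the dataset record, each worker could pass a cheap fingerprint of its flattened parameters after every optimizer step; the fingerprint helper and the zero-filled stand-in weights below are illustrative assumptions.

```python
# Hypothetical usage sketch: check_synced is the function defined in the record
# above; the fingerprint helper and the stand-in weight vector are illustrative.
import hashlib

import numpy as np
from mpi4py import MPI


def params_fingerprint(flat_params):
    # Order-sensitive digest of the parameter vector, cheap to gather/compare.
    return hashlib.sha1(np.ascontiguousarray(flat_params).tobytes()).hexdigest()


comm = MPI.COMM_WORLD
flat_params = np.zeros(10, dtype=np.float32)  # stand-in for the real weights
# After every update step each worker sends its digest; rank 0 raises an
# AssertionError if any replica has drifted out of sync.
check_synced(params_fingerprint(flat_params), comm=comm)
```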
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_multiple_rng(self):\r\n rng1 = RandomStreams(1234)\r\n rng2 = RandomStreams(2392)\r\n assert rng1.random_state_variables is not rng2.random_state_variables", "def init_consistent_qa_variables(self):\n return tuple()", "def test_global():\n global_assumptions.add(x > 0)\n assert (x > 0) in global_assumptions\n global_assumptions.remove(x > 0)\n assert not (x > 0) in global_assumptions\n # same with multiple of assumptions\n global_assumptions.add(x > 0, y > 0)\n assert (x > 0) in global_assumptions\n assert (y > 0) in global_assumptions\n global_assumptions.clear()\n assert not (x > 0) in global_assumptions\n assert not (y > 0) in global_assumptions", "def guarantee_initialized_variables(self):\n\n global_vars = tf.global_variables()\n is_not_initialized = self.sess.run([tf.is_variable_initialized(var) for var in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\n for x in ['[#] Initialized: ' + str(i.name) for i in\n not_initialized_vars]:\n print(x)\n\n if len(not_initialized_vars):\n self.sess.run(tf.variables_initializer(not_initialized_vars))\n return True\n else:\n return False", "def _check_integrity(self):\n\n count = 0\n for (x, y) in self.__players[ChessGame.BLACK].union(\n self.__players[ChessGame.WHITE]):\n assert (x, y) in self.__board\n count += 1\n\n assert count == len(self.__board)", "def assert_goodness(self):\n if self._setted:\n self.assert_stored_iss()\n self.assert_stored_ks()\n ## Check idxs\n self.assert_stored_idxs()\n ## Check sp_relative_pos\n self.assert_stored_sp_rel_pos()", "def validate(self):\n variables = ['waterThickness', 'waterPressure']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/output.nc',\n filename2='restart_run/output.nc')", "def test_var_not_set(set_tempdir):\n tasks = run_n_simple_tasks(1)\n\n log_path_matcher = LogPathCorrectnessMatcher(default_log_path(tasks[0][\"jobid\"]))\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher", "def check_integrity(self):\r\n nodes = graph.ops(self.inputs, self.outputs)\r\n if self.apply_nodes != nodes:\r\n missing = nodes.difference(self.apply_nodes)\r\n excess = self.apply_nodes.difference(nodes)\r\n raise Exception(\r\n \"The nodes are inappropriately cached. missing, in excess: \",\r\n missing, excess)\r\n for node in nodes:\r\n if node.fgraph is not self:\r\n raise Exception(\"Node should belong to the FunctionGraph.\",\r\n node)\r\n for i, variable in enumerate(node.inputs):\r\n if variable.fgraph is not self:\r\n raise Exception(\r\n \"Input of node should belong to the FunctionGraph.\",\r\n variable, (node, i))\r\n if (node, i) not in variable.clients:\r\n raise Exception(\"Inconsistent clients list.\",\r\n (node, i), variable.clients)\r\n variables = set(graph.variables(self.inputs, self.outputs))\r\n if set(self.variables) != variables:\r\n missing = variables.difference(self.variables)\r\n excess = self.variables.difference(variables)\r\n raise Exception(\r\n \"The variables are inappropriately cached. 
missing, in excess: \",\r\n missing, excess)\r\n for variable in variables:\r\n if (variable.owner is None and\r\n variable not in self.inputs and\r\n not isinstance(variable, graph.Constant)):\r\n raise Exception(\"Undeclared input.\", variable)\r\n if variable.fgraph is not self:\r\n raise Exception(\"Variable should belong to the FunctionGraph.\",\r\n variable)\r\n for node, i in variable.clients:\r\n if node == 'output':\r\n if self.outputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, self.outputs[i])\r\n continue\r\n if node not in nodes:\r\n raise Exception(\"Client not in FunctionGraph.\",\r\n variable, (node, i))\r\n if node.inputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, node.inputs[i])", "def test_global_settings_data():\n\n def check_initialized(index):\n if index == 0:\n sleep(0.1)\n\n with pytest.raises(AttributeError):\n _global_settings_data.testing_index # pylint: disable=W0104\n _global_settings_data.testing_index = index\n\n sleep(0.5)\n return (\n test_global_settings_data_obj.shared_state[\"_output_type\"] is None\n and test_global_settings_data_obj.shared_state[\"root_cm\"] is None\n and _global_settings_data.testing_index == index\n )\n\n results = [delayed(check_initialized)(index) for index in range(5)]\n\n assert (delayed(all)(results)).compute()", "def test_distributeSettings(self):\n self.action._distributeSettings()\n if context.MPI_RANK == 0:\n self.assertEqual(self.cs, self.action.o.cs)\n else:\n self.assertNotEqual(self.cs, self.action.o.cs)\n original = {ss.name: ss.value for ss in self.cs.values()}\n current = {ss.name: ss.value for ss in self.action.o.cs.values()}\n # remove values that are *expected to be* different...\n # CONF_CROSS_SECTION is removed because unittest is being mean about\n # comparing dicts...\n for key in [\"stationaryBlockFlags\", \"verbosity\", CONF_CROSS_SECTION]:\n if key in original:\n del original[key]\n if key in current:\n del current[key]\n\n for key in original.keys():\n self.assertEqual(original[key], current[key])", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def test_shared_wires(self):\n wires = range(2)\n estimation_wires = range(1, 3)\n\n with pytest.raises(ValueError, match=\"No wires can be shared between the wires\"):\n quantum_monte_carlo(\n lambda: None, wires=wires, target_wire=0, estimation_wires=estimation_wires\n )", "def check_consistency(self):\n raise NotImplementedError()", "def _check_duplicates(self):\n # check variables\n counter = Counter(self.variables())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateVariables(duplicates)\n\n # check parameters\n counter = Counter(self.parameters())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateParameters(duplicates)", "def assert_stored_sp_rel_pos(self):\n# ## Temporal\n# if self.sp_relative_pos is not None:\n# if self._constant_neighs:\n# if self.staticneighs:\n# assert(len(np.array(self.sp_relative_pos).shape) == 3)\n# else:\n# assert(len(np.array(self.sp_relative_pos).shape) == 4)\n# #################\n array_types = [list, np.ndarray]\n if self.sp_relative_pos is not None:\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if type(self.sp_relative_pos) in [float, int, np.int32, np.int64]:\n# ### Probably redundant\n# # it is needed or possible this situation?\n# pass\n 
assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if self.ks is None:\n# assert(self.staticneighs)\n# assert(len(self.sp_relative_pos) == len(self.iss))\n if self.staticneighs:\n assert(len(self.sp_relative_pos) == len(self.iss))\n ## Assert deep 3\n if len(self.iss):\n assert(type(self.sp_relative_pos[0]) in array_types)\n else:\n assert(self.ks is not None)\n assert(len(self.sp_relative_pos) == len(self.ks))\n if type(self.sp_relative_pos[0]) in array_types:\n if not self.staticneighs:\n assert(len(self.sp_relative_pos[0]) == len(self.iss))\n if len(self.sp_relative_pos[0]) > 0:\n assert(type(self.sp_relative_pos[0][0]) in array_types)", "def test_update_mpi(self):\n sim_interface = SimInterface(setup_populated_instr_McStas())\n sim_interface.show_interface()\n\n fake_change = FakeChange(new=3)\n sim_interface.update_mpi(fake_change)\n self.assertEqual(sim_interface.mpi, 3)\n\n # Check input that wouldn't work is ignored\n fake_change = FakeChange(new=\"wrong input\")\n sim_interface.update_mpi(fake_change)\n self.assertEqual(sim_interface.mpi, 3)", "def test_pm_Completeness(self):\n pass", "def check_not_in_mpiexec(self):\n \n if 'HYDRA_CONTROL_FD' in os.environ or 'PMI_FD' in os.environ:\n self.skip('cannot run the socket tests under mpi process manager')", "def verify_assignments(gpu_assign, cpu_assign, data, gpu_clusters, cpu_clusters, verbose = 0, iTest = -1): \n # check that assignments are equal\n differences = sum(gpu_assign != cpu_assign)\n # print \"differences =\", differences\n error = 0\n if(differences > 0):\n error = 1\n if verbose:\n if iTest >= 0:\n print \"Test\", iTest,\n print \"*** ERROR ***\", differences, \"differences\"\n iDiff = np.arange(gpu_assign.shape[0])[gpu_assign != cpu_assign]\n print \"iDiff\", iDiff\n for ii in iDiff:\n print \"data point is\", data[:,ii]\n print \"cpu assigned to\", cpu_assign[ii]\n print \" with center at (cpu)\", cpu_clusters[:,cpu_assign[ii]]\n print \" with center at (gpu)\", gpu_clusters[:,cpu_assign[ii]]\n print \"gpu assigned to\", gpu_assign[ii]\n print \" with center at (cpu)\", cpu_clusters[:,gpu_assign[ii]]\n print \" with center at (gpu)\", gpu_clusters[:, gpu_assign[ii]]\n print \"\"\n print \"cpu calculated distances:\"\n print \" from point\", ii, \"to:\"\n print \" cluster\", cpu_assign[ii], \"is\", np.sqrt(np.sum((data[:,ii]-cpu_clusters[:,cpu_assign[ii]])**2))\n print \" cluster\", gpu_assign[ii], \"is\", np.sqrt(np.sum((data[:,ii]-cpu_clusters[:,gpu_assign[ii]])**2))\n print \"gpu calculated distances:\"\n print \" from point\", ii, \"to:\"\n print \" cluster\", cpu_assign[ii], \"is\", np.sqrt(np.sum((data[:,ii]-gpu_clusters[:,cpu_assign[ii]])**2))\n print \" cluster\", gpu_assign[ii], \"is\", np.sqrt(np.sum((data[:,ii]-gpu_clusters[:,gpu_assign[ii]])**2))\n else:\n if verbose:\n if iTest >= 0:\n print \"Test\", iTest,\n print \"Cluster assignment is OK\"\n return error", "def initialize_mpi(self):\n return False", "def test_equal(self):\r\n\r\n a_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n b_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n b_x_dist = 3\r\n b_y_dist = 3\r\n b_num_to_win = 1\r\n b_game = Game(b_players, b_x_dist, b_y_dist, b_num_to_win)\r\n\r\n c_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n c_x_dist = 3\r\n c_y_dist = 3\r\n c_num_to_win = 1\r\n c_game = Game(c_players, c_x_dist, c_y_dist, c_num_to_win)\r\n\r\n self.assertTrue(b_game == a_game == c_game)\r\n\r\n 
a_game.play_game()\r\n b_game.play_game()\r\n\r\n self.assertTrue(a_game == b_game)\r\n self.assertFalse(c_game == a_game)\r\n\r\n c_game.play_game()\r\n\r\n self.assertTrue(b_game == a_game == c_game)", "def raise_if_inconsistent(self):\n state = VMStateInconsistent()\n state.qemu = self.qemu.is_running()\n state.proc = bool(self.qemu.proc())\n state.ceph_lock = self.ceph.locked_by_me()\n self.log.debug(\n \"check-state-consistency\",\n is_consistent=state.is_consistent(),\n qemu=state.qemu,\n proc=state.proc,\n ceph_lock=state.ceph_lock,\n )\n if not state.is_consistent():\n raise state", "def verify(self):\n assert self.total_threads and self.total_threads > 0\n assert self.login_udp_port and 0 < self.login_udp_port < 65535\n assert self.seed > 0\n assert self.initial_state\n assert self.ports_pool and \\\n 0 < self.ports_pool[0] < self.ports_pool[1] < 65535\n assert self.login_udp_port < self.ports_pool[0] or \\\n self.login_udp_port > self.ports_pool[1]\n assert self.global_grid is not None\n self.global_grid.verify()\n if self.administrator_cfg:\n self.administrator_cfg.verify()\n assert self.administrator_cfg.udp_port < self.ports_pool[0] or \\\n self.administrator_cfg.udp_port > self.ports_pool[1]\n assert self.administrator_cfg.udp_port != self.login_udp_port", "def check_variables(self, model):\n for rhs_var in model.rhs.keys():\n if rhs_var.name in model.variables.keys():\n var = model.variables[rhs_var.name]\n\n different_shapes = not np.array_equal(\n model.rhs[rhs_var].shape, var.shape\n )\n\n not_concatenation = not isinstance(var, pybamm.Concatenation)\n\n not_mult_by_one_vec = not (\n isinstance(\n var, (pybamm.Multiplication, pybamm.MatrixMultiplication)\n )\n and (\n pybamm.is_matrix_one(var.left)\n or pybamm.is_matrix_one(var.right)\n )\n )\n\n if different_shapes and not_concatenation and not_mult_by_one_vec:\n raise pybamm.ModelError(\n \"variable and its eqn must have the same shape after \"\n \"discretisation but variable.shape = \"\n \"{} and rhs.shape = {} for variable '{}'. 
\".format(\n var.shape, model.rhs[rhs_var].shape, var\n )\n )", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def _check_consistency(self):\n\n # Run forward inference with n_sim=2 and catch any exception\n try:\n _, sim_data = self._forward_inference(n_sim=2, n_obs=10)\n except Exception as err:\n raise SimulationError(repr(err))\n\n # Run summary network check\n if self.summary_stats is not None:\n try:\n _ = self.summary_stats(sim_data)\n except Exception as err:\n raise SummaryStatsError(repr(err))\n\n # TODO: Run checks whether the network works with the data format\n\n # TODO: Run checks that loss works with the provided network", "def _check_assigned(self):\n\n if self.values is None and self.lazy:\n raise ValueError(\"This instance has not been assigned any data.\")", "def verify_runconfig(master_host, namespace, job_name, replica, num_ps,\n num_workers, num_evaluators):\n is_chief = True\n num_replicas = 1\n if replica == \"ps\":\n is_chief = False\n num_replicas = num_ps\n elif replica == \"worker\":\n is_chief = False\n num_replicas = num_workers\n elif replica == \"evaluator\":\n is_chief = False\n num_replicas = num_evaluators\n\n # Construct the expected cluster spec\n chief_list = [\n \"{name}-chief-0.{ns}.svc:2222\".format(name=job_name, ns=namespace)\n ]\n ps_list = []\n for i in range(num_ps):\n ps_list.append(\"{name}-ps-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n worker_list = []\n for i in range(num_workers):\n worker_list.append(\"{name}-worker-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n evaluator_list = []\n for i in range(num_evaluators):\n evaluator_list.append(\"{name}-evaluator-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n cluster_spec = {\n \"chief\": chief_list,\n \"ps\": ps_list,\n \"worker\": worker_list,\n }\n if num_evaluators > 0:\n cluster_spec[\"evaluator\"] = evaluator_list\n\n for i in range(num_replicas):\n full_target = \"{name}-{replica}-{index}\".format(\n name=job_name, replica=replica.lower(), index=i)\n actual_config = get_runconfig(master_host, namespace, full_target)\n full_svc = \"{ft}.{ns}.svc\".format(ft=full_target, ns=namespace)\n expected_config = {\n \"task_type\": replica,\n \"task_id\": i,\n \"cluster_spec\": cluster_spec,\n \"is_chief\": is_chief,\n \"master\": \"grpc://{fs}:2222\".format(fs=full_svc),\n \"num_worker_replicas\": num_workers + 1, # Chief is also a worker\n \"num_ps_replicas\": num_ps,\n } if not replica == \"evaluator\" else {\n # Evaluator has special config.\n \"task_type\": replica,\n \"task_id\": 0,\n \"cluster_spec\": {},\n \"is_chief\": is_chief,\n \"master\": \"\",\n \"num_worker_replicas\": 0,\n \"num_ps_replicas\": 0,\n }\n\n # Compare expected and actual configs\n if actual_config != expected_config:\n msg = \"Actual runconfig differs from expected. Expected: {0} Actual: {1}\".format(\n str(expected_config), str(actual_config))\n logging.error(msg)\n raise RuntimeError(msg)", "def verify_and_freeze(self):\n if self._driver is None and not self._strobers:\n raise ValueError(\n 'internal %s is not driven by anything' % self._name)\n if not self._users:\n raise ValueError(\n 'internal %s is never used' % self._name)\n self._frozen = True" ]
[ "0.59343547", "0.59310293", "0.5616036", "0.5610274", "0.55715793", "0.55436534", "0.55064803", "0.5479019", "0.5468771", "0.5462085", "0.5445539", "0.54302627", "0.54081", "0.5398708", "0.53432184", "0.5342977", "0.5330605", "0.53292537", "0.5326701", "0.5321385", "0.5313813", "0.53118324", "0.5293624", "0.5273868", "0.5269128", "0.5261808", "0.5252629", "0.524905", "0.52403414", "0.5226763" ]
0.6958268
0
This function is used to create the nested dict.
def nested_dict():
    try:
        num_list = [1, 2, 3, 4]
        new_dict = current = {}
        for name in num_list:
            current[name] = {}
            current = current[name]
        print(new_dict)
    except ValueError as e:
        logger.error("Could not find the dictionary: " + str(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dict(self, item, external_id=True, no_html=False, depth=1, optimize=False):\n if external_id:\n key_type = \"external_id\"\n else:\n key_type = \"field_id\"\n\n dictionary = dict([(field[key_type], {\"label\":field[\"label\"], \"type\": field[\"type\"], \"value\": self.getFieldValue(field, no_html, external_id=external_id, depth=depth, optimize=optimize)}) for field in item[\"fields\"]])\n return {'item': item[\"item_id\"], 'values':dictionary}", "def nested_dict():\n return defaultdict(nested_dict)", "def makeDict(self, item, nested=False, no_html=False):\n dictionary = dict([(field[\"external_id\"], self.getFieldValue(field, nested, no_html)) for field in item[\"fields\"]])\n return {'item': item[\"item_id\"], 'values':dictionary}", "def createDict( self ):\n d = {}\n devTup = ( 'endcap', 'comp', 'shutter','397intensity' )\n for dev in devTup:\n d[dev] = {'devChannels':{}}\n endcap = ( ( 1, 1 ), ( 2, 0 ) )\n comp = ( ( 1, 4 ), ( 2, 2 ), ( 'common', 3 ) )\n shutter = ( ( 1, 5 ), ( 2, 6 ), ( 3, 7 ) )\n intensity397 = (('397intensity',8),)\n chanTup = ( endcap, comp, shutter ,intensity397 )\n for dev, value in zip( devTup, chanTup ):\n for chanPair in value:\n d[dev]['devChannels'][chanPair[0]] = {'value':None, 'channel':chanPair[1]}\n ecRange = ( 0.0, 40.0 )\n compRange = ( -40.0, 40.0 )\n shutterRange = ( 0.0, 5.0 )\n intensity397Range = (0.0,2500.0)\n rangeTup = ( ecRange, compRange, shutterRange, intensity397Range )\n for dev, value in zip( devTup, rangeTup ): d[dev]['range'] = value\n self.dcDict = d", "def as_dict(self):\n def append(d, key, value, is_iterative, is_primitive, is_enum):\n if value is None:\n if is_iterative:\n value = []\n elif is_primitive == False and is_enum == False:\n if is_iterative:\n value = map(lambda i : i.as_dict(), value)\n else:\n value = value.as_dict()\n d[key] = value\n\n # Populate a deep dictionary.\n d = dict()\n append(d, 'file', self.__file, False, False, False)\n append(d, 'file_reference', self.__file_reference, False, False, False)\n append(d, 'name', self.__name, False, True, False)\n return d", "def _create_path(root, dict_type, path):\n for sub_path in path:\n if not isinstance(root.get(sub_path, None), dict):\n root[sub_path] = dict_type()\n\n root = root[sub_path]\n\n return root", "def get_data_to_create_object(self):\n return {}", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def make_dicts(self):\n self._dicts = [tree.to_dict() for tree in self.reaction_trees]\n self._update_route_dict(self._dicts, \"dict\")", "def _dictobj(self):\n\n theDict = {\"__table__\":\"Room\",\n \"id\":1,\n \"name\":\"Test Room\",\n \"roomTypeId\" : 1 }\n return theDict", "def as_dict(self):\n def append(d, key, value, is_iterative, is_primitive, is_enum):\n if value is None:\n if is_iterative:\n value = []\n elif is_primitive == False and is_enum == False:\n if is_iterative:\n value = map(lambda i : i.as_dict(), value)\n else:\n value = value.as_dict()\n d[key] = value\n\n # Populate a deep dictionary.\n d = dict()\n append(d, 'has_constant_offset', self.__has_constant_offset, False, True, False)\n append(d, 'length', self.__length, False, True, False)\n append(d, 'uom', self.__uom, False, True, False)\n return d", "def __build_dictionary(self):\n def recursive_generation(t):\n \"\"\"\n Nested helper function that recursively loops through an 
XML node to construct a dictionary.\n Solution from http://stackoverflow.com/a/10077069 (2013-01-19)\n \"\"\"\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n\n if children:\n dd = defaultdict(list)\n\n for dc in map(recursive_generation, children):\n for k, v in dc.iteritems():\n dd[k].append(v)\n\n d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}\n\n if t.attrib:\n d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())\n\n if t.text:\n text = t.text.strip()\n\n if children or t.attrib:\n if text:\n d[t.tag]['#text'] = text\n else:\n d[t.tag] = text\n\n return d\n \n string_repr = etree.tostring(self._config_file, pretty_print=True)\n element_tree = cElementTree.XML(string_repr)\n \n self._config_dict = recursive_generation(element_tree)\n self._config_dict = self._config_dict[self._config_dict.keys()[0]]", "def __create_dir_structure_file__(self):\n # | - __create_dir_structure_file__\n\n dir_structure_data = {}\n dir_structure_data[\"tree_level_labels\"] = self.tree_level_labels\n dir_structure_data[\"level_entries_dict\"] = self.level_entries_list\n # TEMP\n dir_structure_data[\"skip_dirs\"] = self.skip_dirs_lst\n\n fle_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/dir_structure.json\",\n )\n\n with open(fle_name, \"w\") as fle:\n json.dump(dir_structure_data, fle, indent=2)\n # __|", "def as_dict(self):\n return dict((key, value) for key, value, depth in self.entries.itervalues())", "def create_parent_dic(self):\n d = {}\n for tier_ID in self.tier_hierarchy:\n for child in self.tier_hierarchy[tier_ID]:\n\n d[child[\"id\"]] = tier_ID\n self.child_parent_dic = d", "def creating_dict(i, states):\n # base case\n if i == 5:\n # no more edges - recursion ends here\n return {'barcode': []}\n\n # iterative case\n else:\n # this is a tree structure where the node contains timepoint information and barcode information\n # and three edges link to other nodes that represent lineages in three differnet states\n updated_dict = {'t{}'.format(i): {state: creating_dict(i + 1, states) for state in states}}\n updated_dict['t{}'.format(i)].update({'barcode': []})\n return updated_dict", "def test_nested_dict(self):\n nested = self.TEI.nested_dict(exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. 
\",\n \"Check that notes are removed \")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")", "def __init__(self):\n self.structure = {}", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in dictionary\n return d", "def default_nested(self, data, many, **kwargs):\n if not data.get(\"metadata\"):\n data[\"metadata\"] = {}\n if not data.get(\"pids\"):\n data[\"pids\"] = {}\n\n return data", "def make_dict(cls, fields, fields_kwargs):\n return utils.make_dict(fields, fields_kwargs)", "def build_dct(dic, keys, value):\n key = keys.pop(0)\n if len(keys):\n dic.setdefault(key, {})\n build_dct(dic[key], keys, value)\n else:\n # Transform cookbook default attribute strings into proper booleans\n if value == \"false\":\n value = False\n elif value == \"true\":\n value = True\n # It's a leaf, assign value\n dic[key] = value", "def __create_level_entries_dict__(self,\n tree_level_labels,\n tree_level_values,\n ):\n # | - create_level_entries_dict\n level_entries_dict = {}\n for index, variable in enumerate(tree_level_labels):\n level_entries_dict[variable] = tree_level_values[index]\n\n return(level_entries_dict)\n # __|", "def _deep_asdict(self):\n return {\n \"schema\": self.schema._asdict(),\n \"publications\": {\n k: p._deep_asdict() for (k, p) in self.publications.items()\n },\n }", "def _encode_dictionary(data, name=\"Second\", sub=False):\n\n if sub:\n root = ET.Element(\"Field\", {\"Name\": f'{name}', \"Type\": \"elsystem.collections.dictionary\"})\n else: \n root = ET.Element(\"elsystem.collections.dictionary\")\n\n items = ET.SubElement(root, 'Field', {'Name': 'Items', 'Type': 'elsystem.collections.vector'})\n\n index = 0\n\n for key, val in data.items():\n\n pair = ET.SubElement(items, 'Field', {'Name': f'E{index}', 'Type': 'elsystem.collections.pair'})\n \n if type(val) == dict:\n ET.SubElement(pair, 'Field', {'Name': 'First', 'Value': _encode_value(key)}) \n sub_dict = _encode_dictionary(data=val, name=\"Second\", sub=True)\n pair.append(sub_dict)\n elif type(val) == list:\n ET.SubElement(pair, 'Field', {'Name': 'First', 'Value': _encode_value(key)}) \n sub_vec = _encode_list(data=val, name=F'E{index}', sub=True)\n pair.append(sub_vec)\n else:\n ET.SubElement(pair, 'Field', {'Name': 'First', 'Value': _encode_value(key)}) \n ET.SubElement(pair, 'Field', {'Name': 'Second', 'Value': _encode_value(val)}) \n\n index += 1\n\n ET.SubElement(items, 'Field', {'Name': 'count', 'Value': _encode_value(index)})\n\n if sub:\n return root \n else:\n return ET.tostring(root)", "def build(self, data: dict):", "def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 
'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data", "def make_dict(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n return _DictMaker(struct_class=cls, positional_args=args, keyword_args=kwargs).make_dict()", "def createStructure(self, root, dirDict):\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n child.createDirectory()\n self.createStructure(child, dirDict[x])\n else:\n child.setContent(dirDict[x].replace('\\n', os.linesep))", "def create_city():\n city = {}\n city['biysk'] = {}\n city['biysk']['barnaul'] = 9\n city['biysk']['novosibirsk'] = 11\n city['biysk']['belokurikha'] = 8\n city['barnaul'] = {}\n city['barnaul']['tomsk'] = 4\n city['belokurikha'] = {}\n city['belokurikha']['novosibirsk'] = 2\n city['novosibirsk'] = {}\n city['novosibirsk']['barnaul'] = 2\n city['novosibirsk']['tomsk'] = 5\n city['novosibirsk']['omsk'] = 20\n city['tomsk'] = {}\n city['tomsk']['krasnoyarsk'] = 6\n city['krasnoyarsk'] = {}\n city['krasnoyarsk']['omsk'] = 7\n city['omsk'] = {}\n return city" ]
[ "0.650134", "0.64788336", "0.62718195", "0.62711304", "0.62704086", "0.6265792", "0.6252099", "0.6152929", "0.60821116", "0.606705", "0.60655373", "0.60154074", "0.5973867", "0.596138", "0.5956204", "0.59232026", "0.59190255", "0.58948386", "0.58891314", "0.58726054", "0.5869975", "0.5865327", "0.5847803", "0.58420926", "0.5839227", "0.58295393", "0.5816942", "0.578422", "0.5783976", "0.57692355" ]
0.7117791
0
Parses attributes for the given hosts, checks whether the hosts are up, and then calls the path_check function with the working hosts.
def ip_check():
    hosts = []
    valid_hosts = []
    for item in sys.argv:
        if '@' in item:
            hosts.append(item)
    for i in hosts:
        host = i.split('@')[1].split(':')[0]
        command = os.system('ping -c 1 '+host+' > /dev/null')
        if command == 0:
            valid_hosts.append(i)
    if valid_hosts:
        path_check(valid_hosts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_check(hosts):\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p '+remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)", "def test_check_process_servers(self):\n self.cmd._process_servers(TEST_HOSTS, self.cloud_project)\n\n for host_id, test_host in TEST_HOSTS.items():\n host = CloudHost.objects.get(host_id=host_id)\n ips = host.ip_addresses\n self.assertEqual(host.hostname, test_host['hostname'])\n self.assertIn(test_host['tag'], host.tags.names())\n self.assertEqual(self.cloud_provider, host.cloudprovider)\n for ip in test_host['ips']:\n self.assertIn(ip, list(ips))\n self.assertEqual(host.hypervisor.hostname, test_host['hypervisor'])\n\n # check the creation date only for new hosts\n if host_id.find('_os_') != -1:\n self.assertEqual(\n datetime.strptime(\n test_host['created'],\n self.cmd.DATETIME_FORMAT\n ),\n host.created,\n )", "def check_hosts(zk,host_name,task,scheduler_log):\n\n #scheduler_log.debug(\"Scheduler Working...!!!\")\n try:\n #Leader Election\n leader = leaderCheck(zk=zk)\n #scheduler_log.debug(\"Leader Election Over\")\n #Update alive status to zookeeper - seems unnecessary\n imalive(zk=zk)\n #scheduler_log.debug(\"Alive Status Updated\")\n\n #If current Host is the Leader perform Scheduled Checks \n if (leader == host_name):\n scheduler_log.debug(\"%s : I am the Leader\"%host_name)\n\n #Fetch List of Hosts - From API\n host_dict = list_hosts(nova)\n allhosts = host_dict['all_list']\n api_down_nodes = host_dict['down_list']\n dishosts = host_dict['disabled_list']\n\n zk_all = zk.get_children(\"/openstack_ha/hosts/all\")\n zk_alive = zk.get_children(\"/openstack_ha/hosts/alive\")\n \n #Fetch Down nodes that are already Handeled - From Zookeeper\n zk_down = zk.get_children(\"/openstack_ha/hosts/down\")\n\n #Fetch nodes that are down and not already handled - From Zookeeper\n calculated_down_nodes = list(set(zk_all) - set(zk_alive))\n\n #Find Nodes Where Scheduler Only failed\n scheduler_down = list(set(calculated_down_nodes).difference(set(api_down_nodes)))\n for node in scheduler_down:\n scheduler_log.debug(\"HA Scheduler Failed on Node : %s \"%node)\n \n #Find Nodes Where API Only failed \n api_down = list(set(api_down_nodes).difference(set(calculated_down_nodes)))\n for node in api_down:\n scheduler_log.debug(\"API Failed on Node : %s \"%node)\n if node not in zk_all:\n scheduler_log.debug(\"HA Scheduler not even initialized %s\"%node)\n\n #Find nodes where both API and Zookeeper are failed \n api_scheduler_down = list(set(api_down_nodes).intersection(set(calculated_down_nodes)))\n\n # Possible Host states - Api only failure | Complete Host Failure ( Not yet Handled | Handling | Handled )\n if(len(api_scheduler_down))==0:\n 
scheduler_log.debug(\"Hosts working Normally....!!!\")\n else:\n scheduler_log.warning(\"More likely Disaster\")\n #skip if maintance\n # Here check the host in api_down_nodes(api) are present in calculated_down_nodes\n #if present start the instance migrations\n # Checking whether Cluster is Still under HA Policy\n # high availabity contiditions\n if len(api_scheduler_down) <= len(allhosts) - 1:\n scheduler_log.warn(\"Seems like Manageble Disaster\")\n for host in api_scheduler_down:\n scheduler_log.warning(\"Both Api and HA scheduler on\" +host+\" are down\")\n #checks whether down host from api is un handled(not present in down node calculate from zookeeper )\n #(host in zk_all and host not in zk_alive) == calculated_down_nodes\n if host in zk_down:\n #Node will present in zk_down only when all of it's instances are migrated\n scheduler_log.debug(\"Host %s Already handled...!!!!!\"%host)\n else:\n #Node down on api,zk and ( not handled | handling )\n if host not in dishosts:\n #Node Not disabled | disabled reason is not skippable\n scheduler_log.debug(host+\" is not disabled or reason is not maintenance\")\n if(zk.exists(\"/openstack_ha/hosts/time_out/\"+host)==None):\n scheduler_log.debug(\"Inside Time out Node Creation\")\n \n #adding host down time\n host_down_time = time.time()\n host_down_time = str.encode(str(host_down_time))\n scheduler_log.debug(host_down_time)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host, host_down_time)\n \n #adding time_suffix for json_dump file name\n temp_time=time.localtime(time.time()) \n time_suffix=str(temp_time.tm_mday)+\"_\"+str(temp_time.tm_mon)+\"_\"+\\\n str(temp_time.tm_year)+\"_\"+str(temp_time.tm_hour)+\"_\"+\\\n str(temp_time.tm_min)\n enc_time_suffix=str.encode(time_suffix)\n scheduler_log.debug(time_suffix)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\",enc_time_suffix)\n\n # call notification_mail(subj,msg) | Adding Down Node details to Notification \n try:\n subject = \"DGP Office VDI Node Down: %s\"%host\n message = \"Please Check the Network Connectivity and Powersupply as soon as possible\"\n notification_mail(subject,message,to_email=['[email protected]'])\n\n message = \"Please Contact System Administrator\"\n notification_mail(subject,message)\n scheduler_log.debug(\"mail in Scheduler...!\")\n except Exception as e:\n scheduler_log.debug(e)\n scheduler_log.debug(\"Error....! mail scheduler..!\")\n\n # add ping test\n ping_status=ping_check(host)\n if(ping_status):\n scheduler_log.debug(\"Not a Disaster\")\n scheduler_log.debug(\"ping test success....!!! Node is alive... 
Please Check the APIs,HA Scheduler and other Openstack Services\")\n\n else:\n scheduler_log.warning(\"Ping test also Failed on \"+host+\" proceed with migration\")\n if (zk.exists(\"/openstack_ha/hosts/start_migration/\"+ host)): # it checks the permission from the dashborad\n scheduler_log.warning(\" api down host :\"+host+\"present in zookeeper down_node:\")\n scheduler_log.debug(\"Strart migration....!!!!!\")\n scheduler_log.debug(\"migrating instances from the \"+host)\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode() \n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n #check for time out\n scheduler_log.debug(\"Checking Timeout for Down Node\",host)\n curent_time = time.time()\n if (zk.exists(\"/openstack_ha/hosts/time_out/\"+host)):\n down_host_failuretime = zk.get(\"/openstack_ha/hosts/time_out/\"+host)[0]\n down_host_failuretime = down_host_failuretime.decode(encoding='UTF-8')\n scheduler_log.warning(\"down_host_failuretime\",down_host_failuretime)\n down_host_failuretime = float(down_host_failuretime)\n time_interval = curent_time - down_host_failuretime\n if time_interval>migrate_time:\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode()\n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n scheduler_log.debug(\"Will Wait for another %d\"%(migrate_time-time_interval))\n else:\n scheduler_log.debug(\"%s Node Does'nt have TimeOut Value. Hence will not migrate forever\"%host)\n else:\n scheduler_log.debug(\"Host %s Under Maintenance\"%host)\n \n else:\n scheduler_log.warning(\"Un-Manageble Disaster Too many Nodes are down\")\n else:\n scheduler_log.debug(\"%s : Leader is %s\"%(host_name,leader))\n\n except Exception as e:\n if issubclass(e.__class__,kexception.NoNodeError):\n scheduler_log.exception(\"No node error\")\n elif any(issubclass(e.__class__, lv) for lv in kazoo_exceptions):\n scheduler_log.exception(\"Kazoo Exception.....: \")\n time.sleep(2)\n try:\n zk = KazooClient(hosts='127.0.0.1:2181')\n zk.start() \n Node_creation = createNodeinAll(zk=zk, host_name=host_name)\n election_Node = election_node(zk=zk, host_name=host_name)\n except:\n pass\n else:\n scheduler_log.warning(\"Unhandled Error \")\n scheduler_log.exception(\"\")", "def ping_many_updown_iter(self, hosts):\n raise NotImplementedError()", "def verify_all_stack_hosts(self):\n for _ in range(2):\n self.verify_stack_up()\n self.verify_no_cable_errors()\n self.verify_stack_hosts()\n self.verify_traveling_dhcp_mac()\n self.verify_unicast_not_looped()\n self.verify_no_bcast_to_self()\n self.verify_stack_has_no_loop()\n self.flap_all_switch_ports()", "def ping_many_updown(self, hosts):\n raise NotImplementedError()", "def handle_args(args: Namespace) -> list:\n # If no targets provided, assume were finding them on network.\n # Once we have targets, if no test given, port/service scan them.\n if not args.target:\n low(\"Target not supplied, running host scan.\")\n hosts = get_hosts(verify_subnet(args.subnet))\n else:\n low(\"Target supplied: {}\".format(args.target))\n hosts = [Host(host) for host in args.target]\n\n if args.user and args.passwd:\n low(\"Username and Password supplied for tests, {}:{}\".format(args.user, args.passwd))\n for host in hosts:\n host.credentials = {'user': args.user, 'passwd': args.passwd}\n\n return hosts", "def verify_stack_up(self, prop=1.0, timeout=25):\n for _ in range(timeout):\n links = 0\n 
links_up = 0\n for i, dpid in enumerate(self.dpids):\n dp_name = self.dp_name(i)\n for link in self.non_host_links(dpid):\n status = self.stack_port_status(dpid, dp_name, link.port)\n links += 1\n if status == 3: # up\n links_up += 1\n prop_up = links_up / links\n if prop_up >= prop:\n return\n time.sleep(1)\n self.fail('not enough links up: %f / %f' % (links_up, links))", "def test_reports_enabled_hosts_as_up(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def test_hosts_pass(self):\n expected_host_list = ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D0030102E5C3DFC0F']\n for hostgroup_hosts in [[\"host1\", \"host2\"], [\"84000000600A098000A4B28D0030102E5C3DFC0F\",\n \"84000000600A098000A4B28D003010315C3DFC11\"]]:\n self._set_args({\"state\": \"present\", \"name\": \"hostgroup1\", \"hosts\": hostgroup_hosts})\n hostgroup_object = NetAppESeriesHostGroup()\n\n with mock.patch(self.REQ_FUNC, return_value=(200, self.HOSTS_GET_RESPONSE)):\n for item in hostgroup_object.hosts:\n self.assertTrue(item in expected_host_list)\n\n # Create hostgroup with no hosts\n self._set_args({\"state\": \"present\", \"name\": \"hostgroup1\"})\n hostgroup_object = NetAppESeriesHostGroup()\n with mock.patch(self.REQ_FUNC, return_value=(200, [])):\n self.assertEqual(hostgroup_object.hosts, [])", "def ping_many_iter(self, hosts, *args, **kwargs):\n for state, ip in self.ping_many_updown_iter(hosts, *args, **kwargs):\n if state=='up':\n yield ip", "def include_up_hosts(nmap_host):\n if nmap_host.status == 'up':\n return True\n return False", "def _parse_hosts(self):\n hosts = dict()\n for address, h_cfg in self.host_configs.items():\n formatted_address = eval(address)\n os_cfg, srv_cfg, proc_cfg = self._construct_host_config(h_cfg)\n value = self._get_host_value(formatted_address, h_cfg)\n hosts[formatted_address] = Host(\n address=formatted_address,\n os=os_cfg,\n services=srv_cfg,\n processes=proc_cfg,\n firewall=h_cfg[u.HOST_FIREWALL],\n value=value\n 
)\n self.hosts = hosts", "def validate(self):\n if not self.keys:\n raise ValueError(\"Virtual host missing keys\")\n for i in self.keys:\n i.validate()", "def test_doesnt_report_disabled_hosts_as_up2(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')\n s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')\n db.service_update(self.context, s1['id'], {'disabled': True})\n db.service_update(self.context, s2['id'], {'disabled': True})\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(0, len(hosts))\n compute1.kill()\n compute2.kill()", "def getComponentLayoutValidations(self, services, hosts):\n items = super(KDP10StackAdvisor, self).getComponentLayoutValidations(services, hosts)\n\n # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible\n # Use a set for fast lookup\n hostsSet = set(super(KDP10StackAdvisor, self).getActiveHosts([host[\"Hosts\"] for host in hosts[\"items\"]])) #[host[\"Hosts\"][\"host_name\"] for host in hosts[\"items\"]]\n hostsCount = len(hostsSet)\n\n componentsListList = [service[\"components\"] for service in services[\"services\"]]\n componentsList = [item for sublist in componentsListList for item in sublist]\n nameNodeHosts = [component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if component[\"StackServiceComponents\"][\"component_name\"] == \"NAMENODE\"]\n secondaryNameNodeHosts = [component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if component[\"StackServiceComponents\"][\"component_name\"] == \"SECONDARY_NAMENODE\"]\n\n # Validating cardinality\n for component in componentsList:\n if component[\"StackServiceComponents\"][\"cardinality\"] is not None:\n componentName = component[\"StackServiceComponents\"][\"component_name\"]\n componentDisplayName = component[\"StackServiceComponents\"][\"display_name\"]\n componentHosts = []\n if component[\"StackServiceComponents\"][\"hostnames\"] is not None:\n componentHosts = [componentHost for componentHost in component[\"StackServiceComponents\"][\"hostnames\"] if componentHost in hostsSet]\n componentHostsCount = len(componentHosts)\n cardinality = str(component[\"StackServiceComponents\"][\"cardinality\"])\n # cardinality types: null, 1+, 1-2, 1, ALL\n message = None\n if \"+\" in cardinality:\n hostsMin = int(cardinality[:-1])\n if componentHostsCount < hostsMin:\n message = \"At least {0} {1} components should be installed in cluster.\".format(hostsMin, componentDisplayName)\n elif \"-\" in cardinality:\n nums = cardinality.split(\"-\")\n hostsMin = int(nums[0])\n hostsMax = int(nums[1])\n if componentHostsCount > hostsMax or componentHostsCount < hostsMin:\n message = \"Between {0} and {1} {2} components should be installed in cluster.\".format(hostsMin, hostsMax, componentDisplayName)\n elif \"ALL\" == cardinality:\n if componentHostsCount != hostsCount:\n message = \"{0} component should be installed on all hosts in cluster.\".format(componentDisplayName)\n else:\n if componentHostsCount != int(cardinality):\n message = \"Exactly {0} {1} components should be installed in cluster.\".format(int(cardinality), componentDisplayName)\n\n if message is not None:\n items.append({\"type\": 'host-component', \"level\": 'ERROR', \"message\": message, \"component-name\": componentName})\n\n # Validating host-usage\n usedHostsListList = 
[component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if not self.isComponentNotValuable(component)]\n usedHostsList = [item for sublist in usedHostsListList for item in sublist]\n nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]\n for host in nonUsedHostsList:\n items.append( { \"type\": 'host-component', \"level\": 'ERROR', \"message\": 'Host is not used', \"host\": str(host) } )\n\n return items", "def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)", "def test_reports_enabled_hosts_as_up_no_queue(self):\n # NOTE(vish): constructing service without create method\n # because we are going to use it without queue\n compute1 = service.Service('host1',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute1.start()\n compute2 = service.Service('host2',\n 'nova-compute',\n 'compute',\n FLAGS.compute_manager)\n compute2.start()\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def test_parse_url_lowercase_host() -> None:\n assert indieauth._parse_url(\"http://ex.com/hello\").path == \"/hello\"\n assert indieauth._parse_url(\"http://EX.COM/hello\").hostname == \"ex.com\"\n\n parts = indieauth._parse_url(\"http://EX.COM:123/HELLO\")\n assert parts.netloc == \"ex.com:123\"\n assert parts.path == \"/HELLO\"", "def all_paths_must_exist(cls, values):\n\n def check_model_path_fields_exist(model):\n for f in filter(lambda f: (f.type_ == pathlib.Path),\n model.__fields__.values()):\n p = validate_path_exists(getattr(model, f.name), values.get('yaml'))\n setattr(model, f.name, p)\n\n for c in values.get('configs'):\n check_model_path_fields_exist(c)\n\n for t in values.get('tests'):\n check_model_path_fields_exist(t)\n\n return values", "def __init__(self,\n hosts=None,\n ):\n\n # Initialize members of the class\n self.hosts = hosts", "def validate(self, apiobj, method, api, param, safe):\n if method in ('PUT', 'DELETE'):\n validate_strlist('site_name', param, safe, RX_SITE)\n validate_strlist('ce', param, safe, RX_FQDN)\n validate_strlist('release', param, safe, RX_RELEASE)\n validate_strlist('arch', param, safe, RX_ARCH)\n validate_lengths(safe, 'site_name', 'ce', 'release', 'arch')\n # Delay authz until we have database connection for name remapping.", "def check_all_hosts (self, repo_version_id, version_name):\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query1 = \"SELECT chm.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}';\".format(self.cluster_name)\n else:\n query1 = \"SELECT h.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}' JOIN hosts h ON chm.host_id = h.host_id;\".format(self.cluster_name)\n\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query2 = \"SELECT hv.host_name, hv.state FROM host_version hv WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n else:\n #query2 = \"SELECT hv.state,h.host_name FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n query2 = \"SELECT hv.state,h.host_name, hs.health_status,hs.agent_version,(h.total_mem/1024/1024) as total_mem_gb,(hs.available_mem/1024/1024) as available_mem_gb FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id JOIN hoststate hs ON h.host_id = hs.host_id WHERE 
hv.repo_version_id = {0} order by h.host_name;\".format(repo_version_id)\n # All cluster hosts\n host_names = set()\n self.cursor.execute(query1)\n rows = self.cursor.fetchall()\n if self.options.verbose:\n Logger.debug(query1 + \"\\n\")\n if rows and len(rows) > 0:\n host_names = set([row[0] for row in rows if len(row) == 1])\n Logger.debug(\"Hosts: {0}\".format(\", \".join(host_names)))\n\n host_name_to_state = {} # keys should be a subset of host_names\n hosts_with_repo_version_state_not_in_current = set()\n self.cursor.execute(query2 + \"\\n\")\n rows = self.cursor.fetchall()\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST(S) STATE\\t\")\n Logger.info(\"******************************************************************************************************************************************************\\n\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n Logger.info(\"State\\t\\tHostname\\t\\t\\t\\tHealth\\t\\tAgentVersion\\tTotalMemory\\tAvailableMemory\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n\n if rows and len(rows) > 0:\n for row in range(len(rows)):\n data = json.loads(rows[row][2])\n data1 = json.loads(rows[row][3])\n Logger.info(\"{0}\\t\\t{1}\\t\\t{2}\\t\\t{3}\\t\\t{4}\\t\\t{5}\".format(rows[row][0], rows[row][1], data[\"healthStatus\"], data1[\"version\"], rows[row][4], rows[row][5]))\n print (\"\\n\")\n Logger.debug(query2)\n if rows and len(rows) > 0:\n for row in rows:\n if len(row) == 6:\n host_name = row[1]\n state = row[0]\n host_name_to_state[host_name] = state\n if state.upper() != \"CURRENT\":\n hosts_with_repo_version_state_not_in_current.add(host_name)\n host_names_with_version = set(host_name_to_state.keys())\n host_names_without_version = host_names - host_names_with_version\n # Logger.info(\"\\t\\tHost(s) state Summary\")\n if len(host_names) > 0:\n if len(host_names_without_version) > 0:\n Logger.error(\"{0} host(s) do not have a Host Version for Repo Version {1}.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(host_names_without_version), version_name, \", \".join(host_names_without_version)))\n\n if len(hosts_with_repo_version_state_not_in_current) > 0:\n Logger.error(\"{0} host(s) have a Host Version for Repo Version {1} but the state is not CURRENT.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(hosts_with_repo_version_state_not_in_current), version_name, \", \".join(hosts_with_repo_version_state_not_in_current)))\n\n if len(host_names_without_version) == 0 and len(hosts_with_repo_version_state_not_in_current) == 0:\n Logger.info(\"Found {0} host(s) in the cluster, and all have a Host Version of CURRENT for \" \\\n \"Repo Version {1}. 
Things look good.\\n\".format(len(host_names), version_name))\n else:\n Logger.error(\"Make sure that all of these hosts are heartbeating, that they have the packages installed, the\\n\" \\\n \"hdp-select symlinks are correct, and that the services on these hosts have been restarated.\\n\")\n pass", "def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''", "def check_remove_hosts(self, export_details):\n\n playbook_host_dict = self.create_current_host_dict_playbook()\n remove_host_dict = dict()\n host_type_list = ['no_access_hosts', 'read_only_hosts',\n 'read_write_hosts', 'read_only_root_hosts',\n 'read_write_root_hosts']\n\n for host_type in host_type_list:\n if playbook_host_dict[host_type]:\n hosts_to_remove = list()\n ipv4_hosts, ipv6_hosts, fqdn_hosts = \\\n self.get_export_hosts(export_details[host_type])\n for host in playbook_host_dict[host_type]:\n version = check_ipv4_ipv6_fqdn(host)\n\n # Check if host is FQDN/Netgroup or IP\n if version:\n if version == 4:\n # IPv4 host is provided\n ipv4_host = self.get_ipv4_host(host)\n # Check if given host is member of already added\n # network\n if ipv4_host in ipv4_hosts:\n if str(ipv4_host.with_netmask) not in \\\n hosts_to_remove:\n hosts_to_remove.append(\n str(ipv4_host.with_netmask))\n else:\n # IPv6 host is provided\n ipv6_host = self.get_ipv6_host(host)\n # Check if given host is member of already added\n # network\n if ipv6_host in ipv6_hosts:\n if str(ipv6_host.with_prefixlen) not in \\\n hosts_to_remove:\n hosts_to_remove.append(\n str(ipv6_host.with_prefixlen))\n else:\n # FQDN/Netgroup is provided\n if host in fqdn_hosts:\n if host not in hosts_to_remove:\n hosts_to_remove.append(host)\n\n if hosts_to_remove:\n remove_host_dict['remove_' + host_type] = hosts_to_remove\n\n LOG.info(\"Host list to remove: %s\", remove_host_dict)\n return remove_host_dict", "def _check_path_availability(self, methods: typing.Iterable[str, ...]) -> None:\n\n for method in methods:\n self.analizer._check_path_availability(method)", "def pre_config_checks(self):\n\n\t\tif self.host is not None:\n\t\t\tself.tell(\"Doing pre-config checks\")\n\n\t\tself.do_checklist([])", "def test_attributes(self):\n self.assertEqual(self.client.host, self.test_host)\n self.assertEqual(self.client.auth.host, self.test_host)", "def check_paths(self):\n for path in self.paths:\n # check that arc starts at s\n arc = path[0]\n arc_start = self.arc_info[arc][\"start\"]\n assert(arc_start == self.source()), \"Path does not start at s\"\n # check that internal arcs are valid\n for (i, arc) in enumerate(path[:-1]):\n next_arc = path[i + 1]\n arc_destin = self.arc_info[arc][\"destin\"]\n next_arc_start = self.arc_info[next_arc][\"start\"]\n assert (arc_destin == next_arc_start), \"Invalid path\"\n arc = path[-1]\n arc_end = self.arc_info[arc][\"destin\"]\n assert(arc_end == self.sink()), \"Path does not end at t\"" ]
[ "0.5663521", "0.5292894", "0.52131265", "0.51896125", "0.51140106", "0.51105917", "0.5073687", "0.5060623", "0.50083005", "0.499502", "0.48528996", "0.48389998", "0.48383403", "0.48131078", "0.4779494", "0.4760623", "0.47501546", "0.47221673", "0.46816412", "0.46804914", "0.4661847", "0.46372575", "0.46233922", "0.46209094", "0.45995277", "0.45973685", "0.45951664", "0.4568063", "0.45617467", "0.4546668" ]
0.5357099
1
Finds all files or directories on the remote machine, according to the given attributes.
def find_remote_files(remote_path, type, ssh):
    (ssh_in, ssh_out, ssh_err) = ssh.exec_command("find %s -name \"*\" -type %s" % (remote_path, type))
    files = []
    for file in ssh_out.readlines():
        files.append(file.rstrip())
    return files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_local_files(local_path, type):\n local_out = commands.getoutput(\"find %s -name \\\"*\\\" -type %s\" % (local_path, type))\n files = []\n for file in local_out.split(\"\\n\"):\n files.append(file)\n return files", "def find_remote_files(product, date, channel, fs, mesoregion=None):\n if 'L1' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*M[36]C'.replace(\"{mesoregion}\", mesoregion) + str(channel) + '*.nc')]\n elif 'L2' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*'.replace(\"{mesoregion}\", mesoregion) + str(product) + '*M[36]' + '*.nc')]\n\n files = [y for x in files for y in x]\n\n return files", "def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def __request_virdir(self):\n for pn in self.peernames:\n with socket.socket() as tmpsock:\n tmpsock.connect(tuple(pn))\n\n # Solicitud\n tmpsock.send(message.REQDIRMSG)\n header = tmpsock.recv(5)\n\n if header[0] != message.GIVEDIR:\n raise ValueError(\n \"[SHARE] error al solicitar archivos a\", pn)\n\n # Primero se le piden sus archivos\n bodysize = int.from_bytes(header[1:5], byteorder=\"big\")\n body = tmpsock.recv(bodysize)\n port, sharelist = message.parse_file_bytes(body, bodysize)\n self.__add_sharefiles(sharelist, pn)\n\n # Despues le comparte los suyos\n tmpsock.send(message.build_givedir_message(\n self.port, self.files))", "def find(cls, attrs):\n return [cls(data) for data in cls.db().find(attrs, True)]", "def find_remotes(self):\n\n attrs = ['name', 'fetch', 'review']\n remotes = dict()\n\n for remote in self.tree.findall('remote'):\n values = [remote.get(attr) for attr in attrs]\n remote_dict = dict(zip(attrs, values))\n remote_name = remote_dict.pop('name')\n\n if remote_name is None or remote_dict['fetch'] is None:\n if self.fail_on_invalid:\n raise InvalidManifest(\n 'Remote entry missing \"name\" or \"fetch\" attribute'\n )\n else:\n continue\n\n if remote_name in remotes:\n raise InvalidManifest(\n 'Remote entry duplicates previous remote entry'\n )\n\n remotes[remote_name] = self.generate_data_dict(remote_dict)\n\n self.remotes = remotes", "def ListFiles(self, ext_attrs=None):\n del ext_attrs # Unused.\n\n if not self.IsDirectory():\n return\n\n if self.hive is None:\n for name in dir(winreg):\n if name.startswith(\"HKEY_\"):\n response = rdf_client_fs.StatEntry(st_mode=stat.S_IFDIR)\n response_pathspec = self.pathspec.Copy()\n response_pathspec.last.path = utils.JoinPath(\n response_pathspec.last.path, name)\n response.pathspec = response_pathspec\n\n yield 
response\n return\n\n try:\n with OpenKey(self.hive, self.local_path) as key:\n (self.number_of_keys, self.number_of_values,\n self.last_modified) = QueryInfoKey(key)\n\n # First keys - These will look like directories.\n for i in range(self.number_of_keys):\n try:\n name = EnumKey(key, i)\n key_name = utils.JoinPath(self.local_path, name)\n\n try:\n # Store the default value in the stat response for values.\n with OpenKey(self.hive, key_name) as subkey:\n value, value_type = QueryValueEx(subkey, \"\")\n except OSError:\n value, value_type = None, None\n\n response = self._Stat(name, value, value_type)\n # Keys look like Directories in the VFS.\n response.st_mode = stat.S_IFDIR\n\n yield response\n except OSError:\n pass\n\n # Now Values - These will look like files.\n for i in range(self.number_of_values):\n try:\n name, value, value_type = EnumValue(key, i)\n response = self._Stat(name, value, value_type)\n\n # Values look like files in the VFS.\n response.st_mode = stat.S_IFREG\n\n yield response\n\n except OSError:\n pass\n except OSError as e:\n raise IOError(\"Unable to list key %s: %s\" % (self.key_name, e))", "def get_files(self, step):\n dht = get_remote_node(self.dht_ip, self.dht_port)\n files = dht.get(get_hash(filestep + \"|\" + str(step)))\n return files", "def findattrs(self, srvurl, attrids = \"\", scopelist = \"default\",\n callback = None, cbdata = None):\n cb = callback\n if not callback:\n cb = self.__attrcb\n cbdata = [ SLPError.SLP_OK, {} ]\n err = self.slph.findattrs(srvurl, slpstr(scopelist),\n slpstr(attrids), cb, cbdata) \n if not callback:\n if cbdata[0] != SLPError.SLP_OK:\n raise SLPError(cbdata[0])\n return cbdata[1]", "def findMayaFiles(directory):\n\n pass", "def find(query):\n acc = []\n for root, dirs, files in os.walk(query, topdown=False):\n for name in files:\n acc += [os.path.join(root, name)]\n return acc", "def search(regex, paths, args, ignore_case=False, verbose=False):\n printer = MultiLinePrinter()\n for path in paths:\n if os.path.isdir(path):\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n if not KNOWN_TYPES or any([filename.endswith(suffix) for\n suffix in KNOWN_TYPES]):\n search_file(os.path.join(dirname, filename), regex,\n ignore_case, args.undefined, printer)\n else:\n search_file(path, regex, ignore_case, args.undefined, printer)", "def list_remote_files(tag=None, inst_id=None, data_path=None, format_str=None,\n start=None, stop=None, test_dates=None, user=None,\n password=None, mangle_file_dates=False,\n test_list_remote_kwarg=None):\n\n # Support keyword testing\n logger.info(''.join(('test_list_remote_kwarg = ',\n str(test_list_remote_kwarg))))\n\n # Determine the appropriate date range for the fake files\n if start is None:\n start = test_dates[''][''] - pds.DateOffset(years=1)\n\n if stop is None:\n stop = (test_dates[''][''] + pds.DateOffset(years=2)\n - pds.DateOffset(days=1) + pds.DateOffset(months=1))\n\n file_date_range = pds.date_range(start, stop)\n\n return list_files(tag=tag, inst_id=inst_id, data_path=data_path,\n format_str=format_str, file_date_range=file_date_range,\n mangle_file_dates=mangle_file_dates,\n test_dates=test_dates)", "def ls(path, filter=None):", "def find_URLs(directory, options):\n\n files = os.listdir(directory)\n filtered_files = []\n files_for_download = []\n for item in files:\n if item.endswith(\".json\"):\n filtered_files.append(item)\n\n for item in filtered_files:\n file_path = os.path.join(directory, item)\n\n with open(file_path, \"r\") as json_file:\n payload = 
json.load(json_file)\n for message in payload:\n if (\"subtype\" in message\n and message.get(\"subtype\") == \"file_share\"):\n\n download_URL = message.get(\"file\").get(\"url_download\")\n\n if options.remote_name:\n download_filename = message.get(\"file\").get(\"id\")\n else:\n download_filename = message.get(\"file\").get(\"name\")\n if download_filename.startswith(\"-.\"):\n download_filename = download_filename.lstrip(\"-\")\n download_filename = \"{}{}\".format(\n message.get(\"file\").get(\"id\"),\n download_filename)\n\n files_for_download.append(\n (download_filename, download_URL))\n\n download_URLs(files_for_download, directory)", "def search_ldap(connection, search_base, attrlist):\n if (connection and search_base):\n if (attrlist):\n ldap_result = connection.search_s(search_base, ldap.SCOPE_SUBTREE, attrlist=attrlist)\n else:\n ldap_result = connection.search_s(search_base, ldap.SCOPE_SUBTREE) \n else:\n print \"Error: search_ldap: Connection object or search base argument given was not valid.\"\n print\n sys.exit(1)\n\n return ldap_result", "def listfiles(self, *args, **kwargs):\n recursive = kwargs.get(\"recursive\", True)\n self._download_server_info()\n if self._info:\n return [a for a in self._info.keys() if _is_prefix(args, a)]\n text = self._open(*args).text\n parser = _FindLinksParser()\n parser.feed(text)\n links = parser.links\n files = [args + (f,) for f in links if not f.endswith(\"/\") and not f.endswith(\".info\")]\n if recursive:\n for f in links:\n if f.endswith(\"/\"):\n f = f.strip(\"/\")\n nargs = args + (f,)\n files.extend([a for a in self.listfiles(*nargs, recursive=True)])\n return files", "def _get_remote_files(config):\n if \"cache\" in config:\n return config[\"cache\"]\n out = {}\n for project, folder in _remote_folders(config):\n out.update(_project_files(project, folder))\n return out", "def unix_find(pathin):\n return [os.path.join(path, file)\n for (path, dirs, files) in os.walk(pathin, followlinks=False)\n for file in files]", "def search(self, remote_path, remote_ids, search_query, nb_skip, nb_returns, storage_id=None):\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n return client.search(remote_ids, search_query, nb_skip, nb_returns)", "def find_all(self):\n pass", "def find_all(self):", "def remote_pull(*keys):", "def locate(self, *args, **kwargs):\n paths, path_string = [], \"\"\n self._locate(*args, paths=paths, path_string=path_string, **kwargs)\n return paths", "def locate(self, *args, **kwargs):\n paths, path_string = [], \"\"\n self._locate(*args, paths=paths, path_string=path_string, **kwargs)\n return paths", "def _find_files(\n root: str,\n includes: Union[List[str], str],\n excludes: Optional[List[str]] = None,\n follow_symlinks: bool = False,\n) -> Iterator[str]:\n root = os.path.abspath(root)\n file_set = formic.FileSet(\n directory=root, include=includes, exclude=excludes, symlinks=follow_symlinks\n )\n yield from file_set.qualified_files(absolute=False)", "def find_images(\n ami_name=None,\n executable_by=None,\n owners=None,\n image_ids=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n):\n retries = 30\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n while retries:\n try:\n filter_parameters = {\"filters\": {}}\n if image_ids:\n filter_parameters[\"image_ids\"] = [image_ids]\n if executable_by:\n filter_parameters[\"executable_by\"] = [executable_by]\n if owners:\n filter_parameters[\"owners\"] = [owners]\n if ami_name:\n 
filter_parameters[\"filters\"][\"name\"] = ami_name\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n images = conn.get_all_images(**filter_parameters)\n log.debug(\n \"The filters criteria %s matched the following images:%s\",\n filter_parameters,\n images,\n )\n if images:\n if return_objs:\n return images\n return [image.id for image in images]\n else:\n return False\n except boto.exception.BotoServerError as exc:\n if exc.error_code == \"Throttling\":\n log.debug(\"Throttled by AWS API, will retry in 5 seconds...\")\n time.sleep(5)\n retries -= 1\n continue\n log.error(\"Failed to convert AMI name `%s` to an AMI ID: %s\", ami_name, exc)\n return False\n return False", "def find_all(name, path):\n result = []\n try:\n if platform._is_win: # pragma: windows\n if path is None:\n out = subprocess.check_output([\"where\", name],\n env=os.environ,\n stderr=subprocess.STDOUT)\n else:\n out = subprocess.check_output([\"where\", \"/r\", path, name],\n env=os.environ,\n stderr=subprocess.STDOUT)\n else:\n args = [\"find\", path, \"-type\", \"f\", \"-name\", name]\n pfind = subprocess.Popen(args, env=os.environ,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n (stdoutdata, stderrdata) = pfind.communicate()\n out = stdoutdata\n for l in stderrdata.splitlines():\n if backwards.unicode2bytes('Permission denied') not in l:\n raise subprocess.CalledProcessError(pfind.returncode,\n ' '.join(args),\n output=stderrdata)\n except subprocess.CalledProcessError:\n out = ''\n if not out.isspace():\n result = sorted(out.splitlines())\n result = [os.path.normcase(os.path.normpath(m.decode('utf-8'))) for m in result]\n return result", "def locate_all_users(self, fields=\"all\"):\n if fields == \"all\":\n return_fields = all_fields\n else:\n return_fields = fields\n return self.ldap_connection.search_s(\"ou=Users,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, \"uid=*\", return_fields)", "def remote_paths(self) -> list:\r\n results: list = []\r\n\r\n if self.imports_node is not None:\r\n results.extend([node.text for node in filter(is_import_node, self.imports_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n if self.folders_node is not None:\r\n results.extend([node.text for node in filter(is_folder_node, self.folders_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n return results" ]
[ "0.57766545", "0.55919", "0.54436123", "0.5392925", "0.53435534", "0.53391916", "0.5281884", "0.5257585", "0.5255809", "0.5195542", "0.5191573", "0.51889825", "0.51854223", "0.51667845", "0.5155281", "0.5135752", "0.511923", "0.5106351", "0.5089739", "0.50852174", "0.5084342", "0.5061314", "0.5055684", "0.5051764", "0.5051764", "0.50439674", "0.50378853", "0.5034536", "0.50267214", "0.50221545" ]
0.6770828
0
Finds all files or directories on the local machine, according to the given attributes.
def find_local_files(local_path, type):
    local_out = commands.getoutput("find %s -name \"*\" -type %s" % (local_path, type))
    files = []
    for file in local_out.split("\n"):
        files.append(file)
    return files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(regex, paths, args, ignore_case=False, verbose=False):\n printer = MultiLinePrinter()\n for path in paths:\n if os.path.isdir(path):\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n if not KNOWN_TYPES or any([filename.endswith(suffix) for\n suffix in KNOWN_TYPES]):\n search_file(os.path.join(dirname, filename), regex,\n ignore_case, args.undefined, printer)\n else:\n search_file(path, regex, ignore_case, args.undefined, printer)", "def findMayaFiles(directory):\n\n pass", "def find(query):\n acc = []\n for root, dirs, files in os.walk(query, topdown=False):\n for name in files:\n acc += [os.path.join(root, name)]\n return acc", "def _find_files(\n root: str,\n includes: Union[List[str], str],\n excludes: Optional[List[str]] = None,\n follow_symlinks: bool = False,\n) -> Iterator[str]:\n root = os.path.abspath(root)\n file_set = formic.FileSet(\n directory=root, include=includes, exclude=excludes, symlinks=follow_symlinks\n )\n yield from file_set.qualified_files(absolute=False)", "def find_all(name, path):\n result = []\n try:\n if platform._is_win: # pragma: windows\n if path is None:\n out = subprocess.check_output([\"where\", name],\n env=os.environ,\n stderr=subprocess.STDOUT)\n else:\n out = subprocess.check_output([\"where\", \"/r\", path, name],\n env=os.environ,\n stderr=subprocess.STDOUT)\n else:\n args = [\"find\", path, \"-type\", \"f\", \"-name\", name]\n pfind = subprocess.Popen(args, env=os.environ,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n (stdoutdata, stderrdata) = pfind.communicate()\n out = stdoutdata\n for l in stderrdata.splitlines():\n if backwards.unicode2bytes('Permission denied') not in l:\n raise subprocess.CalledProcessError(pfind.returncode,\n ' '.join(args),\n output=stderrdata)\n except subprocess.CalledProcessError:\n out = ''\n if not out.isspace():\n result = sorted(out.splitlines())\n result = [os.path.normcase(os.path.normpath(m.decode('utf-8'))) for m in result]\n return result", "def collect_local(self, path, req_tag=True):\n for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:\n if not os.path.isfile(f):\n continue\n self.collect_single(f, req_tag)", "def unix_find(pathin):\n return [os.path.join(path, file)\n for (path, dirs, files) in os.walk(pathin, followlinks=False)\n for file in files]", "def scan(self,project_dir):\n ftypes = [\".csv\", \".data\", \".xlsx\"]\n print(\"Scanning directory : \",project_dir)\n print(\"Searching for : \",ftypes)\n self.localfiles = {}\n for dirpath, dirnames, filenames in os.walk(project_dir, topdown=True):\n for filename in filenames:\n for ftype in ftypes:\n if ftype in filename:\n self.localfiles[filename] = {\n \"filename\": filename,\n \"filesize\": getsize(os.path.join(dirpath, filename)),\n \"abspath\": os.path.join(dirpath, filename),\n \"dirpath\": dirpath,\n \n }\n print(\"Found These: \",[file_name for file_name in self.localfiles.keys()])", "def locate(root = '.', target = 'info'):\n \n matches = []\n \n for root, dirnames, filenames in os.walk(root):\n for dirnames in fnmatch.filter(dirnames, target):\n matches.append(os.path.join(root, dirnames))\n \n return matches", "def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n 
raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load", "def ls(path, filter=None):", "def find(cls, paths):\r\n pythons = []\r\n for path in paths:\r\n for fn in cls.expand_path(path):\r\n basefile = os.path.basename(fn)\r\n if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):\r\n try:\r\n pythons.append(cls.from_binary(fn))\r\n except Exception as e:\r\n TRACER.log('Could not identify %s: %s' % (fn, e))\r\n continue\r\n return pythons", "def locate(self, *args, **kwargs):\n paths, path_string = [], \"\"\n self._locate(*args, paths=paths, path_string=path_string, **kwargs)\n return paths", "def locate(self, *args, **kwargs):\n paths, path_string = [], \"\"\n self._locate(*args, paths=paths, path_string=path_string, **kwargs)\n return paths", "def grab_files(local_list=local_list,\n acts=acts,\n class_dict=class_dict,\n verbose=verbose,\n imagenet_root='/storage/data/imagenet_2012/',\n in_class_sub_dirs=True):\n selected_image_list = []\n found_classes = []\n for selected_point in local_list:\n # grab filename\n selected_file = acts.get_file_name(selected_point).decode('UTF-8')\n if verbose:\n pass\n #print(selected_file)\n class_dir_label = selected_file.split('_')[0]\n if in_class_sub_dirs:\n # we've assumed files are in folders labelled by class!\n selected_image_list.append(imagenet_root + class_dir_label + '/' + selected_file)\n else:\n selected_image_list.append(imagenet_root + selected_file)\n class_no = class_dict[selected_file.split('_')[0]]\n if not class_no in found_classes:\n found_classes.append(class_no)\n return selected_image_list", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def finder(files, queries):\n # Create a list of file names mapped to all paths that lead to them\n paths = {}\n for file in files:\n fname = file.split('/')[-1]\n paths[fname] = paths.get(fname, [])\n paths[fname].append(file)\n\n # Iterate over all queries, and add the file paths pointing to each query\n # to result and return it\n result = []\n for query in queries:\n result += paths.get(query, [])\n return result", "def _populate_params(self):\n self.params = []\n for root, dirs, files in os.walk(os.curdir):\n for file in files:\n fullfile = str(os.path.join(root, file))\n if self.config.regex_find_params.match(fullfile):\n self.params.append(fullfile)", "def find_remote_files(remote_path, type, ssh):\n (ssh_in, ssh_out, ssh_err) = ssh.exec_command(\"find %s -name \\\"*\\\" -type %s\" % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files", "def ListFiles(self, ext_attrs=None):\n del ext_attrs # Unused.\n\n if not self.IsDirectory():\n return\n\n if self.hive is None:\n for name in dir(winreg):\n if name.startswith(\"HKEY_\"):\n response = rdf_client_fs.StatEntry(st_mode=stat.S_IFDIR)\n response_pathspec = self.pathspec.Copy()\n response_pathspec.last.path = utils.JoinPath(\n response_pathspec.last.path, name)\n response.pathspec = response_pathspec\n\n yield response\n return\n\n try:\n with OpenKey(self.hive, self.local_path) as key:\n (self.number_of_keys, self.number_of_values,\n self.last_modified) = QueryInfoKey(key)\n\n # First keys - These will 
look like directories.\n for i in range(self.number_of_keys):\n try:\n name = EnumKey(key, i)\n key_name = utils.JoinPath(self.local_path, name)\n\n try:\n # Store the default value in the stat response for values.\n with OpenKey(self.hive, key_name) as subkey:\n value, value_type = QueryValueEx(subkey, \"\")\n except OSError:\n value, value_type = None, None\n\n response = self._Stat(name, value, value_type)\n # Keys look like Directories in the VFS.\n response.st_mode = stat.S_IFDIR\n\n yield response\n except OSError:\n pass\n\n # Now Values - These will look like files.\n for i in range(self.number_of_values):\n try:\n name, value, value_type = EnumValue(key, i)\n response = self._Stat(name, value, value_type)\n\n # Values look like files in the VFS.\n response.st_mode = stat.S_IFREG\n\n yield response\n\n except OSError:\n pass\n except OSError as e:\n raise IOError(\"Unable to list key %s: %s\" % (self.key_name, e))", "def find(cls, attrs):\n return [cls(data) for data in cls.db().find(attrs, True)]", "def find_file(filename, std_dirs, paths):\n\n # Check the standard locations\n for dir in std_dirs:\n f = os.path.join(dir, filename)\n print('looking for', f)\n if os.path.exists(f):\n return []\n\n # Check the additional directories\n for dir in paths:\n f = os.path.join(dir, filename)\n print('looking for', f)\n if os.path.exists(f):\n return [dir]\n\n # Not found anywhere\n return None", "def _find_files(research_structure, raise_on_all_missing=True):\n found = []\n filenames = []\n paths_searched = []\n ## config file lookup resolution\n for enforce_file_existence, cascaded, fun in research_structure:\n candidate = fun()\n if candidate is None:\n continue\n paths_searched.append(candidate)\n filenames.append((cascaded, candidate))\n if os.path.exists(candidate):\n found.append(candidate)\n if cascaded is False:\n break\n else:\n if enforce_file_existence:\n raise ValueError(\"File %r does not exists.\" % candidate)\n if not found and raise_on_all_missing:\n raise ValueError(\"No config file was found in those paths: %s.\"\n % ', '.join(paths_searched))\n return filenames", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tif target in element:\r\n\t\t\t\tfiles.append(path + os.sep + element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\r\n\t\t\tfiles.extend(findFiles(target, os.getcwd()))\r\n\t\t\tos.chdir(\"..\")\r\n\treturn files", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def findFile(self, start_path, find_pattern, ignore_pattern):\n\n for (dirname, subdirs, files) in self.ftp.walk(start_path):\n\n for file in files:\n\n file_finder = re.finditer(find_pattern, file, re.S)\n\n meta_pattern = re.compile(ignore_pattern)\n\n meta_match = meta_pattern.match(file)\n\n if not meta_match and file_finder:\n\n for filename in file_finder:\n self.file_path = dirname + \"/\" + filename.group( )\n\n self.paths.append(self.file_path)\n\n return self.paths", "def _findFiles(self, topLevelDirectory, extension=\".py\", foundFiles=None):\n \n #mutable default arguments in Python are evaluated once when the function is defined, not each time the function is called.\n if foundFiles == None:\n foundFiles = []\n \n for dirpath, dirnames, filenames in os.walk(topLevelDirectory):\n for filename in filenames:\n #need to verify that the entity is a file (this avoids problems when directory names have file 
extensions)\n if filename[-len(extension):] == extension and filename[0:1] != '.' and os.path.isfile(dirpath+\"/\"+filename):\n foundFiles.append(dirpath+\"/\"+filename)\n #print dirpath+\"/\"+filename\n return foundFiles", "def wingrep(self):\n for folder, files_ in self.walk():\n listed_files = self.list_appro_files(folder, files_)\n for file_o in self.open_files(listed_files=listed_files):\n self.search_in(file_o)", "def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):\n print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))\n dirlist = os.listdir(dirname)\n for filep in dirlist:\n filep = os.path.join(dirname,filep) \n if os.path.islink(filep):\n pass\n elif os.path.isdir(filep):\n m = re.search(notdir , filep)\n if m is None:\n self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )\n elif os.path.isfile(filep) and '.dat' in filep: \n nm = re.search(notsearch, filep)\n m = re.search(search , filep)\n #print m , nm\n if m is not None and nm is None:\n self.plotfiles.append(filep)\n else:\n pass", "def get_all(file_types=None):\n return set(lsof_to_files(call_lsof(), file_types))" ]
[ "0.6211064", "0.6019969", "0.5998675", "0.5906028", "0.57402915", "0.5688038", "0.5669542", "0.5578637", "0.5574477", "0.55228025", "0.5517279", "0.54891247", "0.5458164", "0.5458164", "0.54517686", "0.5449568", "0.54423726", "0.54343617", "0.5415774", "0.54102737", "0.5378713", "0.53669256", "0.5366737", "0.536039", "0.5299206", "0.52854097", "0.52815896", "0.5277621", "0.5274709", "0.5260585" ]
0.6285848
0
Show list of movies.
def movie_list():
    movies = Movie.query.order_by(Movie.title).all()
    return render_template("movie_list.html", movies=movies)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def movie_list():\n\n movies = Movie.query.order_by(\"title asc\").all()\n return render_template(\"/movie_list.html\", movies=movies)", "def movie_list():\n\n movies = Movie.query.order_by(Movie.movie_title).all()\n return render_template(\"movie_list.html\", movies=movies)", "def list_movie():\n if not MOVIES:\n print('No stored movies yet')\n\n for movie in MOVIES:\n print(f\"{movie['name']} ({movie['year']}) - Director by '{movie['director']}'\")", "def movie_list():\n\n # sort movie titles alphbetically\n movies = Movie.query.order_by(Movie.title).all()\n\n return render_template(\"movie_list.html\", movies=movies)", "def full_list_of_movies():\n\n movie_list = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movie_list=movie_list)", "def print_movies(movies):\n for movie in movies:\n print(movie)", "def movie_list():\n\n movies = Movie.query.order_by('title').join(Rating).all()\n # movies = Movie.query.options(db.joinedload('rating')).order_by('title').all()\n\n return render_template(\"movie_list.html\", movies=movies)", "def list_movies(self, longest_title_length):\n for i, movie in enumerate(self.movies):\n unwatched_string = ' '\n if not movie.is_watched:\n unwatched_string = '*'\n print(\"{:2}. {} {:{}} - {:5} ({})\".format(i, unwatched_string, movie.title, longest_title_length,\n movie.year, movie.category))", "def movie_list(self):\n return self._request_obj(self._urls[\"movie_list\"], key=\"genres\")", "def show_movie(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie=movie)", "def show_movie(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie = movie)", "def open_movies_page(movies):\n # Create or overwrite the output file\n output_file = open('fresh_tomatoes.html', 'w')\n directors_list = create_dropdown_list_directors(movies)\n years_list = create_dropdown_list_years(movies)\n movies.sort(key=lambda x: x.rating, reverse=True)\n # Replace the movie tiles placeholder generated content\n content = create_movie_tiles_content(movies)\n rendered_content = main_page_content.format(\n movie_tiles= content,\n years = years_list,\n directors = directors_list)\n\n \n # Output the file\n output_file.write(main_page_head + rendered_content+main_page_cont)\n output_file.close()\n\n # open the output file in the browser (in a new tab, if possible)\n url = os.path.abspath(output_file.name)\n webbrowser.open('file://' + url, new=2)", "def get_movies(jwt):\n\n movies = Movie.query.all()\n\n return jsonify({\n 'success': True,\n 'movies': [movie.format() for movie in movies],\n }), 200", "def list_movies(category):\n movies = get_movies(category)\n listing = []\n for movie in movies:\n list_item = xbmcgui.ListItem(label=movie[0])\n list_item.setArt({'thumb': movie[1],\n 'icon': movie[1],\n 'fanart': movie[1]})\n list_item.setInfo('video', {'title': movie[0]})\n if 'Next Page' in movie[0]:\n url = '{0}?action=list_category&category={1}'.format(_url, movie[2])\n else:\n url = '{0}?action=list_movie&thumb={1}&movie={2}'.format(_url, movie[1], movie[2])\n is_folder = True\n listing.append((url, list_item, is_folder))\n xbmcplugin.addDirectoryItems(_handle, listing, len(listing))\n xbmcplugin.endOfDirectory(_handle)", "def devPrintFilteredMovieList(self, movieList):\n print \"--\"\n for movie in movieList:\n print movie\n print \"--\"", "def show_me_movie(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return 
render_template('movie_details.html', movie=movie)", "def query_all_movies():\n result = session.query(Movie).all()\n print(\"total movies: %s\" % len(result))\n for movie in result:\n print(\"movie poster: %s\" % movie.poster)\n print(\"%s trailer:%s genre:%s user_id:%s\" %\n (movie.name, movie.trailer_url, movie.genre, movie.user_id))\n print(\"-------------------------------------------------\")", "def movie_lists(self, **kwargs):\n path = self._get_path('movie_lists')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "def open_movies_page(movies):\n # Replace the placeholder for the movie tiles with the actual dynamically generated content\n movie_tiles = create_movie_tiles_content(movies)\n\n # Wrap the header and footer content around the movie tiles\n with open('templates/header.html', 'r') as header, open('templates/footer.html', 'r') as footer:\n content = header.read() + movie_tiles + footer.read()\n\n # Create or overwrite the output file\n with open('index.html', 'w') as output_file:\n output_file.write(content)\n\n # open the output file in the browser\n url = os.path.abspath(output_file.name)\n webbrowser.open('file://' + url, new=2) # open in a new tab, if possible", "def all_movies(request):\n if request.method == 'GET':\n movies = Movie.objects.order_by('name')\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)", "def movie_videos(request, pk):\n if request.method == 'GET':\n videos_list = MovieVideos.objects.filter(movie_id=pk)\n serializer = MovieVideosSerializer(videos_list, many=True)\n return Response(serializer.data)", "def movies(self) -> List[Movie]:\n return super(MoviesManager, self).items", "def listMovies(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list_movies(self, base_uri, actor_id = None):\n if actor_id is None:\n movies = self.movies.values()\n else:\n movies = [movie for movie in self.movies.values()\n if actor_id in movie['actors']]\n movie_list = []\n for movie in movies:\n entry = {}\n entry['uri'] = base_uri + \"/movies/\" + movie['id']\n if movie.has_key('title'):\n entry['title'] = tornado.escape.xhtml_escape(movie['title'])\n if movie.has_key('synopsis'):\n entry['synopsis'] = tornado.escape.xhtml_escape(movie['synopsis'])\n if movie.has_key('actors'):\n \t\tactor_urls = []\n \t\tfor actor in movie['actors']:\n \t\t\tactor_urls.append(base_uri + \"/actors/\" + actor)\n \t\tentry['actors'] = actor_urls\n movie_list.append(entry)\n return movie_list", "def get_movies(self, **kwargs):\n self.url = f\"{self.base_url}{self.FILMS_URL}\"\n self.method = \"get\"\n self.params = self._prepare_query_params(kwargs)\n\n self._make_request()\n\n return self.response.json()", "def get_movies():\n tree = get_tree()\n movie_list = [movie.get(\"title\") for movie in tree.getroot().findall(\"movie\")]\n return movie_list", "def get_movies_list(self, world):\n api_url = self.api_url_base + '/api/{}/movies'.format(world)\n movies_dict = self.get_dict_from_apis(api_url)\n ret_dict = {world: None}\n if movies_dict is not None:\n ret_dict[world] = movies_dict['Movies']\n return ret_dict", "def populate_movies(self):\n self.lw_movies.clear()\n for movie in get_movies():\n lw_item = QtWidgets.QListWidgetItem(movie.title)\n # Methode pour joindre un objet a un str de ListWidget.\n lw_item.setData(QtCore.Qt.UserRole, movie)\n # Ajout du contenu du json 
dans ListWidget.\n self.lw_movies.addItem(lw_item)", "def get_movie_listing_for_movie_link(link):\n detailPage = requests.get(link)\n soup = BeautifulSoup(detailPage.text, 'html.parser')\n\n titleDiv = soup.find('div', attrs={'class': 'subpage_title_block__right-column'})\n titleText = titleDiv.find('a').text\n \n contentBodyDiv = soup.find('div', attrs={'id': 'fullcredits_content'})\n crew = [nameTd.text.strip() for nameTd in contentBodyDiv.find_all('td', attrs={'class': 'name'})]\n\n castListTable = soup.find('table', attrs={'class': 'cast_list'})\n castListRows = castListTable.find_all('tr', attrs={'class': re.compile('even|odd')})\n castList = [row.find_all('td')[1].text.strip() for row in castListRows]\n\n return MovieListing(link, titleText, castList, crew)", "def create_movielist():\n # Create the list of movies - let's pick 6\n movielist = []\n # title, box_art, url\n movielist.append(MovieMetadata(\"Toy Story\", \\\n \"I'm from Mattel. Well, I'm not really from Mattel, I'm actually \" \\\n \"from a smaller company that was purchased by Mattel in a leveraged \" \\\n \"buyout.\", \\\n \"http://ia.media-imdb.com/images/M/MV5BMTgwMjI4MzU5N15BMl5BanBnXkFtZ\" \\\n \"TcwMTMyNTk3OA@@._V1_SY317_CR12,0,214,317_AL_.jpg\", \\\n 'https://www.youtube.com/watch?v=KYz2wyBy3kc'))\n movielist.append(MovieMetadata(\"Avatar\", \\\n \"I was hoping for some kind of tactical plan that didn't involve \" \\\n \"martyrdom\", \\\n 'http://ia.media-imdb.com/images/M/MV5BMTYwOTEwNjAzMl5BMl5BanBnXk' \\\n 'FtZTcwODc5MTUwMw@@._V1_SY317_CR0,0,214,317_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=cRdxXPV9GNQ'))\n movielist.append(MovieMetadata(\"The Princess Bride\", \\\n \"When I was your age, television was called books. And this is a \" \\\n \"special book. It was the book my father used to read to me when I \" \\\n \"was sick, and I used to read it to your father. And today I'm gonna\" \\\n \" read it to you.\", \\\n 'http://ia.media-imdb.com/images/M/MV5BMTkzMDgyNjQwM15BMl5BanBnXkFtZ' \\\n 'TgwNTg2Mjc1MDE@._V1_SY317_CR0,0,214,317_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=GNvy61LOqY0'))\n movielist.append(MovieMetadata(\"Serenity\", \\\n \"Shiny. Let's be bad guys.\", \\\n 'http://ia.media-imdb.com/images/M/MV5BMTI0NTY1MzY4NV5BMl5BanBnXkFtZ' \\\n 'TcwNTczODAzMQ@@._V1_SY317_CR0,0,214,317_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=JY3u7bB7dZk'))\n movielist.append(MovieMetadata(\"The Wizard of Speed and Time\", \\\n \"Miss Belair, if you feel compelled to grab part of my body and \" \\\n \"shake it before you can even be friendly, you've got far worse \" \\\n \"problems than you think I have.\", \\\n 'http://ia.media-imdb.com/images/M/MV5BODc3MzA3MDQyN15BMl5BanBnXkFtZ' \\\n 'TYwMzE2MTk5._V1_SX214_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=3ldOTw60Ozg'))\n movielist.append(MovieMetadata(\"Inside Out\", \\\n \"Take her to the moon for me. Okay?\", \\\n 'http://ia.media-imdb.com/images/M/MV5BOTgxMDQwMDk0OF5BMl5BanBnXkFtZ' \\\n 'TgwNjU5OTg2NDE@._V1_SX214_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=yRUAzGQ3nSY'))\n\n return movielist" ]
[ "0.77884346", "0.7738609", "0.7738429", "0.7603819", "0.7459214", "0.7193967", "0.7042107", "0.69520515", "0.68291026", "0.6809898", "0.6801339", "0.6761189", "0.6752064", "0.6726022", "0.66703576", "0.6598783", "0.65832853", "0.6565718", "0.6530468", "0.6495098", "0.64667565", "0.6465735", "0.64539415", "0.6402372", "0.6335166", "0.63206303", "0.63007385", "0.62610775", "0.6256811", "0.62414926" ]
0.77881587
1
Show profile for given user.
def show_user_profile(user_id): user = User.query.filter_by(user_id=user_id).first() return render_template("user_profile.html", user=user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/profile.html', user=user)", "def show_user(user_id):\n\n user = crud.get_user_by_id(user_id)\n\n return render_template('user_profile.html', user = user)", "def show_user():\n\n return render_template('user/show_by_user.html', title='Show Profile', user = current_user)", "def show_user_profile(user_id):\n\n # raise 401 if no one logged in\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user of whose profile is being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged in user for authenticated navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if user_id == session[CURRENT_USER_KEY]:\n profile_active = 'active'\n else:\n profile_active = ''\n\n return render_template('user_profile.html', profuser=profuser, user=user, profile_active=profile_active)", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "def show_user_profile(user_id):\n\n user = User.query.filter_by(user_id=user_id).one()\n rating = Rating.query.filter_by(user_id=user_id).all()\n\n \n return render_template(\"user_detail.html\", user=user, rating=rating)", "def show_user(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template(\"show-user.html\", user=user)", "def show_user(user_id):\n user = User.query.get(user_id)\n\n return render_template('user.html',user=user)", "def user_view(cls, user, profile):\r\n pass", "def user_view(cls, user, profile):\n pass", "def show_me_user(user_id):\n\n user = crud.get_user_by_id(user_id)\n\n return render_template('user_details.html', user=user)", "def show_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"users/details.html\", user=user)", "def profile(request, user_name=None):\n \n # get the viewed user\n if user_name is None:\n user = request.user.get_profile()\n else:\n user = get_object_or_404(User, username=user_name)\n user = user.get_profile()\n \n # set display name\n if len(user.user.first_name) <= 0:\n user.display_name = user.user.username\n else:\n user.display_name = user.user.first_name + \" \" + user.user.last_name\n \n # set avatar path\n if len(user.avatar.name) <= 0:\n user.avatar_url = settings.MEDIA_URL + \"avatar/noavatar.png\"\n else:\n user.avatar_url = user.avatar.url\n \n # get tracked list, ownedlist and playlist\n trackedlist = user.trackedrecordlist_set.all()\n ownedlist = user.userentry_set.all()\n playlist = user.playlist_set.all()\n context = {\n 'profile_user': user,\n 'trackedlist': trackedlist,\n 'ownedlist': ownedlist,\n 'playlist': playlist\n }\n return render_to_response(\n 'usermgr/profile.html',\n context,\n context_instance = RequestContext(request))", "def show_user_info(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template(\"user_details.html\", user=user)", "async def profile(self, ctx, *, user: discord.Member = None):\n user = user or ctx.author\n profile = await self.cache.get_profile(user.id)\n if profile is None:\n res = self.plugin.data.responses.no_profile.format(user_name=user.name)\n return await ctx.send_line(res)\n await ctx.send(embed=await profile.get_embed())", "def view_profile():\n user_id = session.get(\"user_id\")\n \n user = User.query.get(session[\"user_id\"])\n \n return render_template(\"editable_profile_page.html\", user=user)", "def userProfile(userid):\n images = get_uploaded_images()\n record = 
UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)", "def profile(self, user, **kwargs):\n # pylint: disable=no-member\n return self._get(API.USER.value.format(user_id=user), **kwargs)", "def profile():\n # Check if user is loggedin\n if 'loggedin' in session:\n # We need all the account info for the user so we can display it on the profile page\n response = requests.get(\n \"http://localhost:8080/api/userbyid/\"+str(session['userid']))\n acc = json.loads(response.text)\n # Show the profile page with account info\n return render_template('profile.html', account=acc)\n # users is not loggedin redirect to login page\n return redirect(url_for('site.login'))", "def profile(request, id):\n u = get_object_or_404(User, pk=id)\n context = ProfileContext(u).get_context()\n return render(request, 'wantedly_app/profile.html', context)", "def show(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n return render_template('users/show.html'\n ,user=user\n ,t=t\n ,m=m)", "def profile():\n\n user_id = session.get(\"user_id\")\n userbuses = crud.show_all_userbus(user_id)\n\n \n\n if user_id:\n user = crud.get_user_by_id(user_id)\n return render_template('user_profile.html', user=user, userbuses=userbuses)\n \n else:\n flash('Please sign in')\n return render_template('login.html')", "def profile():\r\n user_data = load_user(current_user.id, current_user)\r\n if user_data is not None:\r\n user, followers, following = user_data\r\n\r\n return render_base_template(\"profile.html\", profile=user, followers=followers,\r\n following=following,\r\n os_projects=[])\r\n\r\n return abort(404)", "def show_user(username):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = User.query.get(username)\n\n return render_template(\"users/detail.html\", user=user)", "def do_user_show(cs, args):\n key = args.user\n if cs.users.is_id(key):\n id = key\n else:\n id = cs.users.get_id_by_name(key)\n _, user = cs.users.get(id)\n utils.print_dict(user)", "def profile():\n if g.user:\n return render_template('profile.html', user=g.user)\n return redirect(url_for('login'))", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n return render_template(\"profile.html\", username=username)", "def profile(request):\n profile = request.user.profile\n return render(request, 'accounts/profile.html', {'profile': profile})", "def view_profile(request, userid=None):\n # Show the currently logged in user's profile if none is specified\n if userid is None:\n user = request.user\n else:\n user = User.objects.get(id=userid)\n profile = Profile.objects.get(user=user)\n dogs = Dog.objects.all().filter(owner=user)\n\n return render(request, 'woofer/view_profile.html',\n {\n 'profile' : profile,\n 'dogs' : dogs\n })", "def user_detail(user_id):\n\n user = User.query.get(user_id)\n return render_template(\"user.html\", user=user)" ]
[ "0.86739427", "0.863889", "0.837819", "0.81853336", "0.80494875", "0.8038482", "0.7862828", "0.78580785", "0.7854763", "0.78532773", "0.78529036", "0.7844797", "0.780445", "0.777881", "0.77440566", "0.77238154", "0.76296026", "0.7587681", "0.7576835", "0.7576826", "0.7546787", "0.7536748", "0.75324476", "0.74984604", "0.7468628", "0.7405273", "0.74048626", "0.7401499", "0.73643595", "0.73419076" ]
0.86867446
0
Show profile for given movie.
def show_movie_profile(movie_id):
    # movie object given a movie_id
    movie = Movie.query.filter_by(movie_id=movie_id).first()
    # list of all rating objects for a given movie_id ordered by user_id
    sorted_ratings = Rating.query.filter_by(movie_id=movie_id).order_by('user_id').all()
    return render_template("movie_profile.html", movie=movie, ratings=sorted_ratings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_me_movie(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie=movie)", "def show_movie(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie = movie)", "def show_movie(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie=movie)", "def showMovie(self, movie):\n self.updateStatusBar()\n self.movieDisplay.setMovie(movie)", "def crawl_movie_profile(movie_name, year=None):\n\n # Search\n query = _TITLE_QUERY.format(title=_convert_title(movie_name))\n search_res = bs(request.urlopen(query), \"html.parser\")\n tables = search_res.find_all(\"table\", {\"class\": \"findList\"})\n if len(tables) < 1:\n return {}\n res_table = tables[0]\n if year is None:\n movie_row = res_table.find_all(\"tr\")[0]\n else:\n for row in res_table.find_all(\"tr\"):\n if (str(year) in str(row)) or (str(year-1) in str(row)):\n movie_row = row\n movie_code = re.findall(_MOVIE_CODE_REGEX, str(movie_row))[0]\n\n # Movie Profile\n cur_profile_url = _PROFILE_URL.format(code=movie_code)\n prof_page = bs(request.urlopen(cur_profile_url), \"html.parser\")\n\n # Extracting properties\n props = {}\n props['name'] = movie_name\n props['rating'] = _get_rating(prof_page)\n props['rating_count'] = _get_rating_count(prof_page)\n props['genres'] = _get_geners(prof_page)\n props['user_review_count'], props['critic_review_count'] = \\\n _get_review_counts(prof_page)\n props['metascore'] = _get_metascore(prof_page)\n props['year'] = _get_year(prof_page)\n props['duration'] = _get_duration(prof_page)\n props.update(_get_box_office_props(prof_page))\n props.update(_get_rating_props(movie_code))\n props.update(_get_business_props(movie_code))\n props.update(_get_release_props(movie_code))\n props.update(_get_reviews_props(movie_code))\n return props", "def movie_details(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie=movie)", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "async def profile(self, ctx, member: discord.Member = None):\n if member is None:\n member = ctx.author\n name = member.display_name\n id = member.id\n avatar = member.avatar_url\n created = member.created_at\n embed = discord.Embed(\n title=name,\n description=name + \"'s profile!\",\n color=discord.Colour.purple()\n )\n embed.set_thumbnail(url=avatar)\n embed.add_field(name=\"ID:\", value=str(id))\n embed.add_field(name=\"Created At:\", value=str(created))\n if ctx.guild is not None:\n joined_at = member.joined_at\n embed.add_field(name=\"Joined Server At:\", value=str(joined_at))\n if member.bot:\n embed.add_field(name=\"Bot?\", value=\"Yeah\")\n if member.premium_since is not None:\n premium = str(member.premium_since)\n embed.add_field(name=\"Premium Member Since: \", value=premium)\n else:\n embed.add_field(name=\"Premium?\", value=\"Nope\")\n top_role = str(member.top_role)\n embed.add_field(name=\"Top Role:\", value=top_role)\n await ctx.send(\"\", embed=embed)", "def movie_info(self, **kwargs):\n\n path = self._get_movie_id_path('details')\n resp = self._get_method(path, kwargs)\n return resp", "def view_profile(request, userid=None):\n # Show the currently logged in user's profile if none is specified\n if userid is None:\n user = request.user\n else:\n user = User.objects.get(id=userid)\n profile = Profile.objects.get(user=user)\n dogs = 
Dog.objects.all().filter(owner=user)\n\n return render(request, 'woofer/view_profile.html',\n {\n 'profile' : profile,\n 'dogs' : dogs\n })", "def list_movie():\n if not MOVIES:\n print('No stored movies yet')\n\n for movie in MOVIES:\n print(f\"{movie['name']} ({movie['year']}) - Director by '{movie['director']}'\")", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n return render_template(\"profile.html\", username=username)", "def display_profile(self):\n print(f\"Id: {self._id}\")\n print(f\"username: {self.username}\")\n print(f\"name: {self.name}\")\n print(f\"contact: {self.contact}\")\n print(f\"address: {self.address}\")", "def movie_profile(movie_id):\n\n if not session.get('logged_in_user_email'):\n flash(\"Please login or signup to see the movie details and rate the movie!\", \"danger\")\n return redirect(\"/signup-login\")\n\n else:\n\n # import pdb; pdb.set_trace();\n\n # Query by movie id to return that record in database about movie info\n # movie = Movie.query.filter(Movie.movie_id == movie_id).one()\n movie = Movie.query.get(movie_id)\n\n user = User.query.filter(User.email == session.get(\"logged_in_user_email\")).one()\n user_id = user.user_id\n\n if user_id:\n user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()\n else:\n user_rating = None\n\n # Prediction code: only predict if the user hasn't rated it\n prediction = None\n\n if (not user_rating) and user_id:\n user = User.query.get(user_id)\n if user:\n prediction = user.predict_rating(movie)\n\n # Either use the prediction or their real rating\n if prediction:\n # User hasn't scored; use our prediction if we made one\n effective_rating = prediction\n\n elif user_rating:\n # User has already scored for real; use that\n effective_rating = user_rating.score\n\n else:\n # User hasn't scored and we couldn't get a prediction\n effective_rating = None\n\n # Get the wizard's rating, either by predicting or using real rating\n wizard = User.query.filter_by(email=\"[email protected]\").one()\n wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()\n\n if wizard_rating is None:\n wizard_rating = wizard.predict_rating(movie)\n else:\n wizard_rating = wizard_rating.score\n\n if wizard_rating and effective_rating:\n difference = abs(wizard_rating - effective_rating)\n else:\n # We couldn't get a wizard rating, so we'll skip difference\n difference = None\n\n # Depending on how different we are from the Wizard, choose a message\n BERATEMENT_MESSAGES = [\n \"I suppose you don't have such bad taste after all.\",\n \"I regret every decision that I've ever made that has brought me to listen to your opinion.\",\n \"Words fail me, as your taste in movies has clearly failed you.\",\n \"That movie is great. For a clown to watch. 
Idiot.\",\n \"Words cannot express the awfulness of your taste.\"\n ]\n\n if difference is not None:\n beratement = BERATEMENT_MESSAGES[int(difference)]\n else:\n beratement = None\n\n # Tallies score of each rating (how many people rated this score per rating)\n # Returns list of tuples for count_score\n unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)\n ordered_movies = unordered_ratings.order_by(Rating.score)\n count_score = ordered_movies.all()\n\n # Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja\n avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()\n\n # Query to get all ratings for a specific movie\n # Needed to join Rating and Movie tables and filter by user id\n # Sort movie titles alphabetically\n ratings = db.session.query(Rating.movie_id,\n Rating.score,\n Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()\n\n # # Pass user info into jinja and called on its attributes\n # # Pass count_score, avg_rating, and ratings into jinja\n # return render_template(\"movie_profile.html\", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)\n\n return render_template(\n \"movie_profile.html\",\n movie=movie,\n user_rating=user_rating,\n avg_rating=avg_rating[0],\n count_score=count_score,\n prediction=prediction,\n ratings=ratings,\n beratement=beratement)", "def show_user():\n\n return render_template('user/show_by_user.html', title='Show Profile', user = current_user)", "def profile(request, id):\n u = get_object_or_404(User, pk=id)\n context = ProfileContext(u).get_context()\n return render(request, 'wantedly_app/profile.html', context)", "def movie_page(movie_id):\n\n current_movie = Movie.query.filter_by(movie_id=movie_id).first()\n title = current_movie.title\n released = current_movie.released_at\n url = current_movie.imdb_url\n thing = current_movie.movie_id\n\n movie_rating = db.session.query(Rating.score).join(Movie).filter(\n Movie.movie_id==thing).all()\n\n return render_template('movie_page.html', current_movie=current_movie, \n title=title, released=released, url=url, movie_rating=movie_rating)", "def show_user_profile(user_id):\n user = User.query.filter_by(user_id=user_id).first()\n\n return render_template(\"user_profile.html\", user=user)", "async def show_movie_info(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n if 'movie_list' in data and message.text not in data['movie_list']:\n await bot.send_message(message.chat.id, \"Ok, lets find another one\",\n reply_markup=types.ReplyKeyboardRemove())\n await make_choice(message, state)\n\n else:\n await Form.choosing.set()\n movie_info = await movie_api.get_movie_info(message.text)\n print(\"Is about to show info\")\n if movie_info is None:\n await message.reply(\"Movie not found\", reply_markup=types.ReplyKeyboardRemove())\n await Form.free.set()\n await start_again(message)\n else:\n await state.update_data(last_movie=message.text)\n\n if movie_info['general_overview']:\n await bot.send_message(message.chat.id, movie_info['general_overview'])\n else:\n await message.reply(\"No overview for the movie is available\")\n\n if movie_info['poster_path']:\n await bot.send_photo(message.chat.id, types.InputFile.from_url(movie_info['poster_path']))\n else:\n await message.reply( \"No poster for the movie is available\")\n\n await Form.free.set()\n await 
start_again(message)", "def show_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/profile.html', user=user)", "def get_movie(id):\n if id and request.headers['accept'] == \"application/json\":\n film = mongo_mgr.get_film_by_id (id)\n # handling template movies\n # if the requested film is still a template (name is None), the source\n # and the source_id will be used to get the full data\n if film['name'] is None:\n film = mediator.update_template_film (film)\n user_has_movie = mongo_mgr.user_has_movie(id, aaa.current_user.id)\n film['my_movie'] = user_has_movie\n return json.loads(dumps(film))\n else:\n return template(\"details.html\", user=aaa.current_user.username)", "def show_profile():\n print('LOGIN SESSION:', login_session)\n if 'userid' in login_session:\n category = session.query(Category).first()\n item = session.query(Item).first()\n return render_template('profile.html', login_session=login_session, root=app.instance_path, category=category,\n item=item)\n flash('Unfortunately you need to be logged in to see your profile', 'error')\n return redirect(url_for('show_homepage'))", "def setMovie(self, movie):\n if movie != None:\n self.titleText.set(movie.title)\n if isinstance(movie.director, types.StringTypes):\n self.directorText.set(movie.director)\n else:\n self.directorText.set(', '.join(movie.director))\n self.yearText.set(movie.year)\n self.ratingText.set(movie.rating)\n self._setMoviePicture(movie.coverUrl)\n else:\n self.reset()", "def do_user_show(cs, args):\n key = args.user\n if cs.users.is_id(key):\n id = key\n else:\n id = cs.users.get_id_by_name(key)\n _, user = cs.users.get(id)\n utils.print_dict(user)", "def show_user_profile(user_id):\n\n user = User.query.filter_by(user_id=user_id).one()\n rating = Rating.query.filter_by(user_id=user_id).all()\n\n \n return render_template(\"user_detail.html\", user=user, rating=rating)", "def movies_page(request):\n movie = get_object_or_404(Movie, pk=id)\n time_run = timezone.now()\n return render(request, \"movies/movies_page.html\",\n {\"movie\": movie, \"time_run\": time_run})", "def profile_detail(request, pk):\n profile = request.user.userprofile\n user_relationships = profile.get_relationships()\n user_request = profile.get_friend_request()\n\n context = {\n # 'user': user,\n 'profile': profile,\n 'user_relationships': user_relationships,\n 'user_request': user_request\n }\n\n return render(request, 'accounts/profile_detail.html', context)", "def profile(self, request):\n # Todo: Move the logic to Serializer\n identity = request.query_params.get('identity')\n if not identity:\n raise RequiredParameter('identity')\n\n try:\n video_client = VideoClient.objects.get(id=identity)\n except VideoClient.DoesNotExist:\n msg = _(\"Profile with identity %(identity)s does not exist\") % {'identity': identity}\n raise InvalidParameter('identity', message=msg)\n except ValueError:\n msg = _(\"Invalid identity\")\n raise InvalidParameter('identity', message=msg)\n\n res = ProfileSerializer(video_client.user, context={'request': request}).data\n return Response(res)", "def show_profiles(profiles, height=None, fname=None, **kwargs):\n _show_profiles(profiles, height, fname, **kwargs)", "def get_movie_info(movie_id):\n info = {}\n\n movie = tmdb.get_movie(movie_id)\n info['title'] = movie['original_title']\n info['genres'] = \", \".join(x['name'] for x in movie['genres'])\n info['plot'] = movie['overview']\n info['year'] = movie['release_date'][:4]\n\n cast = movie['credits']['cast']\n info['actors'] 
= \", \".join(x['name'] for x in cast[:5])\n\n directors = [x for x in movie['credits']['crew'] if x['department'] == 'Directing']\n info['directors'] = \", \".join(x['name'] for x in directors[:2])\n\n return info" ]
[ "0.7126585", "0.68741995", "0.6854062", "0.67303324", "0.6346566", "0.6204759", "0.609547", "0.5925734", "0.58227336", "0.5796495", "0.57389706", "0.5738439", "0.57043684", "0.57000756", "0.5661391", "0.5624544", "0.56051505", "0.5586314", "0.5582274", "0.5544493", "0.5541237", "0.5521269", "0.5513014", "0.5472269", "0.5469821", "0.5469181", "0.546874", "0.54644036", "0.545413", "0.54386264" ]
0.7592274
0
Add or update a movie rating.
def add_movie_rating(movie_id):
    rating = request.form.get("rating")

    # Rating object from logged in user for movie on page
    user_rating_query = Rating.query.filter(Rating.user_id == session["user_id"],
                                            Rating.movie_id == movie_id).first()

    # Check to see if rating exists from logged in user
    if user_rating_query:
        # Update rating
        user_rating_query.score = rating
        db.session.commit()
        flash("Rating updated")
    else:
        # Since rating doesn't exist, add rating to ratings table
        user_rating = Rating(movie_id=movie_id, user_id=session["user_id"], score=rating)
        db.session.add(user_rating)
        db.session.commit()
        flash("Rating added")

    return redirect(f"/movies/{movie_id}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_movie_rating_record(movie_id, rating_number, operation):\n movie = models.Movie.objects.get(mid=movie_id)\n if operation == 'new':\n # Update the average_rating and votecount for the movie.\n movie.average_rating = (float(movie.average_rating) * float(movie.votecount) + rating_number) / (\n movie.votecount + 1)\n movie.votecount += 1\n movie.save()\n elif operation == 'delete':\n movie.average_rating = (float(movie.average_rating) * float(movie.votecount) - float(rating_number)) / (\n movie.votecount - 1)\n movie.votecount -= 1\n movie.save()\n elif operation == 'edit':\n movie.average_rating = float(movie.average_rating) + (float(rating_number) / movie.votecount)\n movie.save()", "def rate_movie(movie_id):\n\n user_rating = request.args.get(\"user_rating\")\n # get user id from log in email address\n user_email = session[\"logged_in_user_email\"]\n\n user = User.query.filter(User.email == user_email).one()\n\n user_id = user.user_id\n\n # Check if user rating exists in database\n # If user has rated this movie before, update value\n # Else, add user rating to database by movie id and user id\n if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():\n # When updating a value, we need to use the key-value pair in update()\n db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({\"score\": user_rating})\n\n # db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)\n db.session.commit()\n\n flash(\"You have rated this movie before! It has now been updated to %s.\" % (user_rating), \"warning\")\n return redirect(\"/users/%s\" % user_id)\n\n else:\n db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))\n db.session.commit()\n \n flash(\"You have rated this movie a %s.\" % (user_rating), \"info\")\n \n return redirect(\"/users/%s\" % user_id)\n\n\n # Get user rating routed correctly, as this was just test code\n # Fix label format for movie profile page\n\n return render_template(\"rate_movie.html\", user_rating=user_rating)", "def rating(self, **kwargs):\n\n data = dict()\n data['value'] = kwargs.get('value') or None\n\n path = self._get_movie_id_path('rating')\n resp = self._post_method(path, kwargs, data)\n\n return resp", "def insert(self, movie):\n self.movies[movie.rating] = movie\n self._updateMovieList = True", "def create_rating(input_user_id, input_rating, input_movie_id):\n \n rating = Rating(user_id=input_user_id, rating=input_rating, movie_id=input_movie_id)\n \n db.session.add(rating)\n db.session.commit()\n\n return rating", "def update_single_movie_rating(self, current_id):\n data_model = MovieRating(current_id)\n movie_rating = data_model.get_movie_ratings()\n self.loader.load_movie_rating(movie_rating)", "def accept_rating():\n \n return\n\n #def needs to get the movie id so that rating can be tied to movie\n\n #if statement\n #create sqla to add a new rating\n #flash message that says \"rating has been added\"\n\n #else statement\n #create sqla to update user rating\n #flash message that says \"rating has been updated\"\n\n #return redirect to movie title page", "def update_rating(user, wine, rating):\n\n #query for the object (user and wine_id)\n current_rating = Rating.query.filter(Rating.user==user, Rating.wine==wine).first()\n #reassign the rating attribute of that rating\n current_rating.rating = rating\n #then potentially need to: commit, may need to add to session before 
committing\n db.session.merge(current_rating)\n db.session.commit()\n\n return current_rating", "def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return", "def add_ratings(self, ratings):\n # Convert ratings to an RDD\n new_ratings_RDD = self.sc.parallelize(ratings)\n # Add new ratings to the existing ones\n self.ratings_RDD = self.ratings_RDD.union(new_ratings_RDD)\n # Re-compute movie ratings count\n self.__count_and_average_ratings()\n # Re-train the ALS model with the new ratings\n self.__train_model()\n \n return ratings", "def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()", "def add_rating(self, rating):\n if rating >= 0 and rating <= 4:\n self.ratings.append(rating)\n else:\n print(\"Invalid Rating\")", "def update_comment_score(self, loginID, commentID, attrib_name):\n self.cursor.execute(\"SELECT rating FROM rates WHERE loginID = %s AND commentID = %s\", (loginID, commentID))\n old_rating = self.cursor.fetchall()\n if old_rating:\n # This user already rated this comment. Change the rating.\n if old_rating[0][0] == attrib_name:\n # Remove the rating, because the user already voted for this.\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"-1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"DELETE FROM rates WHERE loginID=%s AND commentID=%s\"\"\",\n (loginID, commentID))\n else:\n self.cursor.execute(\n \"UPDATE comment SET \" + old_rating[0][0] + \"=\" + old_rating[0][0] + \"-1, \" + attrib_name\n + \"=\" + attrib_name + \"+1 WHERE commentID=%s\"\"\", (commentID,))\n self.cursor.execute(\"\"\"UPDATE rates SET rating=%s WHERE loginID=%s AND commentID=%s\"\"\",\n (attrib_name, loginID, commentID))\n else:\n # New rating, just need to update one value and add a new rating tuple to rates\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"+1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"INSERT INTO rates VALUES (%s,%s,%s)\"\"\", (loginID, commentID, attrib_name))\n self.db.commit()\n self.update_comment_avg_score(commentID)", "def add_rating(self, rating):\n if not rating or rating < 0 or rating > 4:\n return \"Rating {rating} is not valid. 
Valid ratings are between 0 and 4\".format(rating=rating)\n else:\n self.ratings.append(rating)", "def set_stars():\n prod_id = int(request.vars.prod_id)\n logger.info(\"changing stars on prod_id {%s}\" %prod_id)\n rating = int(request.vars.rating)\n logger.info(\"auth.user from api: %s\"%auth.user.email )\n db.stars.update_or_insert(\n (db.stars.prod_id == prod_id) & (db.stars.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n rating = rating\n )\n new_avg = calc_avg_rating(prod_id)\n return response.json(dict(new_avg=new_avg))", "def add_movie_review(request):\n print (json.loads(request.body))\n serializer = MovieReviewsSerializer(data=json.loads(request.body))\n temp = json.loads(request.body)\n movie_rev = MovieReviews.objects.filter(user_id=temp['user_id'], movie_id = temp['movie_id'])\n if len(movie_rev) > 0:\n movie = Movie.objects.filter(pk=temp['movie_id'])\n serializer2 = MovieSerializer(movie, many=True)\n old = MovieReviewsSerializer(movie_rev, many=True).data[0]['rating']\n initial = serializer2.data[0]['rating']\n num = serializer2.data[0]['no_of_reviews']\n new_rating = ((initial*num)+(temp['rating']-old))/num\n MovieReviews.objects.filter(user_id=temp['user_id'], movie_id = temp['movie_id']).update(description=temp['description'], rating=temp['rating'])\n Movie.objects.filter(pk=temp['movie_id']).update(rating=new_rating)\n else:\n if serializer.is_valid():\n serializer.save()\n movie = Movie.objects.filter(pk=serializer.data['movie_id'])\n serializer2 = MovieSerializer(movie, many=True)\n initial = serializer2.data[0]['rating']\n num = serializer2.data[0]['no_of_reviews']\n print (num)\n if num == 0:\n Movie.objects.filter(pk=serializer.data['movie_id']).update(rating=serializer.data['rating'], no_of_reviews=1)\n else:\n new_val = ((initial*num)+serializer.data['rating'])/(num+1)\n Movie.objects.filter(pk=serializer.data['movie_id']).update(rating=new_val, no_of_reviews=num+1)\n serializer2 = MovieSerializer(movie, many=True)\n else: #return HttpResponse(\"done\")\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n MovieReviews.objects.filter(user_id=temp['user_id'], movie_id = temp['movie_id']).update(positivity=func(temp['description']))\n reviews = MovieReviews.objects.filter(user_id=temp['user_id'], movie_id=temp['movie_id'])\n serializer3 = MovieReviewsSerializer(reviews, many=True)\n return Response(serializer3.data, status=status.HTTP_201_CREATED)", "def create_rating(user, movie, score):\n\n # pass in user object, movie object, score integer\n # To test this function in the interactive mode, \n # create the user and movie objects and then pass \n # in those objects as the arguments\n rating = Rating(user=user, movie=movie, score=score)\n\n db.session.add(rating)\n db.session.commit()\n\n return rating", "def enterRating():\n if request.method == 'POST':\n movieName = request.form['movieName']\n username = request.form['userName']\n rating = request.form['rating']\n comment = request.form['comment']\n post([movieName, username, rating, comment])\n return render_template('rating_enter.html')", "def add_user_rating(self, user_id, movie_id, rating):\r\n new_row = {'user_id': int(user_id), 'item_id': int(movie_id), 'rating': rating}\r\n self.df_app_data = self.df_app_data.append(new_row, ignore_index=True)", "def insert(self, movie_name, year_released, genre, rating, review, reviewer):\n params = {'movie_name': movie_name, 'year_released': year_released,'genre':genre, 'rating': rating, 'review': review, 
'reviewer': reviewer}\n self.movie_reviews.append(params)\n return True", "def update_rating(self, name, rating):\n try:\n self.cursor.execute(\n \"\"\"UPDATE sandbox.dvds_rdbhdb_super\n SET rating = %s\n WHERE name LIKE %s\n \"\"\", \n (rating, name)\n )\n except (db.DataError, db.IntegrityError), e:\n if e[0] == '22P02':\n print 'Cannot add %s because its not a valid float' % rating\n else:\n print 'Caught Error while trying to update %s to %s' % (name, rating)\n #traceback.print_exc()", "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def rate_video (self, video_id, rating):\n\n # dirty rating validation\n ratun = int(rating)\n if rating > 10 or rating < 0:\n return False\n\n # In opposition to Kodi, Netflix uses a rating from 0 to in 0.5 steps\n if rating != 0:\n rating = rating / 2\n\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json, text/javascript, */*',\n }\n\n params = {\n 'titleid': video_id,\n 'rating': rating\n }\n\n payload = json.dumps({\n 'authURL': self.user_data['authURL']\n })\n\n response = self._session_post(component='set_video_rating', type='api', params=params, headers=headers, data=payload)\n return response.status_code == 200", "def updateUserRating(definition, increase):\n user = mongo.db.users.find_one({\"_id\": definition[\"submitted_by\"]})\n mongo.db.users.update_one(\n {\"_id\": user[\"_id\"]},\n {\"$inc\": {\"total_rating\": increase}})", "def update_rating(self, new_rating: float, date: Union[str, float]):\n self.logger.info(f\"Updating rating for {self.id}: {self.rating:.3f} --> {new_rating:.3f}\")\n self.rating = new_rating\n self._update_rating_history(rating=new_rating, date=date)", "def post_rating():\n\n id = request.args.get('id')\n\n rating = request.args.get('rating')\n\n record = mod.provide_rating(id, int(rating))\n\n return jsonify(record)", "def add_rating(user, item, rating):\n users.append(user)\n items.append(item)\n ratings.append(rating)", "def update_girl(self, hash, new_rate):\n image = self._db.girls.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.girls.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)", "def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']" ]
[ "0.785876", "0.73302704", "0.7036317", "0.6898909", "0.6847553", "0.6840464", "0.68197745", "0.68010294", "0.6775351", "0.6662615", "0.6642418", "0.6628332", "0.64915556", "0.648122", "0.64792055", "0.6477873", "0.64777255", "0.6424817", "0.6379951", "0.6375289", "0.6370936", "0.63708997", "0.63708997", "0.63673306", "0.633586", "0.6315075", "0.62280285", "0.622799", "0.6193408", "0.6158546" ]
0.7663322
1
Given a binary tree print the elements in a zig zag order
def zig_zag_traversal(root): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zigzak_using_bfs(root):\n current_level = [root]\n next_level = []\n while current_level:\n node = current_level.pop()\n print(node.data, end=\" \")\n if node.right:\n next_level.append(node.right)\n if node.left:\n next_level.append(node.left)\n if not current_level:\n current_level, next_level = next_level, current_level\n print()", "def printBinaryTree(root):\n level = treeLevel(root)\n q = deque([root])\n while level > 0:\n new_q = deque()\n while q:\n tmp = q.popleft()\n new_q.append(tmp.left)\n new_q.append(tmp.right)\n print(level*' ', tmp.val, end='')\n print('\\n')\n q = new_q\n level -= 1\n return", "def print_leaves(t):\n for depth, leaves in sorted(leaves_by_depth(t).items()):\n print(depth-1, 'bits:', leaves)", "def zigzak(root, clockwise):\n ltr = clockwise\n for i in range(1, height(root)+1):\n print_level(root, i, ltr)\n ltr = not ltr\n print()", "def print_tree(root):\n queue = [(root, [\"1\"])]\n while queue:\n this, depth = queue.pop(0)\n if isinstance(this, int):\n reprr = \"L %i\" % this\n else:\n reprr = str(this.attribute)\n for key, child in this.children.items():\n queue.append((child, depth + [\"%s\" % key]))\n print \"%s: %s\" % (\".\".join(depth), reprr)", "def bft_print(self):\n queue = []\n queue.append(self)\n while len(queue):\n current = queue.pop(0)\n print(current.value)\n if current.left:\n queue.append(current.left)\n if current.right:\n queue.append(current.right)", "def postorder(root):\n if not root:\n return\n inorder(root.left)\n inorder(root.right)\n print(root.data, end=' ')", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def print_bi_tree(self):\n\n to_print = [self]\n # current = None\n\n while to_print:\n current = to_print.pop(0)\n if current:\n print(f'\\t{current.data}')\n to_print.append(current.left)\n to_print.append(current.right)", "def _print_inorder(self):\n if not self.root:\n return None\n else:\n stack = []\n node = self.root\n while len(stack) or node:\n if node:\n stack.append(node)\n node=node.get_left()\n else:\n node = stack.pop()\n print(node.get_data())\n node=node.get_right()", "def print_bfs(self):\n visit_order = self.bfs()\n s = \"Tree (from BFS)\\n\"\n previous_level = -1\n for i in range(len(visit_order)):\n node, level = visit_order[i]\n if level == previous_level:\n s += \" | \" + str(node) \n else:\n s += \"\\n\" + str(node)\n previous_level = level\n \n return s", "def inorder(root):\n if not root:\n return\n inorder(root.left)\n print(root.data, end=' ')\n inorder(root.right)", "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def zigzagLevelOrder(self, root):\n if not root:\n return []\n\n # create direction variable\n going_right = False\n\n # initialize a level stack\n level_stack = [root]\n\n # initialize a result array\n result = []\n\n # loop while level stack is not empry\n while level_stack:\n\n # create tmp array\n tmp = []\n\n res_tmp = []\n\n # do the BFS by looping through each element\n while level_stack:\n\n # pop an element from the stack\n node = level_stack.pop()\n\n res_tmp.append(node.val)\n\n # if direction is 1 or going right, take left node first\n if going_right == 1:\n if node.left:\n tmp.append(node.left)\n\n if node.right:\n tmp.append(node.right)\n\n # otherwise take right node first, append to the temp list\n else:\n if node.right:\n tmp.append(node.right)\n\n if node.left:\n tmp.append(node.left)\n\n # append temp list to the result list\n 
result.append(res_tmp[::-1])\n\n # update level stack\n level_stack = tmp\n\n # change the direction\n going_right = not going_right\n\n return result", "def level_order_1(root: Node):\n if not root:\n return\n temp = root\n que = [temp]\n while len(que) > 0:\n print(que[0].data, end=\" \")\n temp = que.pop(0)\n if temp.left:\n que.append(temp.left)\n if temp.right:\n que.append(temp.right)\n return que", "def levelorder(root):\n h = height(root)\n for i in range(1, h + 1):\n print_level(root, i)", "def visualise_binary_tree(self):\n tree_elements = [i for i in self.breadthfirst()] # saves the nodes of the tree in an array after the breadthfirst transversal is executed\n height = self.height(self.root())\n n = sum([2 ** i for i in range(0, height + 1)]) # total number of possible nodes of a tree\n array_tree = n * [\" \"] # array-based representation of a binary tree implemented by using level-numbering of positions(chapter 8.3.2 of Goodrich book)\n array_tree[0] = tree_elements[0] # assigning the root\n for i in range(0, len(tree_elements)):\n index1 = i\n if tree_elements[i] in array_tree:\n index1 = array_tree.index(tree_elements[i])\n for j in range(i, len(tree_elements)):\n if tree_elements[j] == self.left(tree_elements[i]):\n array_tree[2 * index1 + 1] = tree_elements[j]\n if tree_elements[j] == self.right(tree_elements[i]):\n array_tree[2 * index1 + 2] = tree_elements[j]\n break\n for i in range(0, len(array_tree)):\n if array_tree[i] != \" \": # the empty nodes are represented by \" \"\n array_tree[i] = array_tree[i].element() # changing the array from nodes to elements of the nodes\n height1 = height\n spaces = 2 ** (height + 1) - 2 # initialises the number of spaces that have to be added when displaying the nodes\n height -= 1\n pos = 0 # index of the node that is displayed\n print(spaces * \" \" + array_tree[pos])\n for i in range(0, height1 + 1): #iterates through all the levels of the binary tree\n spaces = 2 ** (height + 1) - 2\n level = spaces * \" \" # initialises each level of the binary tree with the appropiate number of spaces\n height += 1\n spaces = 2 ** (height + 1) - 1\n if 2 * pos + 3 > len(array_tree): # exit the loop if the tree was traversed\n break\n for j in range(0, 2 ** i):\n level += array_tree[2 * pos + 1] + \" \" * spaces + array_tree[2 * pos + 2] + \" \" * spaces # adds the nodes from that level\n pos += 1\n height -= 2\n print(level)", "def print_binary_tree_bfs(root):\n if not isinstance(root, BinaryTreeNode):\n return\n queue = Queue()\n queue.put(root)\n\n while not queue.empty():\n node = queue.get()\n print(node.val)\n if node.left != None:\n queue.put(node.left)\n if node.right != None:\n queue.put(node.right)", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n h = height(root) \n for i in range(1, h+1): \n printGivenLevel(root, i) \n print(\"=========================================================\")", "def preorder(root):\n if not root:\n return\n print(root.data, end=' ')\n inorder(root.left)\n inorder(root.right)", "def print_tree(t):\r\n if (t==None):\r\n return \r\n else:\r\n print_tree(left(t))\r\n print(value(t),end=\" \")\r\n print_tree(right(t))", "def inorder_recursive(root):\n if root:\n inorder_recursive(root.left)\n print(root.data, end=\" \")\n inorder_recursive(root.right)", "def print_tree(self):\n\t\tself.root.print_recursive(0)", "def postorder_recursive(root):\n if root:\n postorder_recursive(root.left)\n postorder_recursive(root.right)\n print(root.data, end=\" \")", "def 
printTree(self):\r\n print(self.letter)\r\n if self.left:\r\n self.left.printTree()\r\n if self.right:\r\n self.right.printTree()", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n \n print(\"=========================================================\")", "def asciitree(obj,depth=0,wide=2,last=[],recursed=False):\n\tcorner = u'\\u251C'\n\tcorner_end = u'\\u2514'\n\thorizo,horizo_bold = u'\\u2500',u'\\u2501'\n\tvertic,vertic_bold = u'\\u2502',u'\\u2503'\n\ttl,tr,bl,br = u'\\u250F',u'\\u2513',u'\\u2517',u'\\u251B'\n\tspacer_both = dict([(k,{\n\t\t0:'\\n',1:(' '*(wide+1)*(depth-1)+c+horizo*wide),\n\t\t2:' '*(wide+1)*(depth-1)}[depth] if depth <= 1 \n\t\telse (''.join([(vertic if d not in last else ' ')+\n\t\t' '*wide for d in range(1,depth)]))+c+horizo*wide) \n\t\tfor (k,c) in [('mid',corner),('end',corner_end)]])\n\tspacer = spacer_both['mid']\n\tif type(obj) in [float,int,bool]+str_types_list:\n\t\tif depth == 0: print(spacer+str(obj)+'\\n'+horizo*len(obj))\n\t\telse: print(spacer+str(obj))\n\telif isinstance(obj,dict) and all([type(i) in [str,float,int,bool] for i in obj.values()]) and depth==0:\n\t\tasciitree({'HASH':obj},depth=1,recursed=True)\n\telif type(obj) in [list,tuple]:\n\t\tfor ind,item in enumerate(obj):\n\t\t\tspacer_this = spacer_both['end'] if ind==len(obj)-1 else spacer\n\t\t\tif type(item) in [float,int,bool]+str_types_list: print(spacer_this+str(item))\n\t\t\telif item != {}:\n\t\t\t\tprint(spacer_this+'('+str(ind)+')')\n\t\t\t\tasciitree(item,depth=depth+1,\n\t\t\t\t\tlast=last+([depth] if ind==len(obj)-1 else []),\n\t\t\t\t\trecursed=True)\n\t\t\telse: print('unhandled tree object %s'%item)\n\telif isinstance(obj,dict) and obj != {}:\n\t\tfor ind,key in enumerate(obj.keys()):\n\t\t\tspacer_this = spacer_both['end'] if ind==len(obj)-1 else spacer\n\t\t\tif type(obj[key]) in [float,int,bool]+str_types_list: print(spacer_this+str(key)+' = '+str(obj[key]))\n\t\t\t# special: print single-item lists of strings on the same line as the key\n\t\t\telif type(obj[key])==list and len(obj[key])==1 and type(obj[key][0]) in [str,float,int,bool]:\n\t\t\t\tprint(spacer_this+key+' = '+str(obj[key]))\n\t\t\t# special: skip lists if blank dictionaries\n\t\t\telif type(obj[key])==list and all([i=={} for i in obj[key]]):\n\t\t\t\tprint(spacer_this+key+' = (empty)')\n\t\t\telif obj[key] != {}:\n\t\t\t\t# fancy border for top level\n\t\t\t\tif depth == 0:\n\t\t\t\t\tprint('\\n'+tl+horizo_bold*(len(key)+0)+\n\t\t\t\t\t\ttr+spacer_this+vertic_bold+str(key)+vertic_bold+'\\n'+\\\n\t\t\t\t\t\tbl+horizo_bold*len(key)+br+'\\n'+vertic)\n\t\t\t\telif obj[key]==None: print(spacer_this+key+' = None')\n\t\t\t\telse: print(spacer_this+key)\n\t\t\t\tif obj[key]!=None: \n\t\t\t\t\tasciitree(obj[key],depth=depth+1,\n\t\t\t\t\t\tlast=last+([depth] if ind==len(obj)-1 else []),\n\t\t\t\t\t\trecursed=True)\n\t\t\telif type(obj[key])==list and obj[key]==[]:\n\t\t\t\tprint(spacer_this+'(empty)')\n\t\t\telif obj[key]=={}: print(spacer_this+'%s = {}'%key)\n\t\t\telse: print('unhandled tree object %s'%key)\n\telse: print('unhandled tree object %s'%obj)\n\tif not recursed: print('\\n')", "def printTree(self):\n print(printTreeF(self, 0, self))", "def print_val(tree):\n if tree == None:\n return\n\n # Prints the inorder sequence of the tree\n print_val(tree.get_left())\n print(tree)\n print_val(tree.get_right())" ]
[ "0.72299", "0.6941187", "0.69075686", "0.68050545", "0.6722692", "0.65757614", "0.6556757", "0.65556186", "0.65556186", "0.65482455", "0.6534519", "0.6528175", "0.65199995", "0.64702153", "0.6451519", "0.643972", "0.64164495", "0.6406407", "0.63993025", "0.63982576", "0.6393286", "0.63601243", "0.63522065", "0.6337391", "0.63266575", "0.6312998", "0.6304309", "0.62808615", "0.6277036", "0.62669444" ]
0.7660401
0
returns the plotting paramters like legend size, text font size, etc See the return object
def global_plotting_parameters(): dpi = 10 plotting_param_dict = {'dpi':dpi, 'axis_font':{'size': str(int(15*dpi))}, 'title_font':{'size': str(18*dpi)}, 'legend_size':{'size': str(12*dpi)}, 'tick_size': 12*dpi, 'marker_size':100*3.5*dpi} return plotting_param_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _plot_params(self, ax=None):\n linewidth = 2\n size = 12\n\n # grid and ticks settings\n ax.minorticks_on()\n ax.grid(b=True, which='major', linestyle='--',\n linewidth=linewidth - 0.5)\n ax.grid(b=True, which='minor', axis='both',\n linestyle=':', linewidth=linewidth - 1)\n ax.tick_params(which='both', labelsize=size+2)\n ax.tick_params(which='major', length=6, axis='both')\n ax.tick_params(which='minor', length=3, axis='both')\n\n # labels and size\n ax.xaxis.label.set_size(size+4)\n ax.yaxis.label.set_size(size+4)\n # ax.title.set_fontsize(size+6) # not working, don't know why...\n\n return", "def show_mplrc_settings():\n print 'Using %s' % mpl.matplotlib_fname()\n r = mpl.rcParams\n\n ff = r['font.family'][0]\n print 'Font sizes for axes: %g; (x,y) ticks: (%g, %g): legend %g' % \\\n (r['axes.labelsize'], r['xtick.labelsize'],\n r['ytick.labelsize'], r['legend.fontsize'])\n print 'Font family %s uses face %s' % (ff, r['font.'+ff])\n\n print 'Figure size: %s, dpi: %g' % (r['figure.figsize'], r['figure.dpi'])", "def __init__(self):\n import matplotlib.pyplot as plt\n\n\n SMALL_SIZE = 12\n MEDIUM_SIZE = 14\n BIGGER_SIZE = 16\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title", "def set_rc_params():\n\n mult = 1\n mpl.rcParams.update({'font.size': 12 * mult})\n mpl.rcParams['legend.fontsize'] = 15 * mult\n mpl.rcParams['axes.linewidth'] = 1\n mpl.rcParams['xtick.labelsize'] = 12 * mult\n mpl.rcParams['ytick.labelsize'] = 12 * mult\n mpl.rcParams['xtick.major.size'] = 5\n mpl.rcParams['ytick.major.size'] = 5\n mpl.rcParams['xtick.major.width'] = 1\n mpl.rcParams['ytick.major.width'] = 1\n mpl.rcParams['xtick.minor.size'] = 3\n mpl.rcParams['ytick.minor.size'] = 3\n mpl.rcParams['xtick.minor.width'] = 1\n mpl.rcParams['ytick.minor.width'] = 1\n mpl.rcParams['xtick.direction'] = 'in'\n mpl.rcParams['ytick.direction'] = 'in'\n mpl.rcParams['xtick.bottom'] = True\n mpl.rcParams['xtick.top'] = True\n mpl.rcParams['ytick.left'] = True\n mpl.rcParams['ytick.right'] = True\n mpl.rcParams['axes.labelsize'] = 15 * mult\n # mpl.rcParams['text.usetex'] = True", "def get_plot_options(self):\n plot_options = []\n # Get pumping rate plot options\n op = self.pumprate.get_plot_options()\n if op['visible']:\n plot_options.append(op)\n # Get associated data options\n for i in range(self.well_count()):\n well_options = self.wells[i].get_plot_options()\n plot_options.extend(well_options)\n return(plot_options)", "def plot_configuration(self):\n # TODO : Most plots in pandas, etc return the axes not the figure. 
I\n # think the parent figure can always be gotten from an axis.\n if self.config_plot_func is None:\n msg = 'No plotting function has been assigned to config_plot_func.'\n raise ValueError(msg)\n else:\n args = []\n for k in getfullargspec(self.config_plot_func).args:\n if k == 'time':\n args.append(self._time['t'])\n elif k == 'time__hist':\n args.append(self._time['t'])\n elif k == 'time__futr':\n args.append(self._time['t'])\n elif k.endswith('__hist'):\n args.append(self._get_par_vals(k[:-6]))\n elif k.endswith('__futr'):\n args.append(self._get_par_vals(k[:-6]))\n else:\n args.append(self._get_par_vals(k))\n return self.config_plot_func(*args)", "def set_params(lw=1.5, universal_color=\"#262626\", fontsize=16):\n rc(\"font\", size=fontsize)\n rc(\"lines\", linewidth=lw, markeredgewidth=lw * 0.5)\n rc(\"patch\", linewidth=lw, edgecolor=\"#FAFAFA\")\n rc(\n \"axes\",\n linewidth=lw,\n edgecolor=universal_color,\n labelcolor=universal_color,\n axisbelow=True,\n )\n rc(\"image\", origin=\"lower\") # fits images\n rc(\"xtick.major\", width=lw * 0.75)\n rc(\"xtick.minor\", width=lw * 0.5)\n rc(\"xtick\", color=universal_color)\n rc(\"ytick.major\", width=lw * 0.75)\n rc(\"ytick.minor\", width=lw * 0.5)\n rc(\"ytick\", color=universal_color)\n rc(\"grid\", linewidth=lw)\n rc(\n \"legend\",\n loc=\"best\",\n numpoints=1,\n scatterpoints=1,\n handlelength=1.5,\n fontsize=fontsize,\n columnspacing=1,\n handletextpad=0.75,\n )", "def plot_options(cls, obj, percent_size):\n raise NotImplementedError", "def get_plot_options(self):\n plot_options = []\n # Get drawdown plot options\n op = self.drawdown.get_plot_options()\n if op['visible']:\n plot_options.append(op)\n # Get associated data options\n for i in range(self.data_count()):\n op = self.data[i].get_plot_options()\n if op['visible']:\n plot_options.append(op)\n return(plot_options)", "def plot_defaults(width=6, height=6, fontsize=12, legend_fontsize=12):\n\n params = {'backend': 'pdf',\n 'figure.figsize': [width, height],\n 'font.size': fontsize,\n 'axes.titlesize': 'medium',\n 'axes.labelsize': 'medium',\n 'legend.fontsize': legend_fontsize,\n 'legend.frameon' : False,\n 'figure.dpi': 600,\n 'lines.markersize': 4,\n 'lines.linewidth': 1,\n 'lines.antialiased': False,\n 'path.simplify': False }\n \n mpl.rcParams.update(params)", "def set_plot_param():\n\n mpl.rc('axes', edgecolor='dimgrey')\n mpl.rc('axes', labelcolor='dimgrey')\n mpl.rc('xtick', color='dimgrey')\n mpl.rc('ytick', color='dimgrey')\n mpl.rc('legend', fontsize='large')\n mpl.rc('text', color='dimgrey')", "def params(self):\n return {'shape': self.shape,\n 'name': self.name}", "def get_scattering(self, param_name: list = ['S11', 'S21']):\n # TODO: move the plot in this analysis module. 
Renderer should recover the entire data\n return self.renderer.plot_params(param_name)", "def PlotSettings():\n\n # Color palette\n import seaborn as sns\n # sns.set()\n\n # Axes font size\n sns.set(font_scale=1.2)\n\n # LaTeX\n if find_executable('latex'):\n plt.rc('text',usetex=True)\n matplotlib.font_manager._rebuild()\n\n # Style sheet\n sns.set_style(\"white\")\n sns.set_style(\"ticks\")\n\n # Font (Note: this should be AFTER the plt.style.use)\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none' # text in svg file will be text not path.", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def parameters_table(self):\n set_option(\"display.max_colwidth\", 80)\n legend_a = Series(\"----------\", index=[\"*** Farmer ***\"])\n a = self.farmer.parameters_table()\n legend_b = Series(\"----------\", index=[\"*** Reseller***\"])\n b = self.reseller.parameters_table()\n legend_c = Series(\"----------\", index=[\"*** Cofiring plant ***\"])\n c = self.cofiring_plant.parameters_table()\n legend_d = Series(\"----------\", index=[\"*** Mining ***\"])\n d = Series(self.mining_parameter, self.mining_parameter._fields)\n display_as(d.loc[\"wage_mining\"], \"USD / hr\")\n legend_e = Series(\"----------\", index=[\"*** Prices ***\"])\n e = Series(self.price, self.price._fields)\n display_as(e.loc[\"biomass_plantgate\"], \"USD / t\")\n display_as(e.loc[\"biomass_fieldside\"], \"USD / t\")\n display_as(e.loc[\"coal\"], \"USD / t\")\n display_as(e.loc[\"electricity\"], \"USD / kWh\")\n return concat([legend_c, c, legend_a, a, legend_b, b, legend_d, d, legend_e, e])", "def default_params():\n params = {}\n params['load'] = None\n params['style'] = 'ggplot'\n params['show'] = True\n params['save'] = None\n return params", "def plot_settings(clear = True, grid = True):\n if clear:\n plt.clf() # Clears any previous figures\n\n # Setting figure size\n figure = plt.gcf()\n figure.set_size_inches(18, 10)\n\n # Setting size of plot elements\n plt.rc('axes', labelsize = 22, titlesize = 24) \n plt.rc('xtick', labelsize = 18) \n plt.rc('ytick', labelsize = 18) \n plt.rc('legend', fontsize = 20)\n plt.rc('axes', axisbelow = True) # Ensures that the grid is behind any graph elements\n if grid:\n plt.grid() # Adds a grid to the plot", "def get_options(self, panel=\"\"):\n mode = self.mode.currentText()\n\n if mode == self.ScaleCustom:\n width = self.width.value()\n height = self.height.value()\n\n elif mode == self.ScaleRenderSettings:\n # width height from render resolution\n width = mc.getAttr(\"defaultResolution.width\")\n height = mc.getAttr(\"defaultResolution.height\")\n\n elif mode == self.ScaleWindow:\n # width height from active view panel size\n\n if not panel:\n # No panel would be passed when updating in the UI as such\n # the resulting resolution can't be previewed. 
But this should\n # never happen when starting the capture.\n width = 0\n height = 0\n else:\n width = mc.control(panel, q=True, width=True)\n height = mc.control(panel, q=True, height=True)\n\n else:\n raise NotImplementedError(\"Unsupported scale mode: \"\n \"{0}\".format(mode))\n\n scale = [width, height]\n percentage = self.percent.value()\n scale = [math.floor(x * percentage) for x in scale]\n\n return {\n \"width\": scale[0],\n \"height\": scale[1]\n }", "def __init__(self):\n\n fig_width_pt = 800.0 \n pylab.rcParams.update(plot_params)", "def get_font_options(self): # real signature unknown; restored from __doc__\n pass", "def get_size(self_or_cls, plot):\n raise NotImplementedError", "def render(self):\n # TODO: this is when the backing store should be swapped in.\n from matplotlib.font_manager import FontProperties\n self.subplot.legend(prop=FontProperties(size=10))\n #self.subplot.legend()\n pass", "def mpl_patch_arguments(self):\n raise NotImplementedError()", "def __init__(self, size=None):\n if size is None:\n size = plt.rcParams[\"font.size\"]\n self.size = size", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def __init__(self,\n title = '',\n x_title = None,\n y_title = None,\n plot_header = True,\n ratio = False,\n x_range = None,\n y_max = None,\n y_min = None,\n legendColumns = 1):\n # Store the title\n self._title = title\n self._x_title, self._y_title = x_title, y_title\n\n # Store whether or not the user wants to create a plot header\n self._plot_header = plot_header\n\n # Calculate a unique name for the plot components\n name = _rand_uuid()\n\n # Default logy if off\n self._logy = False\n\n # Default off for integer x-ticks \n self._x_integer_ticks = False \n\n # store n columns for legend\n self.PLOT_LEGEND_N_COLUMNS = legendColumns \n\n # Create a canvas\n self._canvas = TCanvas(name + '_canvas',\n name,\n int(self.PLOT_WIDTH),\n int(self.PLOT_HEIGHT))\n SetOwnership(self._canvas, False)\n\n\n\n # Create the main plot and draw it\n self._plot = TPad(\n 'upperPad',\n 'upperPad',\n #name + '_plot', # WJF: don't need upper pad to have unique name \n #name,\n 0.0,\n (self.PLOT_RATIO_FRACTION\n if ratio\n else 0.0),\n 1.0,\n 1.0\n )\n SetOwnership(self._plot, False)\n self._plot.SetMargin(*(self.PLOT_MARGINS_WITH_RATIO\n if ratio\n else self.PLOT_MARGINS))\n self._plot.Draw()\n\n # Store ranges\n self._x_range = x_range\n if y_max is not None:\n self._set_maximum_value(y_max)\n if y_min is not None:\n self._set_minimum_value(y_min)\n\n # Switch back to the context of the canvas\n self._canvas.cd()\n\n\n # Create a ratio plot and draw it if requested\n if ratio:\n self._ratio_plot = TPad(\n 'lowerPad', # WJF, don't need lower pad to have unique name\n 'lowerPad',\n 0.0,\n 0.0,\n 1.0,\n self.PLOT_RATIO_FRACTION\n )\n SetOwnership(self._ratio_plot, False)\n self._ratio_plot.SetMargin(*self.PLOT_RATIO_MARGINS)\n self._ratio_plot.SetGridy(True)\n self._ratio_plot.Draw()\n else:\n self._ratio_plot = None\n # increase canvas margins\n #self._canvas.SetBottomMargin(1)\n #self._plot.SetMargin\n #self._canvas.SetLeftMargin(\n\n # Track whether or not we've already drawn to the main pad\n self._drawn = False\n\n # Track whether or not we've already drawn to the ratio pad\n self._ratio_drawn = False\n\n # Track that object which sets up the axes in the main plot\n self._axes_object = None\n\n # Track whether or not we've already 
added the atlas label to the main pad\n self._atlas_label_drawn = False\n\n # Create a structure to track any histograms we generate internally\n # which need to be added to any legends created\n self._legend_extras = []\n \n # Flag if y-axis has been set to a log scale \n self._logy = False", "def get_visual_size(self):\n print(self.my_name)\n print(self.my_distance)\n print(self.my_size)\n pass # do some fancyness here", "def __init__(self,width=\"\",typeset=\"\"):\r\n \r\n if width==\"single column paper\":\r\n if typeset==\"latex\":\r\n plt.rcParams['savefig.format']='eps'\r\n elif typeset==\"word\":\r\n plt.rcParams['savefig.format']=\"png\"\r\n #most journals: 9 cm (or 3.5 inch) for single column width and 18.5 cm (or 7.3 inch) for double column width.\r\n plt.rcParams['figure.figsize'] = 3.5, 2\r\n plt.rcParams['figure.dpi'] = 200\r\n plt.rcParams['axes.titlesize']='medium'\r\n plt.rcParams['axes.labelsize']='small'\r\n # plt.rcParams['axes.titlepad']= 4.0\r\n # plt.rcParams['axes.labelpad']= 1.0\r\n plt.rcParams['xtick.direction']='in'\r\n # plt.rcParams['xtick.major.pad']=5.0\r\n # plt.rcParams['xtick.major.size']=3.0\r\n plt.rcParams['xtick.labelsize'] = 'small'\r\n plt.rcParams['ytick.direction']='in'\r\n # plt.rcParams['ytick.major.pad']=2.0\r\n # plt.rcParams['ytick.major.size']=3.0\r\n plt.rcParams['ytick.labelsize'] = 'small'\r\n plt.rcParams['text.usetex'] = True\r\n plt.rcParams['legend.fontsize'] = 'small'\r\n plt.rcParams['figure.max_open_warning'] = 100\r\n elif width==\"double column paper\":\r\n if typeset==\"latex\":\r\n plt.rcParams['savefig.format']='eps'\r\n elif typeset==\"word\":\r\n plt.rcParams['savefig.format']=\"png\"\r\n #most journals: 9 cm (or 3.5 inch) for single column width and 18.5 cm (or 7.3 inch) for double column width.\r\n plt.rcParams['figure.figsize'] = 7.3, 5\r\n plt.rcParams['figure.dpi'] = 200\r\n plt.rcParams['axes.titlesize']='xx-large'\r\n plt.rcParams['axes.labelsize']='x-large'\r\n plt.rcParams['axes.titlepad']= 4.0\r\n plt.rcParams['axes.labelpad']= 1.0\r\n plt.rcParams['xtick.direction']='in'\r\n plt.rcParams['xtick.major.pad']=5.0\r\n # plt.rcParams['xtick.major.size']=3.0\r\n plt.rcParams['xtick.labelsize'] = 'x-large'\r\n plt.rcParams['ytick.direction']='in'\r\n # plt.rcParams['ytick.major.pad']=2.0\r\n # plt.rcParams['ytick.major.size']=3.0\r\n plt.rcParams['ytick.labelsize'] = 'x-large'\r\n plt.rcParams['text.usetex'] = True\r\n plt.rcParams['legend.fontsize'] = 'large'\r\n plt.rcParams['figure.max_open_warning'] = 100\r\n elif width==\"PPT\":\r\n if typeset==\"Beamer\":\r\n plt.rcParams['savefig.format']='eps'\r\n elif typeset==\"PPT\":\r\n plt.rcParams['savefig.format']=\"png\"\r\n #most journals: 9 cm (or 3.5 inch) for single column width and 18.5 cm (or 7.3 inch) for double column width.\r\n plt.rcParams['figure.figsize'] = 5.33, 3\r\n plt.rcParams['figure.dpi'] = 200\r\n plt.rcParams['axes.titlesize']='xx-large'\r\n plt.rcParams['axes.labelsize']='x-large'\r\n plt.rcParams['axes.titlepad']= 4.0\r\n plt.rcParams['axes.labelpad']= 1.0\r\n plt.rcParams['xtick.direction']='in'\r\n plt.rcParams['xtick.major.pad']=5.0\r\n # plt.rcParams['xtick.major.size']=3.0\r\n plt.rcParams['xtick.labelsize'] = 'x-large'\r\n plt.rcParams['ytick.direction']='in'\r\n # plt.rcParams['ytick.major.pad']=2.0\r\n # plt.rcParams['ytick.major.size']=3.0\r\n plt.rcParams['ytick.labelsize'] = 'x-large'\r\n plt.rcParams['text.usetex'] = True\r\n plt.rcParams['legend.fontsize'] = 'large'\r\n plt.rcParams['figure.max_open_warning'] = 100\r\n elif 
width==\"default\":\r\n plt.rcdefaults()\r\n # # plt.rcParams['savefig.format'] = 'pdf'\r\n # plt.rcParams['savefig.format']='svg'\r\n # #most journals: 9 cm (or 3.5 inch) for single column width and 18.5 cm (or 7.3 inch) for double column width.\r\n # plt.rcParams['figure.figsize'] = 8.27, 4.88\r\n # plt.rcParams['figure.dpi'] = 200\r\n # plt.rcParams['axes.titlesize']='large'\r\n # plt.rcParams['axes.labelsize']='large'\r\n # plt.rcParams['axes.titlepad']= 4.0\r\n # plt.rcParams['axes.labelpad']= 1.0\r\n # plt.rcParams['xtick.direction']='in'\r\n # plt.rcParams['xtick.major.pad']=5.0\r\n # # plt.rcParams['xtick.major.size']=3.0\r\n # plt.rcParams['xtick.labelsize'] = 'large'\r\n # plt.rcParams['ytick.direction']='in'\r\n # # plt.rcParams['ytick.major.pad']=2.0\r\n # # plt.rcParams['ytick.major.size']=3.0\r\n # plt.rcParams['ytick.labelsize'] = 'large'\r\n # plt.rcParams['text.usetex'] = True\r\n # plt.rcParams['legend.fontsize'] = 'medium'\r\n # plt.rcParams['figure.max_open_warning'] = 100\r\n # plt.rcParams['axes.labelsize'] = 20\r\n # plt.rcParams['axes.titlesize'] = 20\r\n # \r\n # plt.rcParams['ytick.labelsize'] = 15\r\n # plt.rcParams['legend.fontsize'] = 15\r\n # plt.rcParams['lines.linewidth'] = 2\r\n # plt.rcParams['lines.markersize'] = 10\r\n # plt.rcParams['font.family'] = 'sans-serif'\r\n # plt.rcParams['font.sans-serif'] = 'DejaVu Sans'\r\n # plt.rcParams['font.family'] = 'serif'\r\n # plt.rcParams['font.serif'] = 'Times New Roman'\r\n # plt.rcdefaults()\r", "def displayData(cls):\n return (\n \"paramName\",\n \"autoFollow\",\n \"lowerDisplay\",\n \"upperDisplay\",\n \"binCount\",\n \"xscale\",\n \"yweight\"\n )" ]
[ "0.68739355", "0.6408702", "0.63112503", "0.62113756", "0.60894156", "0.6065956", "0.60561585", "0.5976915", "0.5962896", "0.59489775", "0.5924714", "0.5917014", "0.5901491", "0.5853274", "0.5849428", "0.5835417", "0.5820773", "0.57584924", "0.5756284", "0.5722286", "0.5717825", "0.5707595", "0.5695543", "0.56834424", "0.56770915", "0.56555754", "0.5647547", "0.5645018", "0.56378484", "0.5623783" ]
0.7657647
0
assuming centerline cells have two layers, this method returns the arrays of each layers and their global id corresponding to cell_cent
def separate_centrline_lyers_by_y_coordinates(cell_cent, centerline_cells): cc4 = centerline_cells y_uniq_coord = np.unique(cc4[:,1]) idx_lst = [] cent_Y_lyrs = [] for yy in y_uniq_coord: cent_Y_lyrs_temp = centerline_cells[np.where(cc4[:,1] == yy)] idx = np.where((cell_cent==cent_Y_lyrs_temp[:,None]).all(-1))[1] cent_Y_lyrs.append(cent_Y_lyrs_temp) idx_lst.append(idx) return cent_Y_lyrs, idx_lst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getChipCoreAndCxId(layer):\n core_ids = []\n cx_ids = []\n chip_ids = []\n for id in layer.nodeIds:\n _, chip_id, core_id, cx_id, _, _ = layer.net.resourceMap.compartment(id)\n chip_ids.append(chip_id)\n core_ids.append(core_id)\n cx_ids.append(cx_id)\n return np.array(chip_ids), np.array(core_ids), np.array(cx_ids)", "def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))", "def _calcOrderedCellVertexIDs(self):\n ids = numerix.zeros((8, self.nx, self.ny, self.nz), 'l')\n indices = numerix.indices((self.nx, self.ny, self.nz))\n ids[1] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1) + 1) * (self.nx + 1)\n ids[0] = ids[1] + 1\n ids[3] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1)) * (self.nx + 1)\n ids[2] = ids[3] + 1\n ids[5] = indices[0] + (indices[1] + indices[2] * (self.ny + 1) + 1) * (self.nx + 1)\n ids[4] = ids[5] + 1\n ids[7] = indices[0] + (indices[1] + indices[2] * (self.ny + 1)) * (self.nx + 1)\n ids[6] = ids[7] + 1\n\n return numerix.reshape(ids.swapaxes(1, 3), (8, self.numberOfCells))", "def _getTopCenterIndices(self, resolution, rectangular):\n # get x, y indices to get away from the ring basis.\n # indices starts with (0, 0) in the middle, with (r2, p1) -> (1, 0), etc. (x is on the pos 1 ray)\n\n numAxialLevels = 2 * resolution\n xi, yi = self.indices()\n if rectangular:\n topCenterI = 2 + (3 * resolution) * xi\n else:\n # 4*d b/c each increase in xi moves you back by numstacks/2\n topCenterI = 1 + (4 * resolution) * xi + (yi * numAxialLevels)\n topCenterJ = 1 + xi * numAxialLevels // 2 + numAxialLevels * yi\n return topCenterI, topCenterJ", "def get_boundary_layers(cell_cent, el, num_lyrs, bc_loc, struct_grd):\n dim = len(el)\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n if(struct_grd):\n fctr = 1\n corr = 0\n lyrs = float(num_lyrs-1)+ 0.0001\n else:\n fctr = 2\n corr = 1\n lyrs = float(num_lyrs)+ 0.0001\n\n lyrs = 1.0001*float(num_lyrs-1)\n for d in range(dim):\n bound_range[2*d] = np.min(cell_cent[:,d]) + corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] + lyrs*el[d]\n bound_range[2*d+1] = np.max(cell_cent[:,d]) - corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] - lyrs*el[d]\n\n bound_nodes[2*d] = np.where(cell_cent[:,d] <= bound_range[2*d])\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1])\n\n #store only those key value pair that are in the bc_loc\n #this in the end returns mesh with ghost layer cells, \n #if they've been applied already\n keys = bound_nodes.keys()\n keys_temp = [kk for kk in keys]\n for kk in keys_temp:\n if kk not in bc_loc:\n bound_nodes.pop(kk, None)\n \n return bound_nodes", "def get_cell_centroid2(cents, extents):\n cells_in_ee = np.empty(0,int)\n for i in range(len(cents)):\n c = cents[i]\n if( (c > extents[0]).all() and (c <= extents[1]).all() ):\n cells_in_ee = np.append(cells_in_ee, [i], axis=0)\n\n return cells_in_ee", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def get_cell_center_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_center_coordinates)\n if 
self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z", "def get_modified_boundary_layers(cell_cent, el, num_lyrs, struct_grd):\n dim = len(el)\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n bound_cents = {} #dict to store the node centroids corresponding to node numbers above\n \n if(struct_grd):\n factor = 1\n correction = 0\n else:\n factor = 2\n correction = 1\n\n lyrs = float(num_lyrs-1)+ 0.001\n \n for d in range(dim):\n bound_range[2*d] = factor*np.min(cell_cent[:,d]) + lyrs*el[d]\n bound_range[2*d+1] = np.max(cell_cent[:,d]) -lyrs*el[d] - el[d]/3*correction\n\n bound_nodes[2*d] = np.where(cell_cent[:,d] <= bound_range[2*d])\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1])\n\n bound_cents[2*d] = cell_cent[bound_nodes[2*d][0]]\n bound_cents[2*d+1] = cell_cent[bound_nodes[2*d+1][0]]\n\n return bound_nodes, bound_cents", "def get_cells_lines(lines: List[np.ndarray], dim: int) -> \\\n DefaultDict[Tuple[int], List[np.ndarray]]:\n \n size = lines[0].size\n shape = [size] * dim\n cells_lines = defaultdict(list)\n\n for line in lines:\n for j in range(size):\n cell_inds = np.unravel_index(line[j], shape)\n cells_lines[cell_inds].append(line) \n return cells_lines", "def cell_coord(id, Nx):\n nx = id // (Nx**2)\n ny = (id - nx * Nx**2) // Nx\n nz = id - nx * Nx**2 - ny * Nx\n return np.array([nx, ny, nz])", "def get_nb_vals(i, pnts, dem, top_left_cor, cellsize, rows, cols):\n nb_x = np.zeros((5,5)) # this 5 by 5 max would contain the x coordinate of 16 neighbor pixels of a sample point\n nb_y = np.zeros((5,5)) # this 5 by 5 matrix would contain the y coordinate of 16 neighbor pixels of a sample point\n nb_z = np.zeros((5,5))\n # get index and value of cell in DEM containing current point\n (cell_X, cell_Y, cell_Z) = misc.getCellValue(pnts[i], \n dem, \n top_left_cor, \n cellsize)\n #Deal with sample points near boundary of the DEM\n point_within_dem = (cell_X-2) >=0 and (cell_Y-2>=0) and (cell_X+3)<=cols and (cell_Y+3)<=rows\n if point_within_dem:\n nb_z[0:5,0:5] = misc.RasterSubset(dem,(cell_Y-2),(cell_Y+3),(cell_X-2),(cell_X+3))\n else:\n #Get the part of moving window within the DEM domain\n in_data= misc.RasterSubset(dem,max((cell_Y-2),0),min((cell_Y+3),rows),max((cell_X-2),0),min((cell_X+3),cols))\n #in_data=dem[\"array\"][max((cell_Y-2),0):min((cell_Y+3),rows),max((cell_X-2),0):min((cell_X+3),cols)]\n nb_z[max((2-cell_Y),0):min((5-(cell_Y+3-rows)),5),max((2-cell_X),0):min((5-(cell_X+3-cols)),5)]=in_data[0:in_data.shape[0],0:in_data.shape[1]]\n in_data_avg=np.mean(in_data[in_data>-3.4e+10])\n nb_z[nb_z==0]=in_data_avg\n nb_z[nb_z<-3.4e+10]=in_data_avg\n\n\n \n # If there is missing data in the neighborhood of the sample point \n # use neighborhood average to replace the missing value \n has_missing_data = (nb_z>8848).sum()>0 or (nb_z<-413).sum()>0\n if has_missing_data:\n avgValue=np.mean(nb_z[np.where(np.logical_and(nb_z<8848, nb_z>-413))])\n nb_z[nb_z>8848]=avgValue\n nb_z[nb_z<-413]=avgValue\n \n # Obtain the coordinate of cell centroid of a 5*5 neighborhood around the sample point\n for ii in [0,1,2,3,4]:\n cor_y=ii-2\n dy = (cell_Y+cor_y+0.5) * cellsize[1]\n nb_y[ii,:] = top_left_cor[1] + dy\n for jj in [0,1,2,3,4]:\n cor_x=jj-2\n dx = (cell_X+cor_x+0.5) * cellsize[0]\n nb_x [:,jj] = top_left_cor[0] + dx\n return nb_x, nb_y, 
nb_z", "def get_pixel_positions(self, centre=True):\n out = np.zeros(self.expected_data_shape + (3,), dtype=np.float64)\n\n # Prepare some arrays to use inside the loop\n pixel_ss_coord, pixel_fs_coord = np.meshgrid(\n np.arange(0, self.frag_ss_pixels, dtype=np.float64),\n np.arange(0, self.frag_fs_pixels, dtype=np.float64),\n indexing='ij'\n )\n\n # Shift coordinates from corner to centre if requested.\n # This is also where the DSSC subclass shifts odd rows by half a pixel\n self._adjust_pixel_coords(pixel_ss_coord, pixel_fs_coord, centre)\n\n for m, mod in enumerate(self.modules, start=0):\n for t, tile in enumerate(mod, start=0):\n corner_x, corner_y, corner_z = tile.corner_pos\n ss_unit_x, ss_unit_y, ss_unit_z = tile.ss_vec\n fs_unit_x, fs_unit_y, fs_unit_z = tile.fs_vec\n\n # Calculate coordinates of each pixel's first corner\n # 2D arrays, shape: (64, 128)\n pixels_x = (\n corner_x\n + pixel_ss_coord * ss_unit_x\n + pixel_fs_coord * fs_unit_x\n )\n pixels_y = (\n corner_y\n + pixel_ss_coord * ss_unit_y\n + pixel_fs_coord * fs_unit_y\n )\n pixels_z = (\n corner_z\n + pixel_ss_coord * ss_unit_z\n + pixel_fs_coord * fs_unit_z\n )\n\n # Which part of the array is this tile?\n tile_ss_slice, tile_fs_slice = self._tile_slice(t)\n\n # Insert the data into the array\n out[m, tile_ss_slice, tile_fs_slice, 0] = pixels_x\n out[m, tile_ss_slice, tile_fs_slice, 1] = pixels_y\n out[m, tile_ss_slice, tile_fs_slice, 2] = pixels_z\n\n return out", "def cfdGetFaceCentroidsSubArrayForBoundaryPatch(self):\r\n \r\n for iBPatch, theBCInfo in self.cfdBoundaryPatchesArray.items():\r\n \r\n startBFace=self.cfdBoundaryPatchesArray[iBPatch]['startFaceIndex']\r\n endBFace=startBFace+self.cfdBoundaryPatchesArray[iBPatch]['numberOfBFaces']\r\n iBFaces=list(range(int(startBFace),int(endBFace))) \r\n \r\n self.cfdBoundaryPatchesArray[iBPatch]['faceCentroids']=[self.faceCentroids[i] for i in iBFaces]", "def _get_center(data, node_id, feature_columns):\n if node_id in data.id.values:\n return data[data.id == node_id][feature_columns].values\n else:\n return _get_center(data, node_id[:-1], feature_columns)", "def get_chunk_coordinates(self, node_or_chunk_id: np.uint64\n ) -> np.ndarray:\n layer = self.get_chunk_layer(node_or_chunk_id)\n bits_per_dim = self.bitmasks[layer]\n\n x_offset = 64 - self._n_bits_for_layer_id - bits_per_dim\n y_offset = x_offset - bits_per_dim\n z_offset = y_offset - bits_per_dim\n\n x = int(node_or_chunk_id) >> x_offset & 2 ** bits_per_dim - 1\n y = int(node_or_chunk_id) >> y_offset & 2 ** bits_per_dim - 1\n z = int(node_or_chunk_id) >> z_offset & 2 ** bits_per_dim - 1\n return np.array([x, y, z])", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def xyzcellcenters(self):\n cache_index = 'cellcenters'\n if cache_index not in self._cache_dict or \\\n 
self._cache_dict[cache_index].out_of_date:\n # get x centers\n x = np.add.accumulate(self.__delr) - 0.5 * self.delr\n # get y centers\n Ly = np.add.reduce(self.__delc)\n y = Ly - (np.add.accumulate(self.__delc) - 0.5 *\n self.__delc)\n x_mesh, y_mesh = np.meshgrid(x, y)\n if self.__nlay is not None:\n # get z centers\n z = np.empty((self.__nlay, self.__nrow, self.__ncol))\n z[0, :, :] = (self._top[:, :] + self._botm[0, :, :]) / 2.\n for l in range(1, self.__nlay):\n z[l, :, :] = (self._botm[l - 1, :, :] +\n self._botm[l, :, :]) / 2.\n else:\n z = None\n if self._has_ref_coordinates:\n # transform x and y\n x_mesh, y_mesh = self.get_coords(x_mesh, y_mesh)\n # store in cache\n self._cache_dict[cache_index] = CachedData([x_mesh, y_mesh, z])\n if self._copy_cache:\n return self._cache_dict[cache_index].data\n else:\n return self._cache_dict[cache_index].data_nocopy", "def get_id_from_coor(self, x, y):\n x_coor = x // self._cell_dim\n y_coor = y // self._cell_dim\n return (x_coor, y_coor)", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def split_simcc_xy(self, heatmap: Union[np.ndarray, torch.Tensor]):\n size = heatmap.size()\n k = size[0] if size[0] <= 20 else 20\n maps = []\n for _ in range(k):\n xy_dict = {}\n single_heatmap = heatmap[_]\n xy_dict['x'], xy_dict['y'] = self.merge_maps(single_heatmap)\n maps.append(xy_dict)\n return maps, k", "def calculate_center_coordinates(self):\r\n coord_y = 320\r\n coord_x = -640\r\n distance_between_l = self.distance_between_layers()\r\n distance_between_n = self.distance_between_neurons()\r\n\r\n for layer in xrange(1, self.number_of_layers + 1):\r\n layer_data = []\r\n coord_x += distance_between_l\r\n\r\n for index_n, neuron in enumerate(xrange(1, self.number_of_neurons_in_layer + 1)):\r\n\r\n if index_n:\r\n coord_y -= distance_between_n\r\n else:\r\n coord_y = 320 # starting coordinates Y\r\n\r\n layer_data.append((coord_x, coord_y))\r\n\r\n self.central_coordinates[layer] = layer_data\r\n\r\n pprint(self.central_coordinates)\r\n self.calculate_outputs()", "def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = 
lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs", "def cell_edges(self):", "def _get_centeroids(self, pts, max_dist=None):\n\n if max_dist == None:\n max_dist=self.maximum_edge_point_distance\n dist=0\n center_pts=[]\n if len(pts)>0:\n pts = sorted(pts)\n i = 0\n j=1\n center_pts.append(pts[0])\n num_pts=1.0 # to avoid integer division\n num_clusters = 1\n while i<len(pts) and j<len(pts):\n if pts[j]-pts[i]<max_dist:\n if len(center_pts)<num_clusters:\n center_pts.append(0)\n center_pts[num_clusters-1] = center_pts[num_clusters-1]+pts[j]\n num_pts+=1.0\n j=j+1\n i+=1\n else:\n if len(center_pts)<=num_clusters:\n center_pts.append(0)\n center_pts[num_clusters-1]= int(center_pts[num_clusters-1]/num_pts)\n num_pts = 1\n num_clusters += 1\n i=j\n j=i+1\n if len(center_pts)<=num_clusters:\n center_pts[num_clusters-1] = int(center_pts[num_clusters-1]/num_pts)\n else:\n print \"error! center_pts!\", len(center_pts), num_clusters, self.key\n return center_pts", "def get_cntr_points_center(self, contour, img_size):\n res_arr = []\n \n # first border line\n for row in range(img_size):\n tmp_arr = []\n curr_row = row\n curr_col = 0\n while curr_row != -1:\n if cv2.pointPolygonTest(contour,(curr_row,curr_col),True)> 0:\n tmp_arr.append([curr_row,curr_col])\n curr_row = curr_row -1\n curr_col = curr_col +1\n if len(tmp_arr):\n res_arr.append(tmp_arr)\n \n # second border line\n for row in range(img_size):\n tmp_arr = []\n curr_row = row\n curr_col = img_size -1\n while curr_row != img_size:\n if cv2.pointPolygonTest(contour,(curr_row,curr_col),True)> 0:\n tmp_arr.append([curr_row,curr_col])\n curr_row = curr_row +1\n curr_col = curr_col -1\n if len(tmp_arr):\n res_arr.append(tmp_arr)\n return res_arr", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def GetCoastGrids(LandMask):\n \n \"\"\"\n Define a coastline map. This map will be set to 1 on all coast cells.\n \"\"\"\n \n \n CoastlineMap = np.zeros((LandMask.shape[0], LandMask.shape[1]))\n \n \"\"\"\n We will use a nested loop to loop through all cells of the Landmask cell. What this loop basically does is,\n when a cell has a value of 1 (land), it will make all surrounding cells 1, so we create kind of an extra line of \n grids around the landmask. In the end we will substract the landmask from the mask which is created by the nested loop, \n which result in only a mask with the coast grids. Notice, that when we're in the corner, upper, side, or lower row, and we\n meet a land cell, we should not make all surrounding cells 1. For example, we the lower left corner is a land grid, you should only make the inner cells 1. \n \"\"\"\n \n for i in range(LandMask.shape[0]-1):\n for j in range(LandMask.shape[1]-1):\n \n\n \"\"\"\n We have nine if statements, four for the corners, four for the sides and one for the middle\n of the landmask. 
\n \"\"\"\n\n if i == 0 and j == 0: #upper left corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j+1] = 1\n \n \n elif i == 0 and j != 0 and j != LandMask.shape[1]-1: #upper row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1, j] = 1\n CoastlineMap[i+1,j-1] = 1\n CoastlineMap[i+1,j+1] = 1\n \n \n elif i == 0 and j == LandMask.shape[1]-1: #upper right corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j-1] = 1\n \n elif i != 0 and i != LandMask.shape[0]-1 and j == LandMask.shape[1]-1: #right row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i+1,j] = 1\n CoastlineMap[i-1,j] = 1\n \n CoastlineMap[i, j-1] = 1\n CoastlineMap[i+1,j-1] = 1\n CoastlineMap[i-1,j-1] = 1\n \n elif i == LandMask.shape[0]-1 and j == LandMask.shape[1]-1: #lower right corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n \n CoastlineMap[i-1,j] = 1 \n CoastlineMap[i-1, j-1] = 1\n \n elif i == LandMask.shape[0]-1 and j != 0 and j != LandMask.shape[1]-1: #lower row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i-1, j] = 1\n CoastlineMap[i-1,j-1] = 1\n CoastlineMap[i-1,j+1] = 1\n \n \n elif i == LandMask.shape[0]-1 and j == 0: #lower left corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j+1] = 1\n \n elif i != 0 and i != LandMask.shape[0]-1 and j == 0: #left row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i+1,j] = 1\n CoastlineMap[i-1,j] = 1\n \n CoastlineMap[i, j+1] = 1\n CoastlineMap[i+1,j+1] = 1\n CoastlineMap[i-1,j+1] = 1\n \n else:\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1 #middle\n CoastlineMap[i+1,j] = 1#lowermiddle\n CoastlineMap[i-1,j] = 1#uppermiddle\n \n CoastlineMap[i+1, j-1] = 1\n CoastlineMap[i-1, j-1] = 1\n CoastlineMap[i, j-1] =1\n \n CoastlineMap[i+1, j+1] = 1\n CoastlineMap[i-1, j+1] = 1\n CoastlineMap[i, j+1] = 1\n \n \n \n \"\"\"\n Here we substract the landmaks from the coastline mask, resulting in only\n the coastline. \n \"\"\"\n \n \n Coastgrids = CoastlineMap - LandMask\n \n return Coastgrids, CoastlineMap" ]
[ "0.61257327", "0.59683406", "0.58903646", "0.57042927", "0.5662886", "0.5600567", "0.5526157", "0.5503083", "0.5494525", "0.54930544", "0.5453292", "0.54367805", "0.54050046", "0.53910154", "0.53798836", "0.5362105", "0.53463304", "0.5340941", "0.5336969", "0.532595", "0.53255665", "0.530889", "0.5291185", "0.528357", "0.5272064", "0.52669674", "0.5265783", "0.5263411", "0.5241205", "0.5227771" ]
0.61829495
0
Converts conditions file to dictionary
def read_conditions(filepath: str): with open(filepath, 'r') as conditions_file: conditions = {} current_condition_name = line = conditions_file.readline().strip( ).lower() current_condition_description = [] while line: if line.startswith('"'): current_condition_description.append(line.replace('"', '')) else: conditions[current_condition_name] = '\n'.join( current_condition_description) current_condition_description = [] current_condition_name = line.strip().lower() line = conditions_file.readline() return conditions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_to_dictionary():\n\n return;", "def __analyze_config(self):\n result = {}\n with open(self.file) as f:\n data = f.readlines()\n temp_key = ''\n for line in data:\n if line[0] == '\t' or line[0] == ';':\n result[temp_key].append(line.strip())\n else:\n temp_key = line.strip()\n result[temp_key] = []\n return result", "def _split_raw_file(raw_file: str) -> dict:\r\n input_file = raw_file.split(\"\\n\")\r\n\r\n line_count = 0\r\n statements = {}\r\n while line_count < len(input_file):\r\n line = input_file[line_count]\r\n if len(line) == 0:\r\n line_count += 1\r\n continue\r\n else:\r\n key = line\r\n value = input_file[line_count + 1]\r\n statements.update({key: value})\r\n line_count += 2\r\n return statements", "def dict_from_file(filename):\n token = '\\s+|(?<!\\d)[,](?!\\d)'\n letters = re.compile(\"(^[a-zA-Z])\") # starts with a letter\n numbers = re.compile(\"(^(?:[+\\-])?(?:\\d*)(?:\\.)?(?:\\d*)?(?:[eE][+\\-]?\\d*$)?)\")\n empty = re.compile(\"(^\\s*$)\") # is a blank line\n\n print(\"\\nReading {0:s} ...\".format(filename))\n with open(filename, 'r') as fh:\n lines = fh.readlines()\n content = dict()\n for line in lines:\n value = None\n key = None\n if not empty.match(line):\n if letters.match(line):\n pair = re.split(token, line.strip(), maxsplit=3)\n if len(pair) == 2: # key and value exist\n key = pair[0] # first item is the key\n val = pair[1] # second item is the value\n if letters.match(val):\n value = val\n if numbers.fullmatch(val):\n value = eval(val)\n if len(pair) == 3: # key min max exist\n key = pair[0]\n val1, val2 = pair[1:]\n if numbers.fullmatch(val1) and numbers.fullmatch(val2):\n value = (eval(val1), eval(val2))\n else:\n raise ValueError(\"Min/max values expected for {0}\"\n .format(key))\n # ignore the filter file pointings and the sensitivity files these are\n # used for simulation\n if key and (value is not None):\n if ((\"FILTER\" not in key) and (\"SENSITIVITY\" not in key)):\n content[key] = value\n print(\"Setting {0:s} = {1}\".format(key, value))\n\n return content", "def get_dictionary(file_data):\n data_iter_rows = list(file_data.iter_rows())\n params = GetParams(data_iter_rows[0])\n data_list = []\n for rows in data_iter_rows[1:]:\n row = [str(cell.value).replace('\\n', '').strip() for cell in rows]\n rel_modalities = [x.strip() for x in row[4].split(',') if ch_none(x)]\n unique_finding = ', '.join([x for x in row[163:173] if ch_none(x)])\n params_list = SetParams(params, row)\n dict_birad = {'Typical': 5, 'Possible': 3, 'None': 2, 'Ignore': 1}\n d = {'Name': row[1], 'Condition description': row[2],\n 'Relevant modalities': rel_modalities,\n 'Unique findings': unique_finding,\n 'mammo_params': params_list.mammo_params,\n 'us_params': params_list.us_params,\n 'mri_params': params_list.mri_params,\n 'birad[0]': dict_birad[row[153]],\n 'birad[1]': dict_birad[row[154]],\n 'birad[2]': dict_birad[row[155]],\n 'birad[3]': dict_birad[row[156]],\n 'birad[4]': dict_birad[row[157]],\n 'birad[5]': dict_birad[row[158]],\n 'birad[6]': dict_birad[row[159]],\n 'Associated conditions': row[160],\n 'Differential diagnosis': row[162],\n }\n data_list.append(d)\n\n return data_list", "def create_counterparty_dict(file_name) -> Dict[str, str]:\n dct = {}\n with open(file_name) as f:\n root_dir = f.readline().strip('\\n')\n for line in f:\n key, val = line.strip('\\n').split('!!!!')\n temp = val.split('==')\n d = {'path': root_dir + temp[0], 'to': temp[1:]}\n dct[key] = d\n return dct", "def config_dict(filename):\n f = open(filename, \"r\")\n cfglines = 
f.readlines()\n f.close()\n cfgdict = {}\n for line in cfglines:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n try:\n key, value = line.split(\"=\")\n except:\n print \"Bad line in config-file %s:\\n%s\" % (filename, line)\n continue\n key = key.strip()\n value = value.strip()\n if value in [\"True\", \"False\", \"None\", \"''\", '\"\"']:\n value = eval(value)\n else:\n try:\n if \".\" in value:\n value = float(value)\n else:\n value = int(value)\n except:\n pass # value need not be converted\n cfgdict[key] = value\n return cfgdict", "def _parse(file_contents):\n\n if file_contents is None or file_contents == '':\n return {}\n\n result = {}\n\n for line in file_contents.splitlines():\n # Full line comment\n if line[:1] == '#':\n continue\n\n parts = line.split('=', 1)\n\n # Not a full key-value pair.\n if len(parts) < 2:\n continue\n\n result[parts[0].strip()] = parts[1].strip()\n\n return result", "def filename_to_condition(fname):\n fname = os.path.splitext(os.path.basename(fname))[0]\n elements = fname.split('_')\n result = {}\n for term in elements:\n try:\n index = term.index('-')\n key = term[:index]\n result[key] = term[index + 1:].split('-')\n if len(result[key]) == 1:\n result[key] = result[key][0]\n except ValueError:\n continue\n return result", "def _get_conditions(sample: domain.Sample, conditions: Iterable[combi.Input]) -> Dict[str, float]: # pragma: no cover\n # The default value is 0 for all required conditions.\n result = {condition.name: 0.0 for condition in conditions}\n\n # For each condition, we'll try to update this value by looking at the BCKG data.\n for condition in conditions:\n # Look up for the condition in `sample`\n for candidate in sample.conditions:\n if candidate.reagent.name == condition.name:\n # Change units\n new_concentration: domain.Concentration = units.change_units(\n concentration=candidate.concentration, desired_unit=condition.units\n )\n result[condition.name] = new_concentration.value\n\n return result", "def read_env_file(path: str) -> dict: \n with open(path, 'r') as f:\n return dict(tuple(line.replace('\\n', '').split('=',1)) for line\n in f.readlines() if not line.startswith('#'))", "def makeGcauCfgDictFromAgc(lineList): \r\n diction = {}\r\n withinCfgData = False\r\n for eachString in lineList:\r\n if re.match(RE_COMPILED_CFG_START, eachString):\r\n withinCfgData = True\r\n elif re.match(RE_COMPILED_CFG_END, eachString):\r\n withinCfgData = False\r\n elif withinCfgData:\r\n p = re.match(RE_COMPILED_CFG_ITEM, eachString)\r\n if p:\r\n obj = p.groups()[0]\r\n attr = p.groups()[1]\r\n val = p.groups()[2]\r\n if obj not in diction:\r\n diction[obj] = {}\r\n diction[obj][attr] = val\r\n return diction", "def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary", "def _get_config_from_file(self, filename, section):\r\n\r\n def clean(value):\r\n return {'true': True, 'false': False}.get(value.lower(), value)\r\n\r\n config = ConfigParser.RawConfigParser()\r\n config.read(os.path.join(self.config_path, filename))\r\n try:\r\n keys = config.options(section)\r\n except ConfigParser.NoSectionError:\r\n return {}\r\n return dict([[k, clean(config.get(section, k))] for k in keys])", "def read_file_convert_dict(file: 
str) -> dict:\n states_code = pd.read_csv(file)\n states_code = states_code.set_index('abbreviation')\n dict_y = states_code['state'].to_dict()\n return dict_y", "def to_dictionary(file):\n\n\tfin = open(file)\n\td = dict()\n\n\tfor line in fin:\n\t\td[line.strip()] = ''\n\treturn d", "def setupdict(parfile):\n pardict = {}\n with open(parfile,'r+') as f:\n for line in f:\n flags = line[56:65].split(' ')\n try:\n flags = [int(f) for f in flags]\n except:\n continue\n # if we found res pars\n if( all(flags) <= 3 ):\n # if any varied pars\n if( any(flags) > 0 ):\n # energies are dict keys\n estring = endf_float_str(float(line[0:11]))\n pardict[estring] = []\n pars = [float(line[0+11*i:11+11*i]) for i in range(len(flags))]\n for i,flag in enumerate(flags):\n if( flag > 0 ):\n pardict[estring].append((i,pars[i]))\n return pardict", "def get_dictionary(filename):\n asop_dict = {}\n # Defaults for standard observational data\n if 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc' in filename or \\\n 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc' in filename:\n asop_dict['infile'] = filename\n asop_dict['name'] = ''\n asop_dict['dt'] = 10800\n asop_dict['dx'] = 27\n asop_dict['dy'] = 27\n asop_dict['constraint'] = 'precipitation'\n asop_dict['scale_factor'] = 8.0\n asop_dict['legend_name'] = ''\n asop_dict['region'] = [-10,10,60,90]\n asop_dict['box_size'] = 1680\n asop_dict['color'] = 'red'\n asop_dict['region_size'] = 7\n asop_dict['lag_length'] = 6\n asop_dict['grid_type'] = 'native'\n asop_dict['time_type'] = '3hr'\n asop_dict['grid_desc'] = 'native'\n asop_dict['time_desc'] = '3-hourly'\n asop_dict['autocorr_length'] = 60*60*24\n else:\n asop_dict=build_asop_dict(filename)\n return(asop_dict)", "def get_config_dicts(config_file):\n config_dicts = dict()\n time_stamp = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n current_name = None\n for i, line in enumerate(config_file):\n try:\n line = line.strip()\n line = re.sub(r\"#.*\", \"\", line)\n line = re.sub(r\"\\$TIME\", time_stamp, line)\n if not line:\n pass\n elif line.startswith(\";\"):\n pass\n elif OBJECT_NAME.match(line):\n current_name = OBJECT_NAME.match(line).group(1)\n if current_name in config_dicts:\n raise IniSyntaxError(i, \"Duplicit object key: '{}', line {}.\"\n .format(current_name, i))\n config_dicts[current_name] = dict()\n elif KEY_VALUE_PAIR.match(line):\n matched = KEY_VALUE_PAIR.match(line)\n key = matched.group(1)\n value_string = matched.group(2)\n if key in config_dicts[current_name]:\n raise IniSyntaxError(i, \"Duplicit key in '{}' object, line {}.\"\n .format(key, i))\n config_dicts[current_name][key] = format_value(value_string)\n else:\n raise IniSyntaxError(i, \"Unknown string: '{}'\".format(line))\n except IniSyntaxError as exc:\n raise\n except Exception as exc:\n raise IniSyntaxError(i, \"Error\", exc) from None\n\n config_file.close()\n return config_dicts", "def format_condor_dict(data):\n\n reserved_names = frontendConfig.condor_reserved_names\n for k in reserved_names:\n if k in data:\n del data[k]\n\n out = {}\n\n for k in data.keys():\n kel = data[k].copy()\n\n el = {\"params\": {}, \"monitor\": {}}\n\n # first remove reserved names\n for attr in reserved_names:\n if attr in kel:\n del kel[attr]\n\n # then move the parameters and monitoring\n for (prefix, eldata) in ((frontendConfig.glidein_param_prefix, el[\"params\"]),\n (frontendConfig.glidein_monitor_prefix, el[\"monitor\"])):\n plen = len(prefix)\n for attr in kel.keys():\n if attr[:plen] == prefix:\n eldata[attr[plen:]] = kel[attr]\n del kel[attr]\n\n # 
what is left are glidein attributes\n el[\"attrs\"] = kel\n\n out[k] = el\n\n return out", "def read_cfg(file):\n result = []\n if isfile(file):\n with open(file) as f:\n cfg = json.load(f)\n for entry in cfg:\n if \"start\" in entry:\n filter = (entry[\"start\"], entry.get(\"end\", None))\n result.append(filter)\n return result", "def build_filterset():\n with open(config.COMID_REFERENCE) as fil:\n return {int(line.strip()) for line in fil}", "def get_dict(modfile):\n import codecs\n\n odict = dict()\n of = codecs.open(modfile, 'r', encoding='utf-8')\n for line in of:\n # Dictionary lines should be like:\n # /path/filename.suffix: mo_mod1 mo_mod2\n ll = line.rstrip().split(':')\n fname = ll[0]\n mods = ll[1].strip().split(' ')\n for m in mods:\n odict[m] = fname\n of.close()\n\n return odict", "def _get_params(self):\n self.pars_multidict = collections.OrderedDict()\n found_cfg = False\n if self.use_defaults:\n param_dir_branch = \"default_parameters\"\n else:\n param_dir_branch = \"user_parameters\"\n for condition in self.conditions:\n if condition in self.cfg_index.keys():\n found_cfg = True\n self.subcfgfilename = os.path.join(self.pars_dir, param_dir_branch, self.cfg_index[condition])\n self.pars_multidict[condition] = self._read_json_file()\n # if no specific cfg files can be found for the specified conditions, use the generic cfg file.\n if not found_cfg:\n self.subcfgfilename = os.path.join(self.pars_dir, param_dir_branch, self.cfg_index[\"all\"])\n self.pars_multidict[\"all\"] = self._read_json_file()", "def get_valid_values_map(self, condition=True):\n tpninfos = self.locate.get_all_tpninfos(self.instrument, self.filekind, \"ld_tpn\")\n required_keys = self.get_required_parkeys()\n valid_values = {}\n for info in tpninfos:\n if info.is_complex_constraint:\n continue\n if info.name in required_keys:\n values = info.values\n if len(values) == 1 and \":\" in values[0]:\n limits = values[0].split(\":\")\n try:\n limits = [int(float(x)) for x in limits]\n except Exception:\n pass\n # sys.exc_clear()\n else:\n values = list(range(limits[0], limits[1]+1))\n if condition:\n values = tuple([utils.condition_value(val) for val in values])\n valid_values[info.name] = values\n return valid_values", "def genpercent_ci(fname):\n \n d = {}\n \n with open(fname) as f:\n f.next()\n for l in f:\n condition, prop, lb, ub, nsuccess, n = l.strip('\\n').split(',')\n prop, lb, ub = map(float, [prop, lb, ub])\n \n cond = condition.lstrip('g')\n cond = cond.replace('GAL4', '-GAL4 - ')\n cond = cond.replace('UAS', 'UAS-')\n cond = cond.replace('dtrpa1', 'dtrpa1 - ')\n \n lci = prop - lb\n uci = ub - prop\n \n prop, lci, uci = map(str, [prop, lci, uci])\n \n d[cond] = []\n d[cond].extend([prop, lci, uci, nsuccess, n])\n \n return(d)", "def processfile(jobfile) :\n\n jobdict = {}\n\n f = open(jobfile) \n line = f.readline()\n\n\n while line !=\"\":\n #strip comments\n actual = line.split(\"#\")[0] \n if len(actual) != 0 :\n if \"=\" in actual:\n #print \"The string is \", actual\n readjob(actual, jobdict)\n\n line = f.readline()\n\n f.close()\n return jobdict", "def get_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n filter_id = row[\"Filter Column\"]\n filters.setdefault(filter_id, {})\n filters[filter_id][\"results\"] = row[\"Result\"].split(\", \")\n filters[filter_id][\"type\"] = row[\"Type\"]\n filters[filter_id][\"description\"] = ''.join(row[\"Description\"])\n return filters", "def conditions(self) -> 
pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"conditions\")", "def get_condition(self) -> dict:\n url = self.base_url + \"/condition\"\n condition = self._session.get(url).json()\n keys = [\"bandwidth\", \"latency\", \"jitter\", \"loss\"]\n result = {k: v for (k, v) in condition.items() if k in keys}\n return result" ]
[ "0.6218148", "0.6155669", "0.61423445", "0.60241485", "0.5845813", "0.5743224", "0.5733421", "0.57185966", "0.56938577", "0.56828797", "0.5660647", "0.5644317", "0.5610162", "0.5609556", "0.5590061", "0.55852425", "0.55422974", "0.55337137", "0.553299", "0.550761", "0.549093", "0.5486676", "0.5455893", "0.5417076", "0.54123265", "0.540451", "0.5394998", "0.53879076", "0.53875554", "0.538376" ]
0.74447215
0
Determine if game is over
def is_game_over(cls): cls.record_winner() cls.record_tie()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isGameOver(self):\n pass", "def game_over(self):\n self.over = True", "def game_over(self) -> bool:\n return self.rstate.game_over()", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def is_game_over(self) -> bool:\n return self._is_game_over", "def event_game_over(self):\n print('Game over!')\n self._cmd_exit()", "def gameOver(self):\n\t\treturn self.lives == 0", "def _check_game_over(self):\n return self.game_board.check_game_over()", "def game_over(self):\n # TODO: Define the game over condition for Adventure.\n # use self.over to determine if the game if over\n return self.over", "def is_over(self):\n return self.game.is_over()", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def notify_game_over(self):\n self.is_game_over = True", "def check_if_game_over():\n check_for_winner()\n check_for_tie()", "def is_game_over(self):\n if (self.check_win(HexBoard.RED) or self.check_win(HexBoard.BLUE) or \n len(self.get_move_list())==0):\n self.game_over = True\n return self.game_over", "def game_over(self):\n return bool(self.last_round and self.last_player == self.current_player)", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def check_if_over(self):\n if self.remainingBalls == 0:\n self.check_if_won()\n self.game_over = True", "def gameover(self):\n if self._gameover:\n return True\n \n if self.terminal():\n self._gameover = True\n return True\n \n return False", "def is_game_over(self):\n return self.state.all_avatars_placed() and self.state.is_game_over()", "def game_over(self):\n return self.lives() < 0", "def is_over(self):\n winner = TictactoeMatch.get_winner(self.inputs_)\n if winner:\n self.result_ = winner\n if Config.USER['debug']['enabled']:\n print \"It is over! Player \"+str(self.result_)+\" (\"+str(self.player_label_[self.result_])+\") wins!\"\n return True\n for value in self.inputs_:\n if value == TictactoeMatch.EMPTY:\n if Config.USER['debug']['enabled']:\n print \"Go!\"\n return False\n self.result_ = TictactoeMatch.DRAW\n if Config.USER['debug']['enabled']:\n print \"It is over! 
Draw!\"\n return True", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def check_if_game_over():\n # Calling check for winners.\n check_for_winner()\n # Calling check it's tie or not.\n check_if_tie()", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "def gameOver():\n PTS, COIN, LIVES = 0, 1, 2\n uniSprite = 0\n globalSound(\"stop\") # Stopping any music\n playSound(overSound, \"music\") # Playing game over music\n startTime = time.get_ticks()\n # Game over screen should only stay for 5 seconds\n while time.get_ticks() - startTime < 5000:\n for evnt in event.get():\n if evnt.type == QUIT:\n return \"exit\"\n # Drawing game over screen\n screen.fill(BLACK)\n uniSprite = spriteCounter(uniSprite)\n drawStats(None, None, marioScore[PTS], marioScore[COIN], time.get_ticks(), levelNum, True, True, statCoin,\n uniSprite, 0)\n screen.blit(overText,(300,300)) # Blitting game over text\n display.flip()\n fpsCounter.tick(60)\n return \"menu\"", "def game_over(self):\r\n win.blit(self.image_of_game_over, (250, 170))", "def game_over(self):\n if self.alive:\n return\n\n self.screen.fill(Color.BLACK)\n self.draw_text(\n \"GAME OVER\", WIN_CENTER, font=FONT_M, size=48, color=Color.WHITE\n )\n again = \"Press any key to play again\"\n again_pos = CENTER_W, WIN_H - BLOCK_H\n self.draw_text(again, again_pos, color=Color.WHITE)\n\n pygame.display.flip()\n self.wait_keydown()\n\n if self.running:\n self.reset()", "def is_game_over(self):\n bk = False\n wk = False\n\n # Find the kings\n for row in range(8):\n for col in range(8):\n if self.board.squares[row][col] == ChessPiece.B_KING: # Black king symbol\n bk = True\n break\n if self.board.squares[row][col] == ChessPiece.W_KING: # Black king symbol\n wk = True\n break\n\n # If a king is missing, end the game. This fixes a bug we were having\n if bk == False:\n return 1\n if wk == False:\n return 2\n\n if self.white_wins():\n return 1\n elif self.black_wins():\n return 2\n elif self.tie():\n return 3\n else:\n return 0" ]
[ "0.86978006", "0.8201276", "0.81808716", "0.8180014", "0.81176275", "0.80931705", "0.80865484", "0.8077599", "0.8070335", "0.80282694", "0.795851", "0.792699", "0.7911095", "0.7853995", "0.78158563", "0.78106606", "0.77769727", "0.7745965", "0.7730437", "0.77171916", "0.7712785", "0.76193255", "0.75879323", "0.75355756", "0.74990386", "0.7426484", "0.7424958", "0.74128526", "0.739425", "0.73870254" ]
0.8234252
1
Flip player from X to O and back
def flip_player(cls): cls.current_player = 'X' if cls.current_player == 'O' else 'O' cls.display_board() cls.prompt_player()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flip_player():\n global current_player\n # If current player is 'X', then set current player to 'O'.\n if current_player == 'X':\n current_player = 'O'\n # If current player is 'O', then set current player to 'X'.\n elif current_player == 'O':\n current_player = 'X'", "def flip(self):", "def flip(self, x, y):\n self.pieces[x + (y * self.width)].flip()", "def swap_player():\n global current_player\n if current_player == \"X\":\n current_player = \"O\"\n elif current_player == \"O\":\n current_player = \"X\"", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def _flip_turn(self):\n self._turn = self._next_turn\n return self._turn", "def switch_player(player):\n if player == PLAYERX:\n return PLAYERO\n else:\n return PLAYERX", "def switch_player(player):\n if player == PLAYERX:\n return PLAYERO\n else:\n return PLAYERX", "def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()", "def switchPlayer(self):\n\t\tif (self.current is Piece.EX):\n\t\t\tself.current = Piece.OH \n\t\telse:\n\t\t\tself.current = Piece.EX", "def flip(self, p):\n return -p", "async def foggle_flip(self, ctx: Context, base: Bases = 10):\n ...", "def flip(self, mode='h'):\n # TODO: Implement the flip function. Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n if mode == 'h':\n self.is_horizontal_flip = True\n self.x = np.flipud(self.x)\n elif mode == 'v':\n self.is_vertical_flip = True\n self.x = np.fliplr(self.x)\n else:\n self.is_vertical_flip = True\n self.is_horizontal_flip = True\n self.x = np.fliplr(self.x)\n self.x = np.flipud(self.x)\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def set_flip(self, val):\n self.flip = val", "def change_player_turn(self):\r\n self._player_turn *= -1", "def flip():\n #Roller turns to curl page\n pwm.ChangeDutyCycle(11)\n time.sleep(0.22)\n pwm.ChangeDutyCycle(0)\n time.sleep(3)\n\n #flipper flips\n pwm2.ChangeDutyCycle(7.8)\n time.sleep(4)\n pwm2.ChangeDutyCycle(0)\n time.sleep(3)\n\n \"\"\"#Flipper turns to flip page and flips back\n pwm2.ChangeDutyCycle(4)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(8)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(12)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(13)\n time.sleep(3)\n pwm2.ChangeDutyCycle(2.4)\n time.sleep(5)\"\"\"\n\n \"\"\"#Rollers turning back\n pwm.ChangeDutyCycle(1)\n time.sleep(0.2)\n pwm.ChangeDutyCycle(0)\n time.sleep(3)\"\"\"", "def flip_icon(icon):\n\n return (\"X\" if icon == \"O\" else \"O\")", "def reverse_move(self):\n # assign previous values to game variables and remove one entry from game history\n (self.turn_number, active_player, self.player1, self.player2) = self.history.pop()\n\n # assign the right current player\n if active_player == 'player 1':\n self.current_player = self.player1\n elif active_player == 'player 2':\n self.current_player = self.player2\n\n else: # there was an error with deciding the current player\n assert False, \" reversing moves player assignment broke \"", "def flip_faceup(self):\r\n self.faceup = True", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def flip(self):\n if self.is_face_up:\n arcade.load_texture(self.back_file)\n self.is_face_up = False\n else:\n 
arcade.load_texture(self.face_file)\n self.is_face_up = True", "def switch_playing_direction(position_coords):\n # just mirrors the x-coordinate in place\n position_coords[:,0::2] *= -1", "def test_switch_player(self):\n\n previous_player = self.controller.game_state.player\n\n #compare the current player to the previous player after calling the\n #flip_current_player() function\n self.controller.game_state.flip_current_player()\n self.assertNotEqual(self.controller.game_state.player, previous_player)", "def flip(self, xbool, ybool):\n self._surf = pygame.transform.flip(self._surf, xbool, ybool).convert_alpha()", "def test_switch_returns(self):\n\n #Player 1 and Player 2 are represented by 1 and -1\n #Multiplying current_player by -1 will flip them\n current_player = self.controller.game_state.player * -1\n\n #after running flip_current_player function in the controller,\n # test current player\n self.assertEqual(self.controller.game_state.flip_current_player(),\n current_player)", "def swap_player(self):\n\n # if player 1 then switch to player 2\n if self.current_player == 1:\n self.current_player += 1\n else:\n self.current_player -= 1\n self.playing_player = self.players[self.current_player]\n return self.playing_player", "def flip_cards(self):\n for card_ in self.cards:\n card_.flip()", "def flip_frame(frame, flip_code):\n return cv.flip(frame, flip_code)", "def flip():\n return random.choice((True, False))" ]
[ "0.81298226", "0.7432394", "0.6961494", "0.6864068", "0.68307495", "0.67842346", "0.6708159", "0.6708159", "0.66889834", "0.6668064", "0.65996915", "0.6585072", "0.6548481", "0.6457186", "0.6447207", "0.6433932", "0.63997906", "0.6399247", "0.6390208", "0.6349105", "0.63409334", "0.6272927", "0.62538666", "0.6216078", "0.6211353", "0.6183724", "0.6170852", "0.614946", "0.6141632", "0.6117954" ]
0.8289133
0
Store current column width to the class variable
def __store_column_width(self): self.header_width = [] for i in range(0, self.view.header().count()): self.header_width.append(self.view.columnWidth(i))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def width(self):\n return self.col", "def width(self, width):\n self.col += width", "def width(self):\n\t\tpass", "def set_column_width(self, index, width):\n self.colwid[index] = width", "def get_column_width(self, index):\n return self.colwid[index]", "def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])", "def col_width(self,column_no): \n if(column_no == 0 and self.serialize):\n return self.col_width_dict['_serial_'] \n column = self.columns[column_no - (1 if self.serialize else 0)]\n return self.col_width_dict[column]", "def _update_width(self, is_commit_in_existing_columns):\n max_cols = self.num_columns + self.num_parents\n\n # Even if the current commit has no parents to be printed, it still\n # takes up a column for itself.\n if self.num_parents < 1:\n max_cols += 1\n\n # We added a column for the current commit as part of self.num_parents.\n # If the current commit was already in self.columns, then we have double\n # counted it.\n if is_commit_in_existing_columns:\n max_cols -= 1\n\n # Each column takes up 2 spaces\n self.width = max_cols * 2", "def _refresh_width(self):\n self._width = curses.tigetnum('cols')\n self._writer = formatter.DumbWriter(self._output, maxcol=self._width)", "def get_grid_width(self):\r\n # replace with your code\r\n return self.grid_width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._width", "def width(cls):\n return cls._width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width", "def get_grid_width(self):\n # replace with your code\n return self.grid_width", "def get_grid_width(self):\n # replace with your code\n return self.grid_width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def OnColumnResize(self,event):\r\n iColumn = event.GetColumn()\r\n column = self.data.getParam('columns')[iColumn]\r\n self.data.updateParam('colWidths')[column] = self.gList.GetColumnWidth(iColumn)", "def width(self) -> int:", "def width(self) -> int:", "def setColumnWidth(self, column, newWidth = None):\n\t\t\t\tdef yieldWidths():\n\t\t\t\t\tfor i, row in enumerate(self.thing.iter_rows(), start = 1):\n\t\t\t\t\t\twidth = self.getCellWidth(i, column)\n\t\t\t\t\t\tif (width is not None):\n\t\t\t\t\t\t\tyield width\n\n\t\t\t\tif (newWidth is None):\n\t\t\t\t\t#Find the longest cell in the column\n\t\t\t\t\tpossibleWidths = tuple(yieldWidths())\n\t\t\t\t\tif (possibleWidths):\n\t\t\t\t\t\tnewWidth = max(possibleWidths)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewWidth = -1 #Compensate for blank columns\n\n\t\t\t\t#Apply the new width\n\t\t\t\tnewWidth += 2\n\t\t\t\tself.thing.column_dimensions[openpyxl.utils.get_column_letter(column)].width = newWidth", "def get_grid_width(self):\n # replace with your code\n return self._grid_width", "def get_grid_width(self):\n # replace with your code\n return self._grid_width", "def GetWidth(self):\r\n\r\n return self._total_col_width", "def total_width(self): \n total = 0\n for i in range(self.no_of_columns()):\n total += self.col_width(i)\n return total", "def get_column_width(self):\n return \"%.2f\\\\locallinewidth\" % self._col_width[self._cell_in_row-1]", "def widths(self):\n return self._widths", "def width(self):\n return len(self.rows[0])", "def _SetWidth(self, column_index, content_length):\n # Updates the 
width at position column_index to be the max of the existing\n # value and the new content's length, or this instance's max_column_width if\n # the value would be greater than max_column_width.\n if column_index == len(self._widths):\n self._widths.append(0)\n\n new_width = max(self._widths[column_index], content_length)\n if self._max_column_width is not None:\n new_width = min(self._max_column_width, new_width)\n self._widths[column_index] = new_width" ]
[ "0.7448393", "0.7195969", "0.6882216", "0.6862825", "0.68365276", "0.67640996", "0.6689337", "0.6625792", "0.66146314", "0.65848327", "0.6570916", "0.6541092", "0.6492291", "0.6492291", "0.6472699", "0.6472699", "0.64391994", "0.64391994", "0.6408329", "0.6369815", "0.6369815", "0.6362952", "0.6362558", "0.6362558", "0.6359359", "0.6295413", "0.6281188", "0.6274148", "0.62514335", "0.62309843" ]
0.73608655
1
The name of the world
def world_name(self) -> str: return os.path.basename(self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_world_name(self):\n dt = datetime.now()\n return f'onboarding_world_{dt.strftime(\"%H-%M-%S\")}'", "def _getName(self):\n return 'HERE'", "def name():\n pass", "def name():\n pass", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def name() -> str:\n pass", "def get_name():", "def name(self) -> str:\r\n return f\"{NAME}: {self.location_name}\"", "def name(self):\n pass", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def name(self):\r\n pass", "def get_name():\n return \"Boss\"", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def get_name(self):\n # <<-- Creer-Merge: get-name -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n return \"Checkers Python Player\" # REPLACE THIS WITH YOUR TEAM NAME\n # <<-- /Creer-Merge: get-name -->>", "def get_name() -> str:\n pass", "def get_name(self):\n return self.publicArea.get_name()", "def name(self) -> str: # pragma: no cover", "def name(self): \n\t\treturn self._name" ]
[ "0.7873946", "0.71849513", "0.716453", "0.716453", "0.69763964", "0.69763964", "0.69763964", "0.69763964", "0.69763964", "0.69763964", "0.697512", "0.68596095", "0.6833829", "0.68063414", "0.67995363", "0.67995363", "0.67995363", "0.67995363", "0.6779754", "0.6778933", "0.6777429", "0.6777429", "0.6777429", "0.6777429", "0.6777429", "0.6727877", "0.6704215", "0.6696558", "0.66460025", "0.66374296" ]
0.7815104
1
Get the options for a specific plugin. This will be the list of options that is registered and loaded by the specified plugin.
def get_plugin_options(name): return get_plugin_loader(name).get_options()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def options() -> List:\n return list(c.value for c in Plugin)", "def list_plugin_options(request):\n options = {}\n options.update(plugin.get_plugin_options(request.matchdict['plugin']))\n options.update(plugin.get_plugin_vizoptions(request.matchdict['plugin']))\n return options", "def get_options(self):\r\n return self._option_values", "def _get_options(self):\n return self.options", "def get_options(self):\n\t\treturn self.options", "def options(self):\n\t\treturn self.config_parser.options(self.section_name)", "def get_all_options(self): \n return self._options.items()", "def get_options(self):\n return []", "def options(self):\n return list(self._moptions.keys())", "async def get_plugin_config(self, **kwargs) -> Any:\n namespace = self._get_namespace(**kwargs)\n return await self.AD.plugins.get_plugin_meta(namespace)", "def get_options(self):\n if self.parent is not None:\n all_options = self.parent.get_options()\n else:\n all_options = []\n all_options.extend(self.options)\n return all_options", "def get_options(self, options=[], blogid=1):\n return self.execute('wp.getOptions', blogid, self.username, self.password, options)", "def get_plugin_settings(plugin, directory=None):\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins')\n return plugins.get(plugin) if isinstance(plugins, dict) else None", "def get_tool_options(name, fmt='json', **kwargs):\n plugin = load_plugins('tool', name)[name]\n return plugin.get_tool_options(fmt, **kwargs)", "def get_extension_options(self):\n options = []\n for extension in self.extensions:\n extension_options = getattr(extension, \"OPTIONS\", None)\n if extension_options:\n options.extend(extension_options)\n return options", "def plugin_options(request):\n options = (\n '--rabbit-amqp-uri',\n '--rabbit-api-uri'\n )\n\n args = [\n \"{}={}\".format(opt, request.config.getoption(opt)) for opt in options\n ]\n return args", "def get_options(cls, player, context={}):\n\t\traise NotImplementedError()", "def get_plugins(self) -> dict:\n return Config.get_plugins()", "def options(self):\r\n return self._options", "def options(self, section):\n try:\n return list(self._dict[section])\n except KeyError as e:\n raise NoSectionError(str(e)) from None", "def options(self):\n return self.__options", "def get_options(self):\n return (\n Option('-H', '--host',\n dest='host',\n default=self.host,\n help='IP address or hostname of the Glancesync server.'),\n\n Option('-p', '--port',\n dest='port',\n type=int,\n default=self.port,\n help='Port in which the GlanceSync server is running'),\n\n Option('-w', '--workers',\n dest='workers',\n type=int,\n default=self.workers,\n help='Number of concurrent workers to be launched, usually 2*core numbers+1.'),\n )", "async def get_options(self):", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def get_options(self):\n return self._scoped_options", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return 
self.exploit.options\n else:\n return None" ]
[ "0.79400194", "0.7497427", "0.7103521", "0.70529085", "0.70166826", "0.6732522", "0.6621489", "0.6575808", "0.6427052", "0.64174426", "0.6388949", "0.6343626", "0.6336009", "0.6335064", "0.6332171", "0.6314137", "0.6260486", "0.62462914", "0.6233066", "0.6188137", "0.6175025", "0.6160741", "0.6154006", "0.6147905", "0.6147905", "0.6147905", "0.6147905", "0.6147905", "0.6130036", "0.6098907" ]
0.7943178
0
Create a plugin from the options available for the loader. Given the options that were specified by the loader create an appropriate plugin. You can override this function in your loader. This used to be specified by providing the plugin_class property and this is still supported, however specifying a property didn't let you choose a plugin type based upon the options that were presented. Override this function if you wish to return different plugins based on the options presented, otherwise you can simply provide the plugin_class property. Added 2.9
def create_plugin(self, **kwargs): return self.plugin_class(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_from_options(self, **kwargs):\n missing_required = [o for o in self.get_options()\n if o.required and kwargs.get(o.dest) is None]\n\n if missing_required:\n raise exceptions.MissingRequiredOptions(missing_required)\n\n return self.create_plugin(**kwargs)", "def load_plugin(plugin_class_path, **plugin_kwargs):\n plugin_class = class_from_class_path(plugin_class_path)\n plugin = plugin_class(**plugin_kwargs)\n return plugin", "def __post_init__(self, pluginOptions: Optional[dict[str, RuntimeOptionValue]]) -> None:\n if pluginOptions:\n existingBaseOptions = sorted(\n optionName\n for optionName in pluginOptions.keys()\n if hasattr(self, optionName)\n )\n if existingBaseOptions:\n raise RuntimeOptionsException(_('Provided plugin options already exist as base options {}').format(existingBaseOptions))\n for optionName, optionValue in pluginOptions.items():\n setattr(self, optionName, optionValue)\n if (self.entrypointFile is None and\n not self.proxy and\n not self.plugins and\n not pluginOptions and\n not self.webserver):\n raise RuntimeOptionsException(_('Incorrect arguments'))\n if self.webserver and not hasWebServer():\n raise RuntimeOptionsException(_(\"Webserver option requires webserver module\"))\n if self.webserver and any((\n self.entrypointFile, self.importFiles, self.diffFile, self.versReportFile,\n self.factsFile, self.factListCols, self.factTableFile, self.factTableCols,\n self.relationshipCols, self.conceptsFile, self.preFile, self.tableFile, self.calFile,\n self.dimFile, self.anchFile, self.formulaeFile, self.viewArcrole, self.viewFile,\n self.roleTypesFile, self.arcroleTypesFile\n )):\n raise RuntimeOptionsException(_('Incorrect arguments with webserver'))", "def make_plugin(audiences=None, token_url=None, nonce_timeout=None, **kwds):\n # You *must* specify the \"audiences\" parameter since it's integral\n # to the security of the protocol. 
If you want it set to None to\n # allow checking based on HTTP_HOST, set it to the empty string.\n if audiences is None:\n raise ValueError('You must specify the \"audiences\" parameter')\n if not audiences:\n audiences = None\n elif isinstance(audiences, basestring):\n audiences = audiences.split()\n # Load the token manager, possibly from a class+args.\n token_manager = _load_from_callable(\"token_manager\", kwds)\n # Load the VEP verifier, possibly from a class+args.\n # Assume \"urlopen\" is a dotted-name of a callable.\n verifier = _load_from_callable(\"verifier\", kwds, converters={\n \"urlopen\": resolveDotted\n })\n # If there are any kwd args left over, that's an error.\n for unknown_kwd in kwds:\n raise TypeError(\"unknown keyword argument: %s\" % unknown_kwd)\n plugin = VEPAuthPlugin(audiences, token_url, token_manager, verifier,\n nonce_timeout)\n return plugin", "def create_r2plugin(self, **kwargs):\n return self.create_tool(cls=R2Plugin, **kwargs)", "def getInstance(config):\n return Plugin(config)", "def getInstance(config):\n return Plugin(config)", "def new(self, plugin, *args, **kwargs):\n if plugin in self.modules.keys():\n return self.modules[plugin](*args, **kwargs)", "def get_plugin_options(name):\n return get_plugin_loader(name).get_options()", "def __init__(\n self,\n label : str,\n debug : bool = False,\n **kw\n ):\n super().__init__('plugin', label=label, **kw)\n\n import os, pathlib, sys\n from meerschaum.utils.warnings import error, warn\n from meerschaum.config._paths import PLUGINS_RESOURCES_PATH\n if str(PLUGINS_RESOURCES_PATH.parent) not in sys.path:\n sys.path.append(str(PLUGINS_RESOURCES_PATH.parent))\n\n self.resource_path = None\n for _plugin in os.listdir(PLUGINS_RESOURCES_PATH):\n plugin = _plugin.replace('.py', '')\n if plugin == self.label:\n self.resource_path = pathlib.Path(os.path.join(PLUGINS_RESOURCES_PATH, plugin))\n break\n if not self.resource_path:\n error(f\"Plugin '{self.label}' cannot be found. 
Is it installed?\")\n\n self.fetch = None\n try:\n exec(f'from plugins.{self.label} import fetch; self.fetch = fetch')\n except Exception as e:\n pass\n\n self.sync = None\n try:\n exec(f'from plugins.{self.label} import sync; self.sync = sync')\n except Exception as e:\n pass\n\n if self.fetch is None and self.sync is None:\n error(f\"Could not import `fetch()` or `sync()` methods for plugin '{self.label}'\")", "def get_feature_extractor_from_options(options):\n if options.feature_extractor == 'precropped':\n return GraphConstructor()\n if options.feature_extractor == 'instance_segmenter':\n loss_weights = {'loss_rpn_cls':options.loss_rpn_cls,\n 'loss_rpn_loc':options.loss_rpn_loc,\n 'loss_cls':options.loss_cls,\n 'loss_box_reg':options.loss_box_reg,\n 'loss_mask':options.loss_mask}\n return InstanceSegmenter(enable_seg_losses=options.enable_seg_losses, loss_weights=loss_weights)", "def plugin_class() -> Type[\"NitpickPlugin\"]:\n return TextPlugin", "def __init__(\n self,\n plugin_id,\n plugin_name,\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n ):\n (\n self.__plugin_id,\n self.__plugin_names,\n self.__plugin_description,\n self.__plugin_instance,\n self.__plugin_enabled_by_default,\n self.__plugin_version,\n self.__plugin_interface_version,\n self.__plugin_file_name,\n self.__plugin_url,\n self.__plugin_configuration,\n ) = (\n plugin_id.strip().lower(),\n [],\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n )\n for next_name in plugin_name.lower().split(\",\"):\n next_name = next_name.strip()\n if next_name:\n self.__plugin_names.append(next_name)", "def load_parsers_from_plugins(subparser, plugins):\n for plugin_name, plugin_class in plugins.items():\n # create a parser object for the plugin.\n plugin_parser = subparser.add_parser(\n plugin_name,\n description = plugin_class.__doc__,\n )\n\n plugin_parser.add_argument('vpc_name', help='The VPC\\'s Name tag.')\n\n try:\n # Assume class plugin with 'setup_parser' and 'main' staticmethods.\n plugin_class.setup_parser(plugin_parser)\n plugin_parser.set_defaults(func = plugin_class.main)\n except AttributeError:\n # Assume function plugin w/o 'setup_parser' or 'main' staticmethods.\n plugin_parser.set_defaults(func = plugin_class)", "def getPlugin(self, *args):\n return _libsbml.SBase_getPlugin(self, *args)", "def classFactory(iface):\n from .plugin_builder import PluginBuilder\n return PluginBuilder(iface)", "def get_plugin(self, name):", "def get_plugin(self, config_name) -> type:\n if config_name not in (plugin_names := self._subclasses.keys()):\n raise ValueError(\n f\"The plugin {config_name} has not been registered. 
Valid options: \"\n f\"{', '.join([plugin for plugin in plugin_names])}.\"\n )\n\n return self._subclasses[config_name]", "def get_dynamic_class_instantiation(cls, package_name, module_name, parameters=None):\n logger.debug(\"Run plugin %s with parameter %s\" % (module_name, parameters))\n module_name_with_path = package_name + \".\" + module_name.lower() + \".\" + module_name.lower()\n mod = __import__(module_name_with_path, fromlist=[module_name])\n try:\n klass = getattr(mod, module_name)\n except AttributeError:\n logger.debug(\"Error: No module named %s \" % module_name)\n raise ModuleNotFoundError(\"The module %s does not exist in package %s\" % (module_name, package_name))\n\n if klass is not None:\n # run the plugin\n if not parameters:\n return klass()\n elif isinstance(parameters, dict):\n return klass(**parameters)\n else:\n return klass(parameters)\n return None", "def cli_load_plugin(self, args) -> str:\n plugin_name = args.plugin_name\n current_dir = os.path.dirname(os.path.realpath(__file__))\n if not os.path.isfile(\"{}/{}.py\".format(current_dir, plugin_name)):\n return error(\"Plugin {} DNE\".format(plugin_name))\n\n # First, let's see if this is already imported\n module_name = \"plugins.{}\".format(plugin_name)\n if module_name in sys.modules:\n self.cli_unload_plugin(plugin_name)\n mod = sys.modules[module_name]\n importlib.reload(mod)\n self.register_plugin(get_class(mod, plugin_name))\n return ok(\"Plugin {} reloaded\".format(plugin_name))\n\n importlib.invalidate_caches()\n mod = importlib.import_module(module_name)\n self.register_plugin(get_class(mod, plugin_name))\n return ok(\"Plugin {} loaded\".format(plugin_name))", "def load_plugins():\n try:\n plugin_configuration = Configuration().Plugins\n except AttributeError:\n return {}\n else:\n def create_instance(plugin):\n return getattr(import_module(name=f\"tardis.plugins.{plugin.lower()}\"), f'{plugin}')()\n return {plugin: create_instance(plugin) for plugin in plugin_configuration.keys()}", "def import_plugin(plugin_module_name: str, plugin_class_name: str) -> BasePlugin:\n plugin_module = importlib.import_module(plugin_module_name)\n plugin_class = getattr(plugin_module, plugin_class_name)\n if not issubclass(plugin_class, BasePlugin):\n raise Exception(f\"{plugin_module_name}.{plugin_class_name} must be a subclass of {BasePlugin}\")\n return plugin_class", "def uctt_plugin_factory_provisioner_config(\n environment: Environment, instance_id: str = ''):\n return ProvisionerCliPlugin(environment, instance_id)", "def load_plugin(name, plugin_context, globals_=None):\n \n # remap friendly names to actual implementation. 
this is a bit of\n # abstraction leakage and should instead be dynamically registered\n # (though much more expensive).\n\n #TODO: this is a hack until rosh_testing is a proper framework/plugin\"\n if name == 'rosh.impl.testing':\n import rosh.impl.testing\n rosh.impl.testing.load_rosh_plugin('rosh.impl.ros_testing', plugin_context, globals_)\n return\n \n try:\n # make sure plugin exists for accurate error reporting\n roslib.packages.get_pkg_dir(name)\n except roslib.packages.InvalidROSPkgException, e:\n raise NoPlugin(\"invalid plugin [%s]: no ROS package named [%s]\"%(name, name))\n try:\n roslib.load_manifest(name)\n except roslib.packages.InvalidROSPkgException, e:\n raise InvalidPlugin(\"Failed to locate dependencies of plugin [%s]: \\n[%s]\"%(name, str(e)))\n try:\n m = __import__(name)\n except ImportError:\n raise InvalidPlugin(\"invalid plugin [%s]: python module [%s] import failed\"%(name, name))\n try:\n loader = getattr(m, 'rosh_plugin_load')\n except AttributeError, e:\n raise InvalidPlugin(\"invalid plugin [%s]: plugin is missing rosh_plugin_load() entry point\"%(name))\n\n try:\n plugin_data = loader(plugin_context, globals_)\n except Exception, e:\n raise InvalidPlugin(\"invalid plugin [%s]: plugin raised Exception on load: %s\"%(name, e))\n errors = []\n if plugin_data is not None:\n \n # Wire up plugins. Although the plugins could do this\n # themselves, de-coupling provides better protection against\n # future implementation changes and is also easier to write\n # tests for.\n\n # - We load as much as we can, record the errors, then\n # re-raise at the end.\n for plugin_api_id, callback in plugin_data.apis.iteritems():\n try:\n plugin_context._register_api(plugin_api_id, callback)\n except Exception, e:\n errors.append(e)\n\n for plugin_api_id, args in plugin_data.handlers.iteritems():\n try:\n plugin_context._register_handler(plugin_api_id, args)\n except Exception, e:\n errors.append(e)\n\n if errors:\n error_str = [str(e) for e in errors]\n raise ROSHException(\"errors loading plugin [%s]: \\n%s\"%(name, '\\n'.join(error_str)))", "def plugin_for_target(self, target):\r\n try:\r\n if isinstance(target, str):\r\n self.log.debug(\"Attempting to match string target (%s)\" % target)\r\n plugin_name = self._targets.get(tuple([target]))\r\n if not plugin_name:\r\n raise ImageFactoryException(\"No plugin .info file loaded for target: %s\" % (target))\r\n plugin = __import__('%s.%s' % (PKG_STR, plugin_name), fromlist=['delegate_class'])\r\n return plugin.delegate_class()\r\n elif isinstance(target, tuple):\r\n _target = list(target)\r\n self.log.debug(\"Attempting to match list target (%s)\" % (str(_target)))\r\n for index in range(1, len(target) + 1):\r\n plugin_name = self._targets.get(tuple(_target))\r\n if not plugin_name:\r\n _target[-index] = None\r\n else:\r\n plugin = __import__('%s.%s' % (PKG_STR, plugin_name), fromlist=['delegate_class'])\r\n return plugin.delegate_class()\r\n except ImportError as e:\r\n self.log.exception(e)\r\n raise ImageFactoryException(\"Unable to import plugin for target: %s\" % str(target))", "def __register_individual_plugin(\n self,\n plugin_instance,\n instance_file_name,\n command_line_enabled_rules,\n command_line_disabled_rules,\n properties,\n ):\n\n plugin_object = self.__get_plugin_details(plugin_instance, instance_file_name)\n\n next_key = plugin_object.plugin_id\n if not PluginManager.__id_regex.match(next_key):\n raise ValueError(\n f\"Unable to register plugin '{instance_file_name}' with id '{next_key}' as id is not a valid id 
in the form 'aannn' or 'aaannn'.\"\n )\n\n if next_key in self.__all_ids:\n found_plugin = self.__all_ids[next_key]\n raise ValueError(\n f\"Unable to register plugin '{instance_file_name}' with id '{next_key}' as plugin '{found_plugin.plugin_file_name}' is already registered with that id.\"\n )\n self.__all_ids[next_key] = plugin_object\n\n for next_key in plugin_object.plugin_names:\n if not PluginManager.__name_regex.match(next_key):\n raise ValueError(\n f\"Unable to register plugin '{instance_file_name}' with name '{next_key}' as name is not a valid name in the form 'an-an'.\"\n )\n if next_key in self.__all_ids:\n found_plugin = self.__all_ids[next_key]\n raise ValueError(\n f\"Unable to register plugin '{instance_file_name}' with name '{next_key}' as plugin '{found_plugin.plugin_file_name}' is already registered with that name.\"\n )\n self.__all_ids[next_key] = plugin_object\n if not plugin_object.plugin_description.strip():\n raise ValueError(\n f\"Unable to register plugin '{instance_file_name}' with a description string that is blank.\"\n )\n if not PluginManager.__version_regex.match(plugin_object.plugin_version):\n raise ValueError(\n f\"Unable to register plugin '{instance_file_name}' with a version string that is not a valid semantic version.\"\n )\n\n self.__registered_plugins.append(plugin_object)\n if self.__determine_if_plugin_enabled(\n plugin_object,\n command_line_enabled_rules,\n command_line_disabled_rules,\n properties,\n ):\n self.__enabled_plugins.append(plugin_object)", "def load_plugin(plugin_name, default_plugin_module):\n try:\n if \".\" in plugin_name:\n # Assume external plugin, full path\n plugin_mod_name = plugin_name\n plugin_class_name = plugin_name.split(\".\")[-1].capitalize()\n else:\n # One of the built-in plugins\n plugin_mod_name = \"%s.%s\" % (default_plugin_module, plugin_name)\n plugin_class_name = plugin_name.capitalize()\n\n plugin_mod = importlib.import_module(plugin_mod_name)\n plugin_class = getattr(plugin_mod, plugin_class_name)\n return plugin_class\n except ImportError as e:\n raise PluginError(\"Cannot load '%s'\" % plugin_mod_name)\n except AttributeError:\n raise PluginError(\"Cannot find plugin class '%s' in \"\n \"plugin '%s'\" %\n (plugin_class_name, plugin_mod_name))\n except Exception as e:\n raise PluginError(\"Error while loading plugin '%s': %s\" %\n (plugin_mod_name, str(e)))", "def loadPlugin(*args, addCallback: Script=None, allPlugins: bool=True, name: AnyStr=\"\", quiet:\n bool=True, removeCallback: Script=None, **kwargs)->List[AnyStr]:\n pass", "def _plugin_create(cls, plugin_dir):\n plugin_path = os.path.join(settings.PLUGINS_PATH, plugin_dir,\n 'metadata.yaml')\n try:\n plugin_metadata = cls._parse_yaml_file(plugin_path)\n Plugin.create(plugin_metadata)\n except Exception as e:\n logger.error(\"cannot create plugin {0} from FS. Reason: {1}\"\n .format(plugin_dir, str(e)))", "def apply_plugin_settings(self, options):\n pass" ]
[ "0.71200705", "0.6517006", "0.5809091", "0.5786941", "0.5762168", "0.5752947", "0.5752947", "0.573117", "0.5687062", "0.5669033", "0.56606823", "0.56198794", "0.5616531", "0.5590346", "0.5573315", "0.5566388", "0.5536888", "0.5531279", "0.55006045", "0.54732233", "0.54677445", "0.5460529", "0.5455779", "0.54224974", "0.5378612", "0.5311913", "0.53008586", "0.52748764", "0.52715313", "0.5206195" ]
0.74608773
0
Create a plugin from the arguments retrieved from get_options. A client can override this function to do argument validation or to handle differences between the registered options and what is required to create the plugin.
def load_from_options(self, **kwargs): missing_required = [o for o in self.get_options() if o.required and kwargs.get(o.dest) is None] if missing_required: raise exceptions.MissingRequiredOptions(missing_required) return self.create_plugin(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)", "def setup_arguments_parser():\n\tglobal parser\n\n\tparser.plugin_add_argument('-a', '--args', dest = 'plugin_args', \n\t\tdefault = \"\",\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tArguments to send to the plugin. For a list of arguments to send to a specific plugin, use \"-l [#|name]\". Use \"-l all\" to list all plugins.\n\n\t\t\tEx: \n\t\t\tif the plugin takes the arguments...\n\n\t\t\t'-p [SOME NUMBER]' and '-u [SOME TEXT]'\n\n\t\t\tyou would input: -a \"-p 123 -u foo\"''')\n\n\tparser.plugin_add_argument('-p', '--plugin', dest = 'plugin_name', \n\t\tdefault = None,\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\t-p [#|name], or --plugin [#|name] \n\n\t\t\tName of plugin to use. Use the flag -l all or --list all to view a list of all available site plugins.''')\n\n\tparser.plugin_add_argument('-l', '--list', dest = 'plugin_list',\n\t\tdefault = None,\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tProvide a numbered listing of all plugins found. \n\n\t\t\tValid Parameters for PLUGIN_LIST:\n\t\t\t * all: for a list of all plugins.\n\n\t\t\t * #|name: to see more information on a specific plugin. You may use either the plugin # or the plugin name (if any) that is shown via \"-l all\"''')\n\n\tparser.plugin_add_argument('-o', '--out', dest = 'output_path',\n\t\tdefault = os.getcwd(),\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tPath to output to. If the path does not exist, it will be created. By default, this is the CWD.''')\n\n\tparser.plugin_add_argument('-w', '--wget', dest = 'wget_bin',\n\t\tdefault = None,\n\t\taction = 'store',\n\t\thelp = '''\n\t\t\tPath to wget executable, if available.''')", "def __post_init__(self, pluginOptions: Optional[dict[str, RuntimeOptionValue]]) -> None:\n if pluginOptions:\n existingBaseOptions = sorted(\n optionName\n for optionName in pluginOptions.keys()\n if hasattr(self, optionName)\n )\n if existingBaseOptions:\n raise RuntimeOptionsException(_('Provided plugin options already exist as base options {}').format(existingBaseOptions))\n for optionName, optionValue in pluginOptions.items():\n setattr(self, optionName, optionValue)\n if (self.entrypointFile is None and\n not self.proxy and\n not self.plugins and\n not pluginOptions and\n not self.webserver):\n raise RuntimeOptionsException(_('Incorrect arguments'))\n if self.webserver and not hasWebServer():\n raise RuntimeOptionsException(_(\"Webserver option requires webserver module\"))\n if self.webserver and any((\n self.entrypointFile, self.importFiles, self.diffFile, self.versReportFile,\n self.factsFile, self.factListCols, self.factTableFile, self.factTableCols,\n self.relationshipCols, self.conceptsFile, self.preFile, self.tableFile, self.calFile,\n self.dimFile, self.anchFile, self.formulaeFile, self.viewArcrole, self.viewFile,\n self.roleTypesFile, self.arcroleTypesFile\n )):\n raise RuntimeOptionsException(_('Incorrect arguments with webserver'))", "def do_plugin_create(cc, args):\n\n field_list = ['name', 'code', 'callable', 'public', 'extra']\n\n fields = dict((k, v) for (k, v) in vars(args).items()\n if k in field_list and not (v is None))\n\n fields = utils.args_array_to_dict(fields, 'extra')\n\n fl = fields['code']\n with open(fl, 'r') as fil:\n fields['code'] = fil.read()\n\n if args.params:\n fields['parameters'] = utils.json_from_file(args.params)\n\n plugin = cc.plugin.create(**fields)\n\n data = dict([(f, getattr(plugin, f, '')) for f in\n res_fields.PLUGIN_DETAILED_RESOURCE.fields])\n\n 
cliutils.print_dict(data, wrap=72, json_flag=args.json)", "def do_plugin(self, args):\n #TODO GET A LIST OF PARAMETERS FROM THE PLUGIN\n lex_parser = shlex.shlex(args.strip(), posix=True)\n lex_parser.whitespace += \"=\"\n lex_parser.escapedquotes = \"\"\n lex_parser.wordchars += \"~-./*?=\"\n tokens = [token for token in lex_parser]\n\n _MOD_LOGGER.debug(\"Plugin tokens: %s\", tokens)\n\n if len(tokens) < 1:\n print(\"Error, invalid command.\")\n return\n\n if tokens[0] == \"list\":\n self.print_plugin_list()\n elif tokens[0] == \"set\":\n self.set_plugin(tokens[1:])\n else:\n print(\"Error, invalid command.\")", "def make_plugin(audiences=None, token_url=None, nonce_timeout=None, **kwds):\n # You *must* specify the \"audiences\" parameter since it's integral\n # to the security of the protocol. If you want it set to None to\n # allow checking based on HTTP_HOST, set it to the empty string.\n if audiences is None:\n raise ValueError('You must specify the \"audiences\" parameter')\n if not audiences:\n audiences = None\n elif isinstance(audiences, basestring):\n audiences = audiences.split()\n # Load the token manager, possibly from a class+args.\n token_manager = _load_from_callable(\"token_manager\", kwds)\n # Load the VEP verifier, possibly from a class+args.\n # Assume \"urlopen\" is a dotted-name of a callable.\n verifier = _load_from_callable(\"verifier\", kwds, converters={\n \"urlopen\": resolveDotted\n })\n # If there are any kwd args left over, that's an error.\n for unknown_kwd in kwds:\n raise TypeError(\"unknown keyword argument: %s\" % unknown_kwd)\n plugin = VEPAuthPlugin(audiences, token_url, token_manager, verifier,\n nonce_timeout)\n return plugin", "def test_plugin_initialize_from_args(self):\n sys.argv.append('-t')\n p = PluginCustom()\n self.assertEqual('yourah', p.toto)", "def create_r2plugin(self, **kwargs):\n return self.create_tool(cls=R2Plugin, **kwargs)", "def from_options(cls,\n name=None,\n potential_source=None,\n potential_directory=None,\n function_name=None,\n arguments=None,\n wrap_potential=False,\n **params\n ):\n\n src = potential_source\n\n params = ParameterManager(**params)\n\n\n if potential_directory is None:\n from ..Interface import RynLib\n potential_directory = RynLib.potential_directory()\n\n if wrap_potential:\n src = cls.wrap_potential(\n name, src,\n potential_directory=potential_directory,\n arguments=arguments,\n **params.filter(PotentialTemplate)\n )\n\n # prepare args for the loader\n loader = PotentialLoader(\n name,\n src,\n load_path=[os.path.join(potential_directory, name), src],\n **params.filter(PotentialLoader)\n )\n\n # set up spec\n callable = loader.call_obj\n if isinstance(callable, FFIModule):\n spec = callable.get_method(function_name)\n else:\n spec = PotentialArgumentSpec(arguments, name=function_name)\n\n # then set up caller\n caller = PotentialCaller(\n callable,\n function_name,\n **params.filter(PotentialCaller)\n )\n\n return cls(name, spec, caller, **params.filter(cls))", "def new_plugin(ctx, **defaults):\n from .quickstart import plugin_quickstart\n\n project = ctx.get_project(silent=True)\n plugin_quickstart(defaults, project=project)", "def create(self, *args, **kwargs):\n factory = self.factory\n\n kwargs = coalesce_options(dict(kwargs),\n self.option_types)\n\n return factory(*args, **kwargs)", "def parse_args():\n parser = argparse.ArgumentParser(\n description='fpb is a fuel plugin builder which '\n 'helps you create plugin for Fuel')\n\n # TODO(vsharshov): we should move to subcommands instead of\n # 
exclusive group, because in this case we could not\n # support such behavior [-a xxx | [-b yyy -c zzz]]\n group = parser.add_mutually_exclusive_group(required=True)\n\n group.add_argument(\n '--create', help='create a plugin skeleton',\n type=decode_string, metavar='plugin_name')\n group.add_argument(\n '--build', help='build a plugin',\n type=decode_string, metavar='path_to_directory')\n group.add_argument(\n '--check', help='check that plugin is valid',\n type=decode_string, metavar='path_to_directory')\n\n parser.add_argument(\n '--debug', help='enable debug mode',\n action=\"store_true\")\n\n parser.add_argument(\n '--package-version', help='which package version to use',\n type=decode_string)\n\n result = parser.parse_args()\n package_version_check(result, parser)\n\n return result", "def __init__(self, *args, **kwargs):\r\n if args:\r\n self.addonId = args[0]\r\n else:\r\n posIni = len('plugin://')\r\n posFin = sys.argv[0].find('/', posIni)\r\n addonId = sys.argv[0][posIni:posFin]\r\n self.addonId = kwargs.get('id',None) or addonId", "def __init__(\n self,\n label : str,\n debug : bool = False,\n **kw\n ):\n super().__init__('plugin', label=label, **kw)\n\n import os, pathlib, sys\n from meerschaum.utils.warnings import error, warn\n from meerschaum.config._paths import PLUGINS_RESOURCES_PATH\n if str(PLUGINS_RESOURCES_PATH.parent) not in sys.path:\n sys.path.append(str(PLUGINS_RESOURCES_PATH.parent))\n\n self.resource_path = None\n for _plugin in os.listdir(PLUGINS_RESOURCES_PATH):\n plugin = _plugin.replace('.py', '')\n if plugin == self.label:\n self.resource_path = pathlib.Path(os.path.join(PLUGINS_RESOURCES_PATH, plugin))\n break\n if not self.resource_path:\n error(f\"Plugin '{self.label}' cannot be found. Is it installed?\")\n\n self.fetch = None\n try:\n exec(f'from plugins.{self.label} import fetch; self.fetch = fetch')\n except Exception as e:\n pass\n\n self.sync = None\n try:\n exec(f'from plugins.{self.label} import sync; self.sync = sync')\n except Exception as e:\n pass\n\n if self.fetch is None and self.sync is None:\n error(f\"Could not import `fetch()` or `sync()` methods for plugin '{self.label}'\")", "def new(self, plugin, *args, **kwargs):\n if plugin in self.modules.keys():\n return self.modules[plugin](*args, **kwargs)", "def define_options(parser=None, usage=None, conflict_handler='resolve'):\n if parser is None:\n parser = argparse.ArgumentParser(usage=usage, conflict_handler=conflict_handler)\n\n parser.add_argument('-i', '--instrument', type=str, default=None, choices=['niriss', 'nircam', 'nirspec', 'miri', 'fgs'], help='Instrument. 
(default=%(default)s)')\n parser.add_argument('-p', '--prev_or_thumb', type=str, default=None, choices=['p', 't'], help='Work on preview images (p) or thumbnails (t)')\n parser.add_argument('-s', '--str_to_exclude', type=str, help='String controlling which entries are removed.')\n return parser", "def create_options(**kwargs):\n kwargs.setdefault(\"select\", [])\n kwargs.setdefault(\"extended_default_select\", [])\n kwargs.setdefault(\"extended_default_ignore\", [])\n kwargs.setdefault(\"extend_select\", [])\n kwargs.setdefault(\"ignore\", [])\n kwargs.setdefault(\"extend_ignore\", [])\n kwargs.setdefault(\"disable_noqa\", False)\n kwargs.setdefault(\"enable_extensions\", [])\n kwargs.setdefault(\"per_file_ignores\", [])\n return argparse.Namespace(**kwargs)", "def __init__(self, args):\n ClientPlugin.__init__(self)\n self.args = args", "def set_plugin_args(self, args: Dict[str, Any], plugin_key: Optional[str] = None):\n plugin_key = plugin_key or self._plugin_key\n plugin = self._get_plugin_conf(plugin_key)\n plugin['args'] = args\n return self", "def setup_args_create(parser):\n parser.add_argument(\"--domain\", required=False)\n parser.add_argument(\"--ansible\", required=False,\n dest=\"ansible\", action=\"store_true\")\n return parser", "def __init__(\n self,\n plugin_id,\n plugin_name,\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n ):\n (\n self.__plugin_id,\n self.__plugin_names,\n self.__plugin_description,\n self.__plugin_instance,\n self.__plugin_enabled_by_default,\n self.__plugin_version,\n self.__plugin_interface_version,\n self.__plugin_file_name,\n self.__plugin_url,\n self.__plugin_configuration,\n ) = (\n plugin_id.strip().lower(),\n [],\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n )\n for next_name in plugin_name.lower().split(\",\"):\n next_name = next_name.strip()\n if next_name:\n self.__plugin_names.append(next_name)", "def parse_args(self, args=None):\n # this line is necessary, the args = sys.argv[1:] in function\n # definition seems doesn't works\n if not args:\n args = sys.argv[1:]\n # fix the usage of the command python scout-cmd.py ...\n if len(args) == 1 and args[0].find(\"scout\") != -1:\n args = args[1:]\n\n if len(args) == 0:\n # if none of the argument was defined, show help\n raise HelpOptionFound()\n\n core_args, self._module_args = self._split_argument_line(args)\n\n # try to load the module name\n module = None\n if len(self._module_args) != 0:\n module = self._module_args[0]\n\n opts, args = self._parser.parse_args(core_args)\n\n # no HelpOptionFound raised, the module name is mandatory\n # FIXME: this would be rewritten\n if not opts.listing and len(self._module_args) == 0:\n msg = _('The name of module is mandatory. 
'\n 'Use %s --help') % (self._prog)\n raise OptionValueError(msg)\n\n return Options(self.__opts2dict(opts), args={'module': module})", "def register_options(options):\n return (\n options\n .register('jsonFilterFile',\n type_=str,\n default=None,\n description=\"Path to JSON file containing certified runs and luminosity blocks.\")\n .register('useHLTFilter',\n type_=bool,\n default=False,\n description=\"If True, only events triggered by one of the skimmed paths will be \"\n \"written out.\")\n .register('jetCollections',\n type_=str,\n default=[],\n multiplicity='list',\n description=\"The names of the jet collections to use (e.g. 'AK4PFCHS').\")\n .register('jecVersion',\n type_=str,\n default=None,\n description=\"Tag of JEC version to use for e.g. JEC uncertainties.\")\n .register('jecFromGlobalTag',\n type_=bool,\n default=False,\n description=\"If True, the JECs will be looked up in the conditions database \"\n \"(CondDB/Frontier) under the current global tag. If False, the \"\n \"text files for `jecVersion` will be used.\")\n .register('jerVersion',\n type_=str,\n default=None,\n description=\"Tag of JER version to use for e.g. jet smearing.\")\n .register('jerMethod',\n type_=str,\n default='stochastic',\n description=\"Method to use for JER smearing. One of: 'stochastic', 'hybrid'\")\n .register('jerGenMatchPtSigma',\n type_=float,\n default=3.0,\n description=\"Size of Gaussian core for 'hybrid' JER smearing.\")\n .register('jetIDSpec',\n type_=str,\n default=None,\n description=\"Version of Jet ID to use (e.g. '2016').\")\n .register('jetIDWorkingPoint',\n type_=str,\n default=None,\n description=\"Working point of Jet ID to use (e.g. 'TightLepVeto').\")\n .register('prefiringWeightFilePath',\n type_=str,\n default=\"\",\n description=\"Path to ROOT file containing prefiring weights.\")\n .register('prefiringWeightHistName',\n type_=str,\n default=\"\",\n description=\"Name of histogram inside prefiring weights file (e.g. 'L1prefiring_jetpt_2016BCD').\")\n .register('useObjectBasedJetID',\n type_=bool,\n default=False,\n description=\"If True, only jets passing the ID specified via 'jetIDSpec' and `jetIDWorkingPoint` will be considered valid.\")\n .register('checkForCompleteness',\n type_=bool,\n default=False,\n description=(\"(for testing) If True, will run some checks on the \"\n \"Ntuple output to ensure all branches are written out \"\n \"and no branch is omitted.\"))\n .register('stitchingWeight',\n type_=float,\n default=1.0,\n description=(\"(deprecated) The output branch 'stitchingWeight' \"\n \"will contain this value for each event. 
Can then be \"\n \"used when stitching together different samples.\"))\n .register('doJECUncertaintySources',\n type_=bool,\n default=False,\n description=\"Fill ntuple branch with JEC correction factors for individual JEC uncertainty sources.\")\n .register('doPrescales',\n type_=bool,\n default=False,\n description=\"Write out trigger prescales to Ntuple.\")\n .register('edmOut',\n type_=bool,\n default=False,\n description=\"(for testing only) Write out EDM file.\")\n )", "def test_option_passthrough(node_factory, directory):\n plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')\n\n help_out = subprocess.check_output([\n 'lightningd/lightningd',\n '--lightning-dir={}'.format(directory),\n '--help'\n ]).decode('utf-8')\n assert('--greeting' not in help_out)\n\n help_out = subprocess.check_output([\n 'lightningd/lightningd',\n '--lightning-dir={}'.format(directory),\n '--plugin={}'.format(plugin_path),\n '--help'\n ]).decode('utf-8')\n assert('--greeting' in help_out)\n\n # Now try to see if it gets accepted, would fail to start if the\n # option didn't exist\n n = node_factory.get_node(options={'plugin': plugin_path, 'greeting': 'Ciao'})\n n.stop()", "def test_register_dynamic_plugin(self):\n pass", "def setup_parser(cls, option_group, args, mkflag):", "def setup_parser(cls, option_group, args, mkflag):", "def uctt_plugin_factory_cli_config(\n environment: Environment, instance_id: str = ''):\n return ConfigCliPlugin(environment, instance_id)", "def construct_option(arg, hooks, parser):\n option_args = construct_args(arg, hooks, parser, add_descriptions=False)\n if len(arg.option_strings) > 1:\n name = [str(name) for name in arg.option_strings]\n else:\n name = arg.option_strings[0]\n\n option = {\n \"name\": name,\n **get_base_suggestion(arg)\n }\n\n if hasattr(arg, \"action\") and arg.action in REPEATABLE_ACTIONS:\n option[\"isRepeatable\"] = True\n if option_args:\n option[\"args\"] = option_args\n if arg.required:\n option[\"isRequired\"] = True\n\n option_hook = hooks.get(\"option\")\n if option_hook:\n option_hook(option, parser)\n\n return option", "def cli_start_plugin(self, args) -> str:\n plugin_name = args.plugin_name\n if plugin_name not in self.name_to_plugin_class:\n return error(\"Plugin {} DNE\".format(plugin_name))\n\n if not self.name_to_enabled[plugin_name]:\n return error(\"Plugin {} is not enabled\".format(plugin_name))\n\n self.run_plugin(plugin_name)\n return ok(\"Plugin {} started\".format(plugin_name))" ]
[ "0.74358743", "0.6696819", "0.6409247", "0.63467675", "0.62808424", "0.6185173", "0.61584157", "0.6040108", "0.5839735", "0.57951957", "0.57931757", "0.5768283", "0.571611", "0.57148373", "0.55683666", "0.5559388", "0.5539888", "0.5522889", "0.5500167", "0.5443058", "0.5416557", "0.53856325", "0.53748024", "0.5355877", "0.53373", "0.5322043", "0.5322043", "0.53180486", "0.5317438", "0.5292353" ]
0.76158637
0
test Morlet against T&C table 2 Psi_t(0) = pi^(1/4) Psi_f(0) = 0
def test_Morlet(): morl = cw.MorletWave() assert(np.isclose(morl(0), np.pi**(-1/4), atol=1.e-12)) assert(np.isclose(morl.freq(0), 0, atol=1.e-12))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_t0(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def _tof_equation(x, y, T0, ll, M):\n if M == 0 and np.sqrt(0.6) < x < np.sqrt(1.4):\n eta = y - ll * x\n S_1 = (1 - ll - x * eta) * .5\n Q = 4 / 3 * hyp2f1b(S_1)\n T_ = (eta ** 3 * Q + 4 * ll * eta) * .5\n else:\n psi = _compute_psi(x, y, ll)\n T_ = np.divide(np.divide(psi + M * pi,\n np.sqrt(np.abs(1 - x ** 2))) - x + ll * y,\n (1 - x ** 2))\n\n return T_ - T0", "def fermi(E,mu,T):\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)", "def f(t,y):\n return (lam*y)", "def t(o, r):\n return (r/o)**2", "def test_figure34(self):\n star = 0.1\n current = 1.37\n func = lambda x: x**6 + 3 * x - 4\n\n logging.info(\"\\nCONFIRMING FIGURE 3.4\")\n rf_results = undertest.regula_falsi(func, star, current, 100)", "def ft(t):\r\n ft = t ** (1.0 / 3.0) if t > 0.008856 else 7.787 * t + 4 / 29\r\n return ft", "def test_RULE_90():\n\tk, outputs = 3, [0,1,0,1,1,0,1,0]\n\t# Prime Implicants\n\ttrue_pi0s = set(['020','121'])\n\ttrue_pi1s = set(['021','120'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('121',[],[[0,2]]),('020',[],[[0,2]])]\n\ttrue_ts1s = [('120',[[0,2]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. 
%s != %s' % (ts1s,true_ts1s))", "def test_y0(self, mocker):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[0])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=1)\r\n\r\n # one tape per parameter, plus one global call\r\n assert len(tapes) == tape.num_params + 1", "def planck_f(nu, T):\n return ((2*h*nu**3)/(c**2))*(1./(np.exp((h*nu)/(k*T))-1))", "def ret_f(t,y):\n\n f = np.zeros(3)\n f[0] = 77.27*(y(1) - y(0)*y(1)+ y(0)-8.375e-6*y(0)*y(0))\n f[1] = (1.0/77.27)*(-y(1)-y(0)*y(1)+y(2))\n f[2] = 0.161*(y(0)-y(2))\n\n return f", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0,0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda t: np.interp(t, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0, 0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda tt: np.interp(tt, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def test_tan():\n c=0.5\n assert {'diff':EF.tan(c).der, 'value': EF.tan(c).val}=={'diff':0, 'value': math.tan(c)}", "def f0(E, fermi, T):\n return 1. / (1. 
+ np.exp((E - fermi) / (k_B * T)))", "def test_flux(equation):\n u = .5\n eps = 1e-5\n expected = (equation.flux(u+eps) - equation.flux(u))/eps\n computed = equation.flux_prime(u)\n npt.assert_allclose(computed, expected, rtol=1e-4)", "def vi1(t):\n u_t = 1*(t>0)\n return (np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t)) * u_t", "def test_fission_partial():\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n t = grid.stepping_dim\n\n yl = SubDimension.left(name='yl', parent=y, thickness=4)\n yr = SubDimension.right(name='yr', parent=y, thickness=4)\n\n u = TimeFunction(name='u', grid=grid)\n\n eqns = [Eq(u.forward, u[t + 1, x, y + 1] + 1.).subs(y, yl),\n Eq(u.forward, u[t + 1, x, y - 1] + 1.).subs(y, yr),\n Eq(u.forward, u[t + 1, x, y] + 1.)]\n\n op = Operator(eqns, opt='fission')\n\n assert_structure(op, ['t,x,yl', 't,x,yr', 't,x,y'], 't,x,yl,yr,x,y')", "def test_prop_fluctuation(self):\n tmax = 10.0\n dt = 1.0\n\n ini_rate = 80.0\n\n nsteps = int_r(tmax/dt)\n\n tutor = SimpleNeurons(1, out_fct=lambda i: ini_rate + i*20.0/nsteps - 10.0)\n reward = MockReward(lambda _: 1.0)\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim.run(tmax)\n\n drates = (tutor_rule.rates - ini_rate)[:, 0]\n\n fluctuations = (np.arange(nsteps)*20.0/nsteps - 10.0)\n mask = (fluctuations > 0)\n ratio = np.mean(drates[mask] / fluctuations[mask])\n\n self.assertLess(np.max(np.abs(drates - ratio*fluctuations)), 1e-6)", "def calculation_time_analysis():\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\t\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)", "def _calc_ft(Tci, Thi, Tco, Tho, N_shells) -> 'ft':\n if (Tco - Tci)/Tco < 0.01 or (Thi-Tho)/Tho < 0.01:\n return 1\n try:\n return ht.F_LMTD_Fakheri(Thi, Tho, Tci, Tco,\n shells=N_shells)\n except ValueError:\n return 0.6 # Accounts for worst case scenario", "def svpice(t):\n A0=0.7859063157e0\n A1=0.357924232e-1\n A2=-0.1292820828e-3\n A3=0.5937519208e-6\n A4=0.4482949133e-9\n A5=0.2176664827e-10\n T = t - 273.16\n e = pow(10.0,A0+T*(A1 + T*(A2 + T*(A3 + T*(A4 + T*A5)))))\n return e", "def phi_tau(u, lambda_, b, m, a, t, A0=0.5):\n constant = np.power(lambda_ * b / (lambda_ * b - i_ * u), a * t)\n\n second = i_ * (m * t + (A0 - m) * (1 - np.exp(-lambda_ * t)) / lambda_) * u\n\n third = a / lambda_ * (Li2(1 - i_ * u / (i_ * u - lambda_ * b)) -\n Li2(1 - i_ * np.exp(-lambda_ * t) * u / (i_ * u - lambda_ * b)))\n\n return constant * np.exp(second + third)", "def test_pint_to_simtk():\n q = 5.0 / unit.nanometer\n assert pint_to_simtk(q) == 0.5 / omm_unit.angstrom", "def p(e, t):\n return b * e ** 2", "def o(r, t):\n return r/t**0.5", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n 
if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def test_scenario(timestep_per_pi, int_method):\n\n #determine BC and IC\n x0 = 0.0 #init pos\n v0 = 1.0 #init vel\n t0 = 0.0 #start-time\n tn = 4.0*np.pi #end-time\n tau = timestep_per_pi*np.pi #timesteps\n n = (tn-t0)/tau + 1 #number of timesteps\n \n time = np.linspace(t0, tn, n) #time-array\n\n #acceleration of point particle with k=m=1\n acc1 = lambda x,v,t: -1.0*x #function must take three arguments!\n\n pos, vel, time = integrate_time(func=acc1,\n init=(x0,v0),\n timearray=time,\n method=int_method)\n\n #analytical solutions\n pos_an = np.sin(time)\n vel_an = np.cos(time)\n\n return time, pos, pos_an, vel, vel_an", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)" ]
[ "0.6100648", "0.6093936", "0.60582364", "0.603018", "0.5922259", "0.5845511", "0.58271253", "0.58002853", "0.5758947", "0.56838965", "0.5676569", "0.56403536", "0.56302726", "0.56251395", "0.56142735", "0.56073713", "0.55909216", "0.55874753", "0.5558151", "0.55471605", "0.55363274", "0.55285215", "0.55284435", "0.5520678", "0.5516261", "0.5511401", "0.551098", "0.55050945", "0.550113", "0.5493304" ]
0.64026505
0
Compute and return the precession matrix for FK4 using Newcomb's method. Used inside some of the transformation functions.
def _precession_matrix(oldequinox, newequinox): return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMatrix(self) -> CMatrix4:\n ...", "def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def cpp_calcJoinMatrix(self, noofRows, noofRows2, noofRows3, noofRows4, noofRows5):\n return _patchExtractor.patchExtractor_cpp_calcJoinMatrix(self, noofRows, noofRows2, noofRows3, noofRows4, noofRows5)", "def _get_proj_mat(self):\n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vec_handles)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, self.basis_vec_handles)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def getTransposeMatrix(self) -> CMatrix4:\n ...", "def prob4():\n #set up the matrices\n solvers.options['show_progress'] = False\n Q = matrix(np.array([[3., 2.,1.],[2.,4.,2.],[1., 2., 3.]]))\n r = matrix([3.,0., 1.])\n #solve the matrices\n sol=solvers.qp(Q, r)\n return np.ravel(sol['x']), sol['primal objective']", "def assign_vertices(self):\n CV_matrix = np.zeros((self.n_c, self.n_v, 3))\n for i in range(3):\n CV_matrix[self.tris[:, i], np.arange(self.n_v), i] = 1\n self.CV_matrix = CV_matrix\n return self.CV_matrix", "def generator_matrix(self):\n C = self.code()\n F = C.base_ring()\n Cor = C.original_code()\n G = Cor.generator_matrix()\n k = C.dimension()\n extra_col = [-sum(G.rows()[i]) for i in range(k)]\n extra_col = matrix(F, k, 1, extra_col)\n return G.augment(extra_col)", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def bloch_matrix(self):\n if self.gf_r is None:\n self.gf()\n\n return -self.gf_r.dot(self.lead[1])", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. 
U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)", "def inv_sym(self, ):\n m = self.m\n n = self.n\n kQ = self.kQ\n iQ = self.iQ\n iA = self.iA\n kA = self.kA\n kAt = self.kAt\n iAt = self.iAt\n bndmark = self.bndmark\n rngmark = self.rngmark\n\n verbose = self.verbose\n pdf = self.pdf\n\n separable = True\n degree = np.empty(n+m, dtype=np.int)\n nbrs = np.empty(n+m, dtype=object)\n\n #/*-----------------------------------------------------+\n #| First check to see if the problem is separable. */\n\n for j in range(n):\n for k in range(kQ[j], kQ[j+1]):\n if iQ[k] != j:\n separable = False\n break\n\n #/*----------------------------------------------------+\n #| Select ordering priority (primal or dual) */\n\n\n _dense, _fraction, pfillin, dfillin = 0.0, 0.0, 0.0, 0.0\n\n _fraction = 1.0e0\n for j in range(n):\n _dense = float(kA[j+1]-kA[j])/(m+1)\n _fraction = _fraction*(1.0e0 - _dense*_dense)\n\n pfillin = 0.5*m*m*(1.0e0-_fraction)\n if verbose>2:\n print(\"primal fillin estimate: {:10.0f}\".format(pfillin))\n\n _fraction = 1.0e0\n for i in range(m):\n _dense = float(kAt[i+1]-kAt[i])/(n+1)\n _fraction = _fraction*(1.0e0 - _dense*_dense)\n\n dfillin = 0.5*n*n*(1.0e0-_fraction)\n if verbose>2:\n print(\"dual fillin estimate: {:10.0f}\\n\".format(dfillin))\n\n if pdf == self._UNSET:\n if 3*pfillin <= dfillin and separable:\n pdf = self._PRIMAL\n if verbose>2:\n print(\"Ordering priority favors PRIMAL\")\n else:\n pdf = self._DUAL\n if verbose>2:\n print(\"Ordering priority favors DUAL\")\n\n\n #/*----------------------------------------------+\n #| Initialize nbrs so that nbrs[col][k] con- |\n #| tains the row index of the k_th nonzero in |\n #| column col. |\n #| Initialize degree so that degree[col] con- |\n #| tains the number of nonzeros in column col. |\n #| */\n\n for j in range(n):\n ne = kA[j+1] - kA[j] + kQ[j+1] - kQ[j]\n nbrs[j] = np.empty(ne, dtype=np.int)\n ne = 0\n for k in range(kA[j], kA[j+1]):\n nbrs[j][ne] = n+iA[k]\n ne+=1\n for k in range(kQ[j],kQ[j+1]):\n if iQ[k] != j:\n nbrs[j][ne] = iQ[k]\n ne+=1\n\n degree[j] = ne\n\n for i in range(m):\n ne = kAt[i+1] - kAt[i]\n nbrs[n+i] = np.empty(ne, dtype=np.int)\n degree[n+i] = ne\n ne = 0\n for k in range(kAt[i], kAt[i+1]):\n nbrs[n+i][ne] = iAt[k]\n ne+=1\n\n #/*----------------------------------------------+\n #| Initialize tier to contain the ordering |\n #| priority scheme. 
|\n #| */\n\n if self.tier is None:\n self.tier = np.empty(n+m, dtype=np.int)\n n1 = 0\n if pdf == self._PRIMAL:\n for j in range(n):\n if bndmark[j] != FREEVAR:\n self.tier[j] = 0 # 0\n else:\n self.tier[j] = 1 # 2\n\n for i in range(m):\n if rngmark[i] == UNCONST:\n self.tier[n+i] = 1 # 4\n n1+=1\n elif rngmark[i] == INFINITE:\n self.tier[n+i] = 1 # 1\n else:\n self.tier[n+i] = 1 # 3\n n1+=1\n\n else:\n for j in range(n):\n if bndmark[j] != FREEVAR:\n self.tier[j] = 1 # 1\n else:\n self.tier[j] = 1 # 3\n n1+=1\n\n for i in range(m):\n if rngmark[i] == UNCONST:\n self.tier[n+i] = 1 # 4\n elif rngmark[i] == INFINITE:\n self.tier[n+i] = 0 # 0\n else:\n self.tier[n+i] = 1 # 2\n\n\n #/*---------------------------------------------------------+\n #| compute maximum column degree of tier zero columns */\n\n if self.dense < 0:\n denfac = 3.0\n colhisto = np.zeros(n+m+1, dtype=np.int)\n\n for i in range(n+m):\n if self.tier[i] == 0:\n colhisto[ degree[i] ] += 1\n\n tot = 0\n _max = n1\n for i in range(n+m):\n tot += colhisto[i]\n if tot >= _max:\n break\n i+=1\n tot = 0\n cnt = 0\n for j in range(n+m):\n if self.tier[j] == 0:\n tot += degree[j]\n cnt+=1\n self.dense = dense = int(denfac*i)\n\n #dense = (int)(denfac*MAX(i,tot/cnt))\n \t\t#printf(\"i = %d, n = %d, m = %d, n1 = %d \\n\", i,n,m,n1)\n \t\t#printf(\"tot = %d, cnt = %d\\n\", tot, cnt)\n del(colhisto)\n\n\n if verbose>2:\n print(\"dense: {:5d}\".format(dense))\n\n #/*----------------------------------------------+\n #| Get memory for mark[]. */\n\n self.mark = np.empty(n+m, dtype=np.int)\n\n self.lltsym(degree,nbrs)\n\n del(degree)\n del(nbrs)\n self.tier = None", "def design_matrix(nonlinear_p, data, prior):\n P, ecc, omega, M0 = nonlinear_p[:4] # we don't need the jitter here\n\n t = data._t_bmjd\n t0 = data._t_ref_bmjd\n zdot = cy_rv_from_elements(t, P, 1., ecc, omega, M0, t0, 1e-8, 128)\n\n M1 = np.vander(t - t0, N=prior.poly_trend, increasing=True)\n M = np.hstack((zdot[:, None], M1))\n\n return M", "def get_precedence_matrix(self):\n m = zeros(self.size)\n perm = self.array_form\n for i in xrange(m.rows):\n for j in xrange(i + 1, m.cols):\n m[perm[i], perm[j]] = 1\n return m", "def adjoint(self):\n data = []\n for i in range(1, self.rows + 1):\n for j in range(1, self.columns + 1):\n data.append(self._cofactor(i, j))\n\n mat = Matrix(self.rows, self.columns, data)\n return mat.transpose()", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def get_prelu_pattern():\n @mb.program(input_specs=[mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(),\n get_new_symbol(), get_new_symbol()])), ])\n def prelu_pattern(x):\n # 
perm value can be anything, it will be checked in \"is_var_constraint_satisifed\" method\n x = mb.transpose(x=x, perm=[0,1,2,3], name=\"transpose\")\n return _prelu_pattern(x)\n\n return prelu_pattern", "def assemble_Poisson_6th_order_FD_solver_matrices(Nx, BC):\n\n Poisson_6th_order_FD_solver_matrices = {}\n\n # Nx is the number of active nodes in configuration\n if BC['phi']['x']['type'] == 'PBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros([Nx,Nx])\n for i in range(Nx):\n if i == 0: # first row\n D[i,i] = -2\n D[i,i+1] = 1\n D[i,-1] = 1\n\n elif i == Nx - 1: # last row\n D[i,i] = -2\n D[i,i-1] = 1\n D[i,0] = 1\n else: # interior rows\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros([Nx, Nx])\n for i in range(Nx):\n if i == 0: # first row\n B[i,-2] = -1/240.\n B[i,-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == 1: # second row\n B[i,-1] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif 1 < i < (Nx - 2): # 2 <= row <= third before last\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == (Nx - 2): # second before last row\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,0] = -1/240.\n\n elif i == (Nx - 1): # last row\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,0] = 1/10.\n B[i,1] = -1/240.\n\n\n elif BC['phi']['x']['type'] == 'LDBC_UDBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros([Nx,Nx])\n for i in range(Nx):\n if i == 0 or i == Nx - 1: # last row\n D[i,i] = 1\n else: # interior rows\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros([Nx, Nx])\n for i in range(Nx):\n\n # redundant, included for transparency\n if i == 0 or i == Nx - 1:\n B[i,i] = 0\n\n elif i == 1:\n B[i,i-1] = 3/40.\n B[i,i] = 209/240.\n B[i,i+1] = 1/60.\n B[i,i+2] = 7/120.\n B[i,i+3] = -1/40.\n B[i,i+4] = 1/240.\n\n elif i == Nx-1:\n B[i,i] = 0\n\n elif 1 < i < Nx-2:\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n B[i,i-4] = 1/240.\n B[i,i-3] = -1/40.\n B[i,i-2] = 7/120.\n B[i,i-1] = 1/60.\n B[i,i] = 209/240.\n B[i,i+1] = 3/40.\n\n elif BC['phi']['x']['type'] == 'LNBC_UDBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LNBC row\n D[0,0] = -97/10.\n D[0,1] = 16.\n D[0,2] = -10\n D[0,3] = 5.\n D[0,4] = -3/2.\n D[0,5] = 1/5.\n\n # UDBC row\n D[-1,-1] = 1.\n\n # Poisson's equation rows\n for i in range(1,Nx-1):\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n if i == 0:\n B[i,i] = 317 / 240.\n B[i,i+1] = -133/120.\n B[i,i+2] = 187 / 120.\n B[i,i+3] = -23 / 20.\n B[i,i+4] = 109 / 240.\n B[i,i+5] = -3/40.\n\n elif i == 1:\n\n B[i, i-1] = 3 / 40.\n B[i, i] = 209 / 240.\n B[i,i+1] = 1 / 60.\n B[i,i+2] = 7 / 120.\n B[i,i+3] = -1 / 40.\n B[i,i+4] = 1 / 240.\n\n elif 2 <= i <= Nx-3:\n\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n\n B[i,i+1] = 3 / 40.\n B[i,i] = 209 / 240.\n B[i,i-1] = 1 / 60.\n B[i,i-2] = 7 / 120.\n B[i,i-3] = -1 / 40.\n B[i,i-4] = 1 / 240.\n\n # else i == Nx-1: row of 
zeros\n\n elif BC['phi']['x']['type'] == 'LDBC_UNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # UDBC row\n D[0,0] = 1.\n\n # LNBC row\n D[-1,-1] = -97/10.\n D[-1,-2] = 16.\n D[-1,-3] = -10\n D[-1,-4] = 5.\n D[-1,-5] = -3/2.\n D[-1,-6] = 1/5.\n\n # Poisson's equation rows\n for i in range(1,Nx-1):\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n # i == 0 row contains all zeros\n\n if i == 1:\n\n B[i, i-1] = 3 / 40.\n B[i, i] = 209 / 240.\n B[i,i+1] = 1 / 60.\n B[i,i+2] = 7 / 120.\n B[i,i+3] = -1 / 40.\n B[i,i+4] = 1 / 240.\n\n elif 2 <= i <= Nx-3:\n\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n\n B[i,i+1] = 3 / 40.\n B[i,i] = 209 / 240.\n B[i,i-1] = 1 / 60.\n B[i,i-2] = 7 / 120.\n B[i,i-3] = -1 / 40.\n B[i,i-4] = 1 / 240.\n\n if i == Nx-1:\n B[i,i-5] = -3/40.\n B[i,i-4] = 109 / 240.\n B[i,i-3] = -23 / 20.\n B[i,i-2] = 187 / 120.\n B[i,i-1] = -133/120.\n B[i,i] = 317 / 240.\n\n elif BC['phi']['x']['type'] == 'LDBC_LNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LDBC row, (row 0)\n D[0,0] = 1.\n\n # LNBC row, (row 1)\n D[1,0] = -97/10.\n D[1,1] = 16.\n D[1,2] = -10\n D[1,3] = 5.\n D[1,4] = -3/2.\n D[1,5] = 1/5.\n\n # Poisson's equation rows\n for i in range(2,Nx):\n D[i,i-2] = 1\n D[i,i-1] = -2\n D[i,i] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(1,B.shape[0]):\n # if i == 0: row of zeros, density is not involved (corresponds to DBC)\n\n if i == 1:\n B[i,i-1] = 317 / 240.\n B[i,i] = -133/120.\n B[i,i+1] = 187 / 120.\n B[i,i+2] = -23 / 20.\n B[i,i+3] = 109 / 240.\n B[i,i+4] = -3/40.\n\n if i == 2:\n B[i, i-2] = 3 / 40.\n B[i, i-1] = 209 / 240.\n B[i,i] = 1 / 60.\n B[i,i+1] = 7 / 120.\n B[i,i+2] = -1 / 40.\n B[i,i+3] = 1 / 240.\n\n elif 3 <= i <= Nx-2:\n B[i,i-3] = -1/240.\n B[i,i-2] = 1/10.\n B[i,i-1] = 97/120.\n B[i,i] = 1/10.\n B[i,i+1] = -1/240.\n\n elif i == Nx-1:\n B[i,i-5] = 1/240.\n B[i,i-4] = -1/40.\n B[i,i-3] = 7/120.\n B[i,i-2] = 1/60.\n B[i,i-1] = 209/240.\n B[i,i] = 3/40.\n\n elif BC['phi']['x']['type'] == 'UDBC_UNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LDBC row, (row Nx-1)\n D[-1,-1] = 1.\n\n # LNBC row, (row Nx-2)\n D[-2,-1] = -97/10.\n D[-2,-2] = 16.\n D[-2,-3] = -10\n D[-2,-4] = 5.\n D[-2,-5] = -3/2.\n D[-2,-6] = 1/5.\n\n # Poisson's equation rows\n for i in range(Nx-2):\n D[i,i] = 1\n D[i,i+1] = -2\n D[i,i+2] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n if i == 0:\n B[i,i] = 3/40.\n B[i,i+1] = 209/240.\n B[i,i+2] = 1/60.\n B[i,i+3] = 7/120.\n B[i,i+4] = -1/40.\n B[i,i+5] = 1/240.\n\n if 1 <= i < Nx-3:\n B[i,i-1] = -1/240.\n B[i,i] = 1/10.\n B[i,i+1] = 97/120.\n B[i,i+2] = 1/10.\n B[i,i+3] = -1/240.\n\n elif i == Nx-3:\n B[i,i-3] = 1/240.\n B[i,i-2] = -1/40.\n B[i,i-1] = 7/120.\n B[i,i] = 1/60.\n B[i,i+1] = 209/240.\n B[i,i+2] = 3/40.\n\n elif i == Nx-2:\n B[i,i+1] = 317 / 240.\n B[i,i] = -133/120.\n B[i,i-1] = 187 / 120.\n B[i,i-2] = -23 / 20.\n B[i,i-3] = 109 / 240.\n B[i,i-4] = -3/40.\n\n # else i == Nx - 1: row of zeros, density is not involved (corresponds to DBC)\n\n Poisson_6th_order_FD_solver_matrices['D'] = D\n Poisson_6th_order_FD_solver_matrices['B'] 
= B\n\n return Poisson_6th_order_FD_solver_matrices", "def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = 
cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}", "def project(Ad, Bd, Cd, Dd, q, r, solver=cvx.SCS):\n \n Z = np.zeros((3,3))\n I = np.eye(3)\n A = np.block([[Ad, Z], [I, I]])\n B1 = np.vstack((I, Z))\n B2 = np.vstack((Bd, Z))\n C1 = np.block([[I, Z], [Z, Z]])\n D11 = np.block([[Z], [Z]])\n D12 = np.block([[Z], [I]])\n C2 = np.block([I, Z])\n D21 = Z\n D22 = Z\n\n return hinf_project_pole_alloc(A, B1, B2, C1, C2, D11, D12, D21, D22, q, r, solver)", "def get_P34(self):\n msk = self.load_mask()\n dimsrc = sum(msk)\n dimtgt = self.vs.get_dimension()\n M = matrix(dimtgt, dimsrc, sparse = True)\n j = 0\n for (i, v) in enumerate(msk):\n if v == 1:\n M[i, j] = 1\n j=j+1\n return M", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def adjoint(self):\n return self.cofactorMatrix().transpose()", "def constraint_matrix(self):\n\n con_filename = self.constraint_filename\n con_press, con_data = self._co_star_read(con_filename)\n return con_data" ]
[ "0.6055124", "0.5924322", "0.5643369", "0.5636854", "0.55979395", "0.55794376", "0.5578798", "0.5540976", "0.55308", "0.55139494", "0.54965854", "0.5489014", "0.546786", "0.5440847", "0.5426619", "0.54196495", "0.5367736", "0.532335", "0.5320548", "0.5307548", "0.5295726", "0.5272746", "0.5261689", "0.52440983", "0.5218721", "0.52179533", "0.52177477", "0.5206521", "0.5202562", "0.5193874" ]
0.66207457
0
Remove features with multicollinearity based on cross-correlation coefficient.
def remove_multicollinearity_correlation(data: pd.DataFrame, threshold: Optional[float] = 0.8) -> pd.DataFrame: corr_data = pd.DataFrame(np.triu(np.abs(data.corr())), columns=data.columns) multicoll_columns = np.logical_and(corr_data >= threshold, corr_data < 1.0).any() return data.loc[:, ~multicoll_columns]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_correlated_features(x, threshold=0.9):\n x_copy = np.copy(x)\n \n corr_matrix = np.corrcoef(x_copy, rowvar=False)\n # Set to False highly correlated columns\n nb_col = len(corr_matrix)\n columns = np.full((nb_col,), True, dtype=bool)\n for i in range(nb_col):\n for j in range(i+1, nb_col):\n if corr_matrix[i, j] >= threshold:\n if columns[i]:\n columns[j] = False\n \n # Remove correlated features and concat categorical features\n return x_copy[:, columns], columns", "def filter_collinearity(c, threshold):\n\t# ensure symmetric\n\tif c.shape[0] != c.shape[1]:\n\t\traise ValueError('input dataframe should be symmetrical in dimensions')\n\n\t# init drops list\n\tdrops = []\n\tmacor = [] # mean abs corrs\n\tcorrz = [] # the correlations\n\n\t## Iterate over each feature\n\tfinished = False\n\twhile not finished:\n\n\t\t# Whenever there's a break, this loop will start over\n\t\tfor i,nm in enumerate(c.columns):\n\t\t\tthis_col = c[nm].drop(nm).sort_values(na_position='first') # gets the column, drops the index of itself, and sorts\n\t\t\tthis_col_nms = this_col.index.tolist()\n\t\t\tthis_col = np.array(this_col)\n\n\t\t\t# check if last value is over thresh\n\t\t\tmax_cor = this_col[-1]\n\t\t\tif pd.isnull(max_cor) or max_cor < threshold or this_col.shape[0] == 1:\n\t\t\t\tif i == c.columns.shape[0] - 1:\n\t\t\t\t\tfinished = True\n\n\t\t\t\t# control passes to next column name or end if finished\n\t\t\t\tcontinue\n\n\t\t\t# otherwise, we know the corr is over the threshold\n\t\t\t# gets the current col, and drops the same row, sorts asc and gets other col\n\t\t\tother_col_nm = this_col_nms[-1]\n\t\t\tthat_col = c[other_col_nm].drop(other_col_nm)\n\n\t\t\t# get the mean absolute correlations of each\n\t\t\tmn_1, mn_2 = np.nanmean(this_col), np.nanmean(that_col)\n\n\t\t\t# we might get nans?\n\t\t\t# if pd.isnull(mn_1) and pd.isnull(mn_2):\n\t\t\t\t# this condition is literally impossible, as it would\n\t\t\t\t# require every corr to be NaN, and it wouldn't have\n\t\t\t\t# even gotten here without hitting the continue block.\n\t\t\tif pd.isnull(mn_1):\n\t\t\t\tdrop_nm = other_col_nm\n\t\t\telif pd.isnull(mn_2):\n\t\t\t\tdrop_nm = nm\n\t\t\telse:\n\t\t\t\tdrop_nm = nm if mn_1 > mn_2 else other_col_nm\n\n\t\t\t# drop the bad col, row\n\t\t\tc.drop(drop_nm, axis=1, inplace=True)\n\t\t\tc.drop(drop_nm, axis=0, inplace=True)\n\n\t\t\t# add the bad col to drops\n\t\t\tdrops.append(drop_nm)\n\t\t\tmacor.append(np.maximum(mn_1, mn_2))\n\t\t\tcorrz.append(_MCFTuple(\n\t\t\t\t\tfeature_x=drop_nm,\n\t\t\t\t\tfeature_y=nm if not nm == drop_nm else other_col_nm,\n\t\t\t\t\tabs_corr=max_cor,\n\t\t\t\t\tmac=macor[-1]\n\t\t\t\t))\n\n\t\t\t# if we get here, we have to break so the loop will \n\t\t\t# start over from the first (non-popped) column\n\t\t\tbreak\n\n\t\t# if not finished, restarts loop, otherwise will exit loop\n\n\t# return\n\treturn drops, macor, corrz", "def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del 
self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)", "def remove_highly_correlated_vars_fast(df, corr_limit=0.70):\r\n # Creating correlation matrix\r\n correlation_dataframe = df.corr().abs().astype(np.float16)\r\n # Selecting upper triangle of correlation matrix\r\n upper_tri = correlation_dataframe.where(np.triu(np.ones(correlation_dataframe.shape),\r\n k=1).astype(np.bool))\r\n # Finding index of feature columns with correlation greater than 0.95\r\n to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > corr_limit)]\r\n print();\r\n print('Highly correlated columns to remove: %s' %to_drop)\r\n return to_drop", "def prune_corr_features(X_feat, threshold):\n feats = list(set(feat for f_set in X_feat for feat in f_set))\n num_before = len(feats)\n step = 2000\n\n for k in range(5):\n to_keep = set()\n random.shuffle(feats)\n\n for i in range(0, len(feats), step):\n size = min(step, len(feats) - i)\n x = np.zeros((len(X_feat), size))\n sub_feats = sorted(\n feats[i : i + size], key=lambda f: 30 * f.count(FEAT_JOINER) - len(f)\n )\n for j, x_f in enumerate(sub_feats):\n idx = [x_f in f_set for f_set in X_feat]\n x[idx, j] = 1\n\n corr, _ = spearmanr(x)\n corr = np.triu(corr, k=1)\n corr = np.any(np.abs(corr) > threshold, axis=0)\n to_keep.update(feat for n, feat in enumerate(sub_feats) if not corr[n])\n log.debug(f\"At {i:4d}: eliminated {sum(corr):3d} features\")\n\n feats = list(to_keep)\n log.debug(f\"Iteration {k+1}: kept {len(feats)} after pruning\")\n\n return X_pruned, num_before - len(to_prune)", "def remove_features(data, target, fn):\n selected_data = []\n if fn == 'variance':\n sel = VarianceThreshold(threshold=(.1 * (1 - .8)))\n selected_data = sel.fit_transform(data)\n elif fn == 'L1':\n lsvc = LinearSVC(C=0.01, penalty=\"l1\", dual=False).fit(data, target)\n model = SelectFromModel(lsvc, prefit=True)\n selected_data = model.transform(data)\n\n selected_t = np.transpose(selected_data)\n data_t = np.transpose(data)\n\n i = 0\n kept_cols = []\n removed_cols = []\n for i, col in enumerate(data_t):\n if col not in selected_t:\n removed_cols.append(i)\n else:\n kept_cols.append(i)\n return kept_cols, removed_cols", "def feature_selection(feature_matrix, missing_threshold=90, correlation_threshold=0.95):\n \n feature_matrix = pd.get_dummies(feature_matrix)\n n_features_start = feature_matrix.shape[1]\n print('Original shape: ', feature_matrix.shape)\n\n # Find missing and percentage\n missing = pd.DataFrame(feature_matrix.isnull().sum())\n missing['percent'] = 100 * (missing[0] / feature_matrix.shape[0])\n missing.sort_values('percent', ascending = False, inplace = True)\n\n # Missing above threshold\n missing_cols = list(missing[missing['percent'] > missing_threshold].index)\n n_missing_cols = len(missing_cols)\n\n # Remove missing columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in missing_cols]]\n print('{} missing columns with threshold: {}.'.format(n_missing_cols,\n missing_threshold))\n \n # Zero variance\n unique_counts = pd.DataFrame(feature_matrix.nunique()).sort_values(0, ascending = True)\n zero_variance_cols = list(unique_counts[unique_counts[0] == 1].index)\n n_zero_variance_cols = len(zero_variance_cols)\n\n # Remove zero variance columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in zero_variance_cols]]\n print('{} zero variance columns.'.format(n_zero_variance_cols))\n \n # Correlations\n corr_matrix = feature_matrix.corr()\n\n # Extract 
the upper triangle of the correlation matrix\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))\n\n # Select the features with correlations above the threshold\n # Need to use the absolute value\n to_drop = [column for column in upper.columns if any(upper[column].abs() > correlation_threshold)]\n\n n_collinear = len(to_drop)\n \n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in to_drop]]\n print('{} collinear columns removed with threshold: {}.'.format(n_collinear,\n correlation_threshold))\n \n total_removed = n_missing_cols + n_zero_variance_cols + n_collinear\n \n print('Total columns removed: ', total_removed)\n print('Shape after feature selection: {}.'.format(feature_matrix.shape))\n return feature_matrix", "def remove_effect_of_covariates(X, c, inline=False):\n\n # Standardize the values of the covariates\n c = StandardScaler().fit_transform(c)\n\n # Get the DataFrame to work with\n x = X.copy() if not inline else X\n\n # Prepare the dimensions\n x = make_2_ndim(x)\n c = make_2_ndim(c)\n\n for i in range(x.shape[1]):\n\n # Create the linear regression model\n regressor = LinearRegression()\n\n # fit the regressor (feature = f(covariates))\n regressor.fit(c, x[:, i])\n\n # Subtract the effect of covariates (feature = residuals = feature - predictions)\n x[:, i] = x[:, i] - regressor.predict(c)\n\n return X", "def test_remove_autos():\n test_array = np.ones((3, 3, 11, 21))\n out_array = utils.remove_auto_correlations(test_array, axes=(0, 1))\n assert (6, 11, 21) == out_array.shape", "def correlation_drop(df, threshold):\n df_copy = df.copy()\n col_corr = set()\n\n corr_matrix = df_copy.corr()\n\n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr):\n colname = corr_matrix.columns[i]\n col_corr.add(colname)\n if colname in df_copy.columns:\n del df_copy[colname]\n print(col_corr)\n return df_copy", "def removeAllCorrelations(self, removeReImCorrel = True):\n\t\tdim = len(self.coma)/2\n#\t#\tCMwrite(\"removeAllCorrelations\")\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\tif not i == j:\n\t\t\t\t\tself.coma[2*i ,2*j ] = 0.\t\t\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\t\t\t\tself.coma[2*i+1,2*j+1] = 0.\n\t\t\t\telif removeReImCorrel:\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}", "def remove_unwanted_features(self):\n\n bad_feats = []\n for f in self.features:\n\n # Exclude features with no data\n if self.valuecounts[f] == 0:\n self.messages.append(\"\"\"[INFO] Model \"%s\": Feature %s excluded because there are no datapoints for selected languages.\"\"\" % (self.name, f))\n bad_feats.append(f)\n continue\n\n # Exclude features with lots of missing data\n missing_ratio = self.missing_ratios[f]\n if int(100*(1.0-missing_ratio)) < self.minimum_data:\n self.messages.append(\"\"\"[INFO] Model \"%s\": Feature %s excluded because of excessive missing data (%d%%).\"\"\" % (self.name, f, int(missing_ratio*100)))\n bad_feats.append(f)\n continue\n\n # Exclude constant features\n if self.valuecounts[f] == 1:\n if self.remove_constant_features:\n self.constant_feature_removed = True\n self.messages.append(\"\"\"[INFO] Model \"%s\": Feature %s excluded because its value is constant across selected languages. 
Set \"remove_constant_features=False\" in config to stop this.\"\"\" % (self.name, f))\n bad_feats.append(f)\n continue\n else:\n self.constant_feature = True\n\n for bad in bad_feats:\n self.features.remove(bad)\n for lang in self.languages:\n if bad in self.data[lang]:\n self.data[lang].pop(bad)\n\n # Make sure there's something left\n if not self.features:\n raise ValueError(\"No features specified for model %s!\" % self.name)\n self.features.sort()\n self.messages.append(\"\"\"[INFO] Model \"%s\": Using %d features from data source %s\"\"\" % (self.name, len(self.features), self.data_filename))\n if self.constant_feature and self.rate_variation:\n self.messages.append(\"\"\"[WARNING] Model \"%s\": Rate variation enabled with constant features retained in data. This *may* skew rate estimates for non-constant features.\"\"\" % self.name)", "def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X", "def drop_corr_columns(df, drop_columns=True, print_columns=True, threshold=0.98):\n\n # 1. calculation\n CorrCoeff = df.corr()\n\n # 2. report\n CorrFieldsList = []\n print('Columns with correlations more than %s :' % str(threshold))\n for i in CorrCoeff:\n for j in CorrCoeff.index[CorrCoeff[i] >= threshold]:\n if i != j and j not in CorrFieldsList:\n CorrFieldsList.append(j)\n if print_columns:\n print(\"%s-->%s: r^2=%f\" % (i, j, CorrCoeff[i][CorrCoeff.index == j].values[0]))\n #print()\n #print('Correlated columns count: %', len(CorrFieldsList))\n\n # 3. 
dropping\n if drop_columns:\n print('%s columns total' % df.shape[1])\n df = df.drop(CorrFieldsList, 1)\n print('%s columns left' % df.shape[1])\n\n return df", "def test_remove_middle_axis():\n test_array = np.ones((13, 17, 19, 3, 3, 11, 21))\n out_array = utils.remove_auto_correlations(test_array, axes=(3, 4))\n assert (13, 17, 19, 6, 11, 21) == out_array.shape", "def exclude_some_features_matrical_samples(data,features,given=None):\n num_sample=data.shape[0]\n feat_total=data.shape[1]\n num_signal=len(features)\n feat_each=feat_total//num_signal\n if given is None:\n return data,features\n common,ind1,ind2=take_common_features(features,given)\n data=data.reshape((num_sample,num_signal,feat_each))\n data=np.delete(data,ind1,axis=1)\n features=np.delete(features,ind1)\n data=data.reshape((num_sample,(len(features))*feat_each))\n return data,features", "def checkCollinearity(x):\n C_mat = x.corr()\n fig = plt.figure(figsize = (15,15))\n sb.heatmap(C_mat, vmax = .8, square = True)\n plt.show()", "def crosscorr(x, y, **kwargs):\r\n # just make the same computation as the crosscovariance,\r\n # but without subtracting the mean\r\n kwargs['debias'] = False\r\n rxy = crosscov(x, y, **kwargs)\r\n return rxy", "def remove_redundants(tr_x, tr_y, threshold = 0.95): \n corrm = np.corrcoef(np.hstack([tr_x,tr_y.reshape((-1,1))]).T)\n rows, cols = np.where((corrm>threshold) & (corrm<1.0))\n idx = [c for r,c in zip(rows,cols) if (c>r)]\n tr_x_removed = np.delete(tr_x, idx, axis=1)\n return tr_x_removed, idx", "def test_remove_autos_with_pols():\n test_array = np.ones((4, 3, 3, 11, 21))\n out_array = utils.remove_auto_correlations(test_array, axes=(1, 2))\n assert (4, 6, 11, 21) == out_array.shape", "def test_no_cosmics(self):\n prng = np.random.RandomState(84287)\n y = prng.normal(size=1000)\n\n y2 = remove_cosmics(y)\n\n np.testing.assert_array_equal(y, y2)", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def remove_compl(hyplo_list):\r\n\tfor x in hyplo_list:\r\n\t\tif get_coml_s(x) in hyplo_list:\r\n\t\t\thyplo_list.remove(x)", "def exclude_some_features(data,features,given=None):\n if given is None:\n return data,features\n common,ind1,ind2=take_common_features(features,given)\n data=np.delete(data,ind1,axis=1)\n features=np.delete(features,ind1)\n return data,features", "def remove_invariable_features(tX):\n\n features = tX.T\n stds = np.std(features, axis=1)\n indices = np.where(stds == 0)\n new_tX = np.delete(features, indices, 0).T\n return new_tX", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def trim_features():\n pass", "def mask_specificity(Y, Y_pred): \n cm = confusion_matrix(Y.reshape(-1), Y_pred.reshape(-1))\n return specificity(cm)", "def remove_cofactors_from_Sij(Sij_df, cofactors):\n if len(cofactors) == 0:\n return Sij_df\n\n # Get a list of cofactors in the model\n cofactors = list(set(cofactors) & set(Sij_df.index.tolist()))\n\n # Remove row of cofactors\n nSij_df = Sij_df.drop(cofactors)\n\n allRxns = nSij_df.columns.tolist()\n\n # Get all columns (j) with all zero entries\n rxns_involving_cofactors_only = nSij_df.columns[(\n nSij_df == 0).all()].tolist()\n\n remainRxns 
= list(set(allRxns) - set(rxns_involving_cofactors_only))\n\n # Drop all columns with zero entries\n nSij_df2 = nSij_df[sorted(remainRxns)]\n\n return nSij_df2", "def prune_conformers(self, param={'M':'cml1', 'rp':1.0,'thresh':0.25,'wz':F,'sort':T}):\n if param['M'] in ['rmsd']:\n ds = self.get_rmsd()\n elif param['M'] in ['cm','cml1']:\n ds = self.get_dcm(param)\n else:\n raise '#ERROR: unknow rep'\n #print ' ++ ds = ', ds\n #print ' |__ es = ', np.array(self.es)\n seq = np.argsort(self.es) # sort by increasing energy\n ccids = []\n for i in seq:\n # always keep lowest-energy conformer\n if len(ccids) == 0:\n ccids.append(i)\n continue\n\n # discard conformers within the RMSD threshold\n if np.all(ds[i][ccids] >= thresh):\n ccids.append(i)\n self.nconf = len(ccids)\n # creat a new mol object with unique conformers\n new = Chem.Mol(self.mol)\n new.RemoveAllConformers()\n for i in ccids:\n ci = self.mol.GetConformer(i)\n new.AddConformer(ci, assignId=True)\n self.mol = new" ]
[ "0.6917892", "0.61253226", "0.5922287", "0.5854584", "0.5745143", "0.5716787", "0.5700009", "0.5627028", "0.5626299", "0.5607187", "0.5549474", "0.5468665", "0.5422611", "0.537759", "0.5374915", "0.5370696", "0.5360709", "0.5357423", "0.53232837", "0.53192794", "0.5300013", "0.5155853", "0.5130762", "0.51128024", "0.50934994", "0.5090408", "0.5072422", "0.50701964", "0.5039958", "0.5004291" ]
0.64739853
1
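The snippets above all implement the same correlation-pruning idea. Below is a hedged, numpy-only sketch of that shared pattern; the 0.9 threshold and the toy data are arbitrary illustrations, not values taken from the dataset.

import numpy as np

def drop_correlated_columns(x, threshold=0.9):
    # Absolute pairwise correlations between columns.
    corr = np.abs(np.corrcoef(x, rowvar=False))
    keep = np.ones(x.shape[1], dtype=bool)
    for i in range(x.shape[1]):
        if not keep[i]:
            continue
        for j in range(i + 1, x.shape[1]):
            if keep[j] and corr[i, j] >= threshold:
                keep[j] = False  # drop the later column of each correlated pair
    return x[:, keep], keep

rng = np.random.default_rng(0)
base = rng.normal(size=(100, 1))
x = np.hstack([base, 2 * base + 0.01 * rng.normal(size=(100, 1)), rng.normal(size=(100, 1))])
reduced, kept = drop_correlated_columns(x)
print(kept, reduced.shape)  # [ True False  True] (100, 2): the near-duplicate column is dropped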
Test for picking the highest-rated agent and for picking the right agent when two agents finish at the same time
def test_2_agents_done_at_once(self):
    # Test highest rating agent
    agent, wait_time = Agent.get(TEST_CUSTOMERS[0])
    self.assertEqual(
        (TEST_AGENTS[1], 0),
        (agent.agent, wait_time))

    # Test 2 agents done at the same time
    Agent.get(TEST_CUSTOMERS[1])
    agent, wait_time = Agent.get(TEST_CUSTOMERS[2])
    self.assertEqual(
        (TEST_AGENTS[1], 60),
        (agent.agent, wait_time))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self):\n highest_offer = None\n\n if self.manager is None:\n highest_rep = 0\n\n else:\n highest_rep = self.manager.reputation\n\n for offer in self.offers:\n if offer.manager.reputation > highest_rep:\n highest_offer = offer\n\n if highest_offer is not None:\n highest_offer.accept()\n\n self.offers = []", "def choose_action(self, agent_data):\r\n action_value_estimates = agent_data[\"action_value_estimates\"]\r\n roll = random.uniform(0,1)\r\n if roll <= self.epsilon:\r\n action = random.choice( list( range(0,len(action_value_estimates))))\r\n else:\r\n action = self.argmax_with_random_tiebreaker(action_value_estimates)\r\n return action", "def _select_heuristic(self):\n\n # take a sample of rewards from the current prior of heuristics\n sample_rewards = np.random.normal(self.prior_mus, self.prior_sigmas)\n\n # select the heuristic that has the highest reward sample value\n self.best_heuristic_idx = np.argmax(sample_rewards)\n self.best_heuristic = self.heuristics[self.best_heuristic_idx]\n self.heuristic_selection.append(self.best_heuristic_idx)", "def vote(self, agents):\n\n suspects = []\n known_impostor = -1\n # Check which agents the current agent still suspects\n for a in agents:\n if self.km.knows_imp(self.agent_id, a.agent_id):\n known_impostor = a.agent_id\n self.logger.log(f\"Crewmate {self.agent_id} suspects {a.agent_id}\", Logger.LOG | Logger.PRINT_VISUAL)\n elif self.km.suspects(self.agent_id, a.agent_id):\n suspects.append(a.agent_id)\n self.logger.log(f\"Crewmate {self.agent_id} suspects {a.agent_id}\", Logger.LOG | Logger.PRINT_VISUAL)\n\n if known_impostor != -1:\n vote = known_impostor\n else:\n # Randomly vote for an agent on the suspect-list\n vote = random.sample(suspects, 1)[0]\n\n # If you are not yet sure, there is a probability that you vote pass.\n # This probability increases if you suspect more people (and are therefore less sure)\n threshold = (len(suspects) / (self.num_crew + self.num_imp)) * 0.5\n if random.random() < threshold:\n vote = -1\n\n self.logger.log(f\"Crewmate {self.agent_id} votes for {vote}\\n\", Logger.PRINT_VISUAL | Logger.LOG)\n return vote", "def choose_target(self, agents):\n\n number_of_suspects = [0]*(len(agents))\n number_of_suspects_per_agent = []\n\n index = 0\n for a1 in agents:\n if not a1.is_impostor():\n for a2 in agents:\n if self.km.suspects(a1.agent_id, a2.agent_id):\n number_of_suspects[index] = number_of_suspects[index] + 1\n else:\n number_of_suspects[index] = 999999\n number_of_suspects_per_agent.append((a1.agent_id,number_of_suspects[index]))\n index = index + 1\n\n self.target = min(number_of_suspects_per_agent, key = lambda t: t[1])[0]", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def pick_action(self, observation):\n # 注意: 只有此处不一样, 即TS里是从后验分布中采样,而epsilon-greedy是计算期望\n sampled_means = self.get_posterior_sample() # 每个arm都采样一个reward均值, [arm, 1]\n action = random_argmax(sampled_means) # 选择产生最大的均值的action\n return action", "def vote(self, agents):\n\n # If the impostors have a set target, vote that\n if self.target != -1:\n vote = self.target\n else: # Vote a random living agents\n vote = random.sample([a.agent_id for a in agents if not a.agent_id == self.agent_id and a.alive and not a.is_impostor()], 1)[0]\n\n self.target = -1\n self.logger.log(f\"Impostor {self.agent_id} votes 
for {vote}\", Logger.LOG | Logger.PRINT_VISUAL)\n return vote", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n for idx,a in enumerate(actions):\n baby = self.getSuccessor(gameState, a)\n qsum = [self.evaluate(baby, action) for action in baby.getLegalActions(self.index)]\n values[idx] += min(qsum) \n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)", "def chooseAction(self, gameState):\n\n ####print \"chooseAction Called\"\n\n #self.lastEatenFood = None\n\n\n actions = gameState.getLegalActions(self.index)\n\n ##print \"\\nNEW ACTION\\n--------\"\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # ###print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n \n\n return random.choice(bestActions)", "def _choose_best_option(self):", "def chooseAction(self, gameState):\r\n actions = gameState.getLegalActions(self.index)\r\n\r\n # You can profile your evaluation time by uncommenting these lines\r\n # start = time.time()\r\n values = [self.evaluate(gameState, a) for a in actions]\r\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\r\n\r\n maxValue = max(values)\r\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\r\n\r\n foodLeft = len(self.getFood(gameState).asList())\r\n\r\n if foodLeft <= 2:\r\n bestDist = 9999\r\n for action in actions:\r\n successor = self.essor(gameState, action)\r\n pos2 = successor.getAgentPosition(self.index)\r\n dist = self.getMazeDistance(self.start,pos2)\r\n if dist < bestDist:\r\n bestAction = action\r\n bestDist = dist\r\n return bestAction\r\n\r\n return random.choice(bestActions)", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n obs = gameState.getAgentDistances()\n for o in self.opponents:\n self.observe(o, obs[o], gameState)\n self.displayDistributionsOverPositions(self.distributions)\n\n # You can profile your evaluation time by uncommenting these lines\n start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n #self.elapseTime(gameState)\n\n return random.choice(bestActions)", "def chooseAction(self, gameState):\n\n # Track opponents position\n self.trackGhosts(gameState)\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n\n # You can profile your evaluation time by uncommenting these lines\n values = [self.evaluate(gameState, a) for a in actions]\n\n # Trick to avoid getting stuck in the same position 
for too long\n okValues = []\n okActions = []\n for i, a in enumerate(actions):\n newPos = self.getSuccessor(gameState, a).getAgentState(self.index).getPosition()\n # If any newPos is in lastPositions more than twice, we remove it from the list\n if self.lastPositions.count(newPos) <= 2:\n okValues.append(values[i])\n okActions.append(a)\n\n # Choose best action from list of actions\n try:\n maxValue = max(okValues)\n bestActions = [a for a, v in zip(okActions, okValues) if v == maxValue]\n except ValueError:\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n chosenAction = random.choice(bestActions)\n\n print \"BEST ACTION:\", chosenAction, maxValue, \"\\n--------------------------------------------------------------\\n\"\n # if chosenAction == Directions.STOP:\n # raw_input(\"Stopped! Go check what happened\")\n\n # Save current chosen position in lastPositions and advance the index\n self.lastPositions[self.lastPositionsIdx % self.positionsStored] = self.getSuccessor(gameState, chosenAction).getAgentState(self.index).getPosition()\n self.lastPositionsIdx += 1\n\n # If we are eating any ghost, update our future belief about it\n self.updateEatenOpponents2(gameState, chosenAction)\n # Update food eaten by opponents\n self.babies = self.getFoodYouAreDefending(gameState).asList()\n # Update self.isPacman\n self.isPacman = [self.getSuccessor(gameState, chosenAction).getAgentState(i).isPacman for i in range(gameState.getNumAgents())]\n\n return chosenAction", "def execute_best_actions(self):\n while True:\n print(\"In execute_best_actions\")\n s = self.get_state_num()\n qvals = self.Q[s]\n # Get action with largest qval\n best_action = np.argmax(qvals)\n # We don't actually update with rewards,\n # but use them to know when to perform next action\n # We want to travel 0.5 m in action's direction.\n self.apply_action(best_action)\n while self.reward == None:\n rospy.sleep(0.5)\n print(\"Reward =\", self.reward)\n self.reward = None", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n \n def MaxValue(gameState, currentDepth, agentNumber, alpha, beta):\n if currentDepth is self.depth or gameState.isWin() or gameState.isLose():\n return (self.evaluationFunction(gameState), Directions.NORTH)\n \n largestValue = float(\"-inf\")\n bestAction = Directions.NORTH\n for action in gameState.getLegalActions(agentNumber):\n successor = gameState.generateSuccessor(agentNumber, action)\n nextAgentNumber = (agentNumber + 1) % gameState.getNumAgents()\n successorValue = MinValue(successor, currentDepth, nextAgentNumber, alpha, beta)[0]\n if(successorValue >= beta):\n return (successorValue, action)\n alpha = max(alpha, successorValue)\n if(successorValue > largestValue):\n largestValue = successorValue\n bestAction = action\n return (largestValue, bestAction)\n \n def MinValue(gameState, currentDepth, agentNumber, alpha, beta):\n if currentDepth is self.depth or gameState.isWin() or gameState.isLose():\n return (self.evaluationFunction(gameState), Directions.NORTH)\n \n smallestValue = float(\"inf\")\n bestAction = Directions.NORTH \n for action in gameState.getLegalActions(agentNumber):\n successor = gameState.generateSuccessor(agentNumber, action)\n nextAgentNumber = (agentNumber + 1) % gameState.getNumAgents()\n if nextAgentNumber is 0:\n successorValue = MaxValue(successor, currentDepth + 1, nextAgentNumber, alpha, beta)[0]\n else:\n successorValue = MinValue(successor, currentDepth, nextAgentNumber, alpha, beta)[0]\n if(successorValue <= alpha):\n 
return (successorValue, action)\n beta = min(beta, successorValue)\n if(successorValue < smallestValue):\n smallestValue = successorValue\n bestAction = action\n return (smallestValue, bestAction)\n\n alpha = float(\"-inf\")\n beta = float(\"inf\") \n result=MaxValue(gameState, 0, 0, alpha, beta)\n resultActionToTake = result[1]\n #import time\n #print 'AlphaBeta value for depth ', self.depth,' ',result[0]\n #time.sleep(1000)\n return resultActionToTake", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n\n def MaxValue(gameState, currentDepth, agentNumber):\n \n if currentDepth is self.depth or gameState.isWin() or gameState.isLose():\n \n #print 'evaluation function at leaf ',self.evaluationFunction(gameState)\n return (self.evaluationFunction(gameState), Directions.NORTH)\n #print 'depth ',currentDepth\n \n largestValue = float(\"-inf\")\n bestAction = Directions.NORTH\n for action in gameState.getLegalActions(agentNumber):\n #print 'analyzing ',action,' for pacman ',agentNumber\n successor = gameState.generateSuccessor(agentNumber, action)\n successorValue = MinValue(successor, currentDepth, (agentNumber + 1) % gameState.getNumAgents())[0]\n if(successorValue > largestValue):\n largestValue = successorValue\n bestAction = action\n return (largestValue, bestAction)\n \n def MinValue(gameState, currentDepth, agentNumber):\n if currentDepth is self.depth or gameState.isWin() or gameState.isLose():\n #print 'evaluation function at leaf ',self.evaluationFunction(gameState)\n return (self.evaluationFunction(gameState), Directions.NORTH)\n \n #print 'depth ',currentDepth\n \n smallestValue = float(\"inf\")\n bestAction = Directions.NORTH \n for action in gameState.getLegalActions(agentNumber):\n #print 'analyzing ',action,' for ghost ',agentNumber\n successor = gameState.generateSuccessor(agentNumber, action)\n nextAgentNumber = (agentNumber + 1) % gameState.getNumAgents()\n if nextAgentNumber is 0:\n successorValue = MaxValue(successor, currentDepth + 1, nextAgentNumber)[0]\n else:\n successorValue = MinValue(successor, currentDepth, nextAgentNumber)[0]\n if(successorValue < smallestValue):\n smallestValue = successorValue\n bestAction = action\n return (smallestValue, bestAction)\n\n result = MaxValue(gameState, 0, 0)\n resultActionToTake = result[1]\n #print 'Minimax value for depth ', self.depth,' ',result[0]\n #import time\n #time.sleep(1000000)\n return resultActionToTake", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n opIndices = self.getOpponents(gameState)\n opStates = [gameState.getAgentState(i) for i in opIndices]\n 
opCarry = [x.numCarrying for x in opStates]\n \n if max(opCarry) >= 5:\n self.isOffensive = False\n else:\n self.isOffensive = True\n\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n\n\n # print if get eaten\n myPos = gameState.getAgentPosition(self.index)\n prevGameState = self.getPreviousObservation()\n if prevGameState is not None:\n\n previousPos = prevGameState.getAgentPosition(self.index)\n if self.getMazeDistance(myPos, previousPos) > 1:\n print(\"prePostion\",previousPos)\n print()\n previousLegalAction = prevGameState.getLegalActions(self.index)\n print([(self.evaluate(prevGameState, a), a) for a in previousLegalAction])\n print()\n print(self.getNonScaredGhostPos(prevGameState))\n print()\n print()\n\n\n return random.choice(bestActions)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start, pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n \n def miniMaxAgent(state, depth, agentIndex):\n \n #return evaluation function if game is won or lost or the depth is reached\n if state.isLose() or state.isWin() or depth == self.depth:\n return self.evaluationFunction(state)\n \n #if agent index is 0 that is pacman so get the maximum value\n if agentIndex == 0:\n maximum = float(\"-inf\")\n actions = state.getLegalActions(agentIndex) #pacmans legal actions\n \n #get successor from all actions and set maximum as the largest value found\n for action in actions:\n successor = state.generateSuccessor(agentIndex, action)\n maximum = max(maximum, miniMaxAgent(successor, depth, 1)) #pass agent index as 1 to move onto the ghosts (min)\n \n return maximum\n \n #if agent index is greater than 0 it is a ghost so get the minimum value\n else:\n minimum = float(\"inf\")\n actions = state.getLegalActions(agentIndex)\n \n #get successor from all actions and set the minimum to the smallest value found\n for action in actions:\n successor = state.generateSuccessor(agentIndex, action) #ghosts legal actions\n \n #if all ghosts have found a min, increase depth and set agent back to pacman (max)\n if state.getNumAgents() -1 == agentIndex:\n minimum = min(minimum, miniMaxAgent(successor, depth + 1, 0))\n \n #increase agent index to get the next ghost\n else:\n minimum = min(minimum, miniMaxAgent(successor, depth, agentIndex + 1))\n return minimum\n \n score = float(\"-inf\")\n direction = None\n \n #get all the legal actions in the game state\n legalActions = gameState.getLegalActions()\n \n #get successor from each action in the game\n for action in legalActions:\n successor = gameState.generateSuccessor(0, action)\n \n #start at depth 0 and agent index 1\n value = 
miniMaxAgent(successor, 0, 1)\n \n #find the largest value and set direction to the action that returned that value\n if score < value:\n score = value\n direction = action\n \n return direction\n util.raiseNotDefined()", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n actions.remove('Stop') #DON'T STOP THE DISCO\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n\n if gameState.getAgentState(self.index).isPacman:\n self.turnsAsPacman+=1\n elif self.turnsAsPacman < 4 and self.turnsAsPacman > -1:\n self.loopProtection+=1\n if self.loopProtection > 2:\n self.loopProtection = -1\n self.turnsAsPacman = -1\n\n if self.index == self.debug_index:\n print(actions)\n print(values)\n # print(self.getPreviousObservation(), file=sys.stderr)\n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n # if self.index == 1:\n # print(bestActions, file=sys.stderr)\n\n #run for start if enough food is held\n foodLeft = len(self.getFood(gameState).asList())\n\n \"\"\"\n #maybe keep this, but it's not always efficient for hauler\n if foodLeft <= 2 or gameState.getAgentState(self.index).numCarrying > 5:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n \"\"\"\n\n choice = random.choice(bestActions)\n\n if self.index == self.debug_index:\n print(\"Choice: \" + choice)\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n return choice", "def chooseAction(self, gameState):\n #actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n #values = [self.evaluate(gameState, a) for a in actions] #no evaluation currently\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n #return random.choice(actions)\n if len(self.getFood(gameState).asList()) <= 2:\n print \"\\n Agent \", self.index,\"plan was terminated with\", len(self.getFood(gameState).asList()), \"dots left\"\n\n pos = gameState.getAgentState(self.index).getPosition()\n localbestDist = 9999\n dest = self.start\n bestDest = dest\n dist = self.getMazeDistance(dest,pos)\n\n for el in xrange(-2,5):\n try:\n idx = self.safeSpaces.index((self.safeColumn, pos[1] + el))\n dest = self.safeSpaces[idx]\n dist = self.getMazeDistance(dest,pos)\n #print \"possible destination at\", dest\n except ValueError:\n print \"X: \", (self.safeColumn, pos[1] + el), \"not valid destination\"\n continue\n\n print \"Current destination to check at \", dest, \"at dist:\", dist\n if dist < localbestDist:\n localbestDist = dist\n bestDest = dest\n\n bestDist = 9999\n for pos2, action, cost in getSuccessorsAlt(gameState, pos):\n dist = self.getMazeDistance(bestDest,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n\n print \"Agent \", self.index, \"found optimal safe space at\", bestDest , \"with dist\", bestDist, \"coloring spot now\"\n print \"Agent \", self.index, \"Going\", bestAction, \" from\",gameState.getAgentPosition(self.index), \"\\n\"\n self.debugDraw([bestDest], [1,1,0], clear=False)\n return bestAction\n\n 
if self.counter == 0: #-1:\n print \"Calculating\", self.cacheSize, \"moves as player\", self.index, \"from \", gameState.getAgentPosition(self.index)\n print \"Cached value\"\n self.best = self.ActionLoop(gameState, self.cacheSize)\n self.moves = self.best.getDir()[1]\n\n if not self.moves or len(self.moves) == 0:\n print \"Tried to play move, but ran Out of Moves!!!\"\n actions = gameState.getLegalActions(self.index)\n return random.choice(actions)\n\n self.intendedCoords = self.best.state[0]\n self.counter = self.cacheSize\n try:\n move = self.moves[self.cacheSize - self.counter]\n self.counter -= 1\n except:\n print \"Tried to access index\", self.cacheSize - self.counter, \"in list of length\", len(self.moves)#, \"more moves now generated\"\n print \"Agent\", self.index, \"Defaulting to closest Agent Protocol\"\n self.counter = 9999\n return calcMoves(self, gameState)\n\n\n #actions = gameState.getLegalActions(self.index)\n #self.counter = 0\n #return self.chooseAction(gameState)\n #return random.choice(actions)\n print \"On move \", self.cacheSize - self.counter, \"as player\", self.index, \"going\", move, \"from\", gameState.getAgentPosition(self.index)\n return move", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n def maxvalue(gameState, alpha, beta, depth):\n if gameState.isWin() or gameState.isLose() or depth == 0:\n return self.evaluationFunction(gameState)\n v = -(float(\"inf\"))\n legalActions = gameState.getLegalActions(0)\n for action in legalActions:\n nextState = gameState.generateSuccessor(0, action)\n v = max(v, minvalue(nextState, alpha, beta, gameState.getNumAgents() - 1, depth))\n if v >= beta:\n return v\n alpha = max(alpha, v)\n return v\n \n def minvalue(gameState, alpha, beta, agentindex, depth):\n numghosts = gameState.getNumAgents() - 1\n if gameState.isWin() or gameState.isLose() or depth == 0:\n return self.evaluationFunction(gameState)\n v = float(\"inf\")\n legalActions = gameState.getLegalActions(agentindex)\n for action in legalActions:\n nextState = gameState.generateSuccessor(agentindex, action)\n if agentindex == numghosts:\n v = min(v, maxvalue(nextState, alpha, beta, depth - 1))\n if v <= alpha:\n return v\n beta = min(beta, v)\n else:\n v = min(v, minvalue(nextState, alpha, beta, agentindex + 1, depth))\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n \n \n\n legalActions = gameState.getLegalActions(0)\n bestaction = Directions.STOP\n score = -(float(\"inf\"))\n alpha = -(float(\"inf\"))\n beta = float(\"inf\")\n for action in legalActions:\n nextState = gameState.generateSuccessor(0, action)\n prevscore = score\n score = max(score, minvalue(nextState, alpha, beta, 1, self.depth))\n if score > prevscore:\n bestaction = action\n if score >= beta:\n return bestaction\n alpha = max(alpha, score)\n return bestaction", "def pick_action(self, observation):\n if np.random.rand() < self.epsilon:\n action = np.random.randint(self.n_arm) # 从n个arm中随机选择一个\n else: # 1-epsilon greedy\n # 所谓reward, 就是success平均值\n posterior_means = self.get_posterior_mean() # shape:[arm, 1], 从中选择一个reward最大的arm\n action = random_argmax(posterior_means)\n\n return action", "def chooseAction(self, gameState):\n if len(self.getFood(gameState).asList()) < self.numFood:\n if gameState.getAgentState(self.offensiveIndex).isPacman == True:\n self.hasFood = True\n else:\n self.hasFood = False\n self.numFood = len(self.getFood(gameState).asList())\n if gameState.getAgentState(self.offensiveIndex).isPacman == False:\n self.hasFood = False\n\n actions = 
gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.getValue(gameState, a, self.index, self.depth) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n # if self.index == 1:\n # print(self.hasFood)\n\n foodLeft = len(self.getFood(gameState).asList())\n\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = min([self.getMazeDistance(point, pos2) for point in self.getHomeLocations(gameState)])\n\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n\n return bestAction\n\n return random.choice(bestActions)", "def _next_action(self) -> SingleBriberyAction:\n self._current_rating = self.get_graph().eval_graph(self.get_briber_id())\n if self._previous_rating is None:\n self._previous_rating = self._current_rating\n next_act = SingleBriberyAction(self)\n try:\n self._next_node = self.get_graph().get_random_customer(excluding=self._info_gained | self._bribed)\n except IndexError:\n print(f\"WARNING: {self.__class__.__name__} found no influential nodes, not acting...\", file=sys.stderr)\n return next_act\n if self._current_rating - self._previous_rating > self._max_rating_increase:\n self._best_node = self._last_node\n self._max_rating_increase = self._current_rating - self._previous_rating\n maximum_bribe = min(self.get_resources(), self._bribe_to_max())\n if self._c >= self._i and self._best_node is not None and maximum_bribe > 0:\n next_act.add_bribe(self._best_node, maximum_bribe)\n self._bribed.add(self._best_node)\n self._info_gained = set()\n self._c = 0\n self._max_rating_increase = 0\n self._best_node = 0\n else:\n if self._c >= self._i:\n print(f\"WARNING: {self.__class__.__name__} has not found an influential node in {self._c} tries \"\n f\"(intended maximum tries {self._i}), continuing search...\",\n file=sys.stderr)\n # Bid an information gaining bribe, which is at most k, but is\n # smaller if you need to bribe less to get to the full bribe\n # or don't have enough money to bid k.\n next_act.add_bribe(self._next_node, min(self._bribe_to_max(), min(self.get_resources(), self._k)))\n self._info_gained.add(self._next_node)\n self._c = self._c + 1\n self._last_node = self._next_node\n self._previous_rating = self._current_rating\n return next_act", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n \" Max value \"\n def max_value(gameState, depth, alpha, beta):\n \n action = gameState.getLegalActions(0) \n if gameState.isWin() or gameState.isLose() or depth == self.depth:\n return (self.evaluationFunction(gameState), None)\n \n #initialize v to - infinity\n v = -(float(\"inf\"))\n for i in action:\n next_state = gameState.generateSuccessor(0, i)\n value, action = min_value(next_state, 1, depth, alpha, beta)\n \n if (v < value):\n v = value\n take_action = i\n\n if (v > beta):\n return (v, take_action)\n\n alpha = max(alpha, v)\n\n return (v, take_action)\n\n \n def min_value(gameState, agent, depth, alpha, beta):\n \n ghost_action = gameState.getLegalActions(agent) \n if len(ghost_action) == 0:\n return (self.evaluationFunction(gameState), None)\n\n #initialize v to +infinity\n v = float(\"inf\")\n \n\n for i in ghost_action:\n next_state = gameState.generateSuccessor(agent, i)\n ghost_no = gameState.getNumAgents() 
\n if (agent == ghost_no - 1):\n new_depth= depth+1\n value, action = max_value(next_state, new_depth, alpha, beta)\n else:\n new_agent= agent+1\n value, action = min_value(next_state, new_agent, depth, alpha, beta)\n \n if (value < v):\n v = value\n take_action = i\n\n if (v < alpha):\n return (v, take_action)\n\n beta = min(beta, v)\n\n return (v, take_action)\n\n alpha = -(float(\"inf\"))\n beta = float(\"inf\")\n final_value, final_action = max_value(gameState, 0, alpha, beta)\n return final_action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # Again, we use the fundamental foundation built in Q2 for Q4, however here we modify our minimizer function\n # to serve the purpose of finding the expected value\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n def maximizer(curState, agentIndex, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex + 1, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex + 1, depth)\n return weight\n else:\n # if there are no legal actions left then evaluate at the last known state\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, depth):\n ghostActions = curState.getLegalActions(agentIndex)\n weight = 0 # Starting value of zero to be incremented below\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = (float(1.0) / len(ghostActions))*maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, depth+1)\n weight = weight + temp\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = (float(1.0) / len(ghostActions))*minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, depth)\n weight = weight + temp\n return weight\n else:\n # if there are no legal actions left then evaluate at the last known state\n return self.evaluationFunction(curState)\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n count[x] = minimizer(tempState,1,0)\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def actionSelector(self): \n if self.Temp!=0:\n if len(self.lessons) > 60 and self.var_T: \n # if the agent haven't already gotten food since a certain time \n # we increase the temperature by 0.001 \n if self.count_without_food>12:\n self.Temp += 0.01 \n if self.Temp>=(self.var_T[0]): \n self.Temp = self.var_T[0] \n # otherwise we decrease the temperatur by 0.001 \n else: \n self.Temp -= 0.001\n if self.Temp <= (self.var_T[-1]):\n self.Temp = self.var_T[-1]\n \n s = np.sum([np.exp(float(k)/self.Temp) for k in self.U_list])\n\n 
self.action_proba =[np.exp(float(m)/self.Temp)/s for m in self.U_list]\n action = np.random.choice(np.arange(4),p=self.action_proba) # choice a random choice relating to the probability distribution given by the softmax algorith \n else:\n action = np.argmax(self.U_list)\n return action" ]
[ "0.6727426", "0.63760585", "0.6346558", "0.62901396", "0.62776285", "0.6214636", "0.6204978", "0.6166298", "0.60946786", "0.6084147", "0.60721874", "0.6062389", "0.6024519", "0.6020219", "0.60040855", "0.59740645", "0.5961315", "0.5941096", "0.59343535", "0.5914328", "0.5909301", "0.5894947", "0.5892176", "0.5891886", "0.5861208", "0.5847508", "0.5835901", "0.5829674", "0.5823958", "0.5823844" ]
0.6629885
1
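A hypothetical sketch of the selection rule the test above asserts. This is not the dataset's actual Agent implementation, only an illustration of the policy "earliest-available agent wins, with ties on availability broken by the higher rating".

from dataclasses import dataclass

@dataclass
class SimpleAgent:
    name: str
    rating: float
    next_free_at: int = 0  # time (seconds) at which the agent becomes free

def pick_agent(agents, handling_time=60):
    # Earliest availability first; among equally free agents, highest rating wins.
    best = min(agents, key=lambda a: (a.next_free_at, -a.rating))
    wait_time = best.next_free_at
    best.next_free_at += handling_time  # the chosen agent is busy for this customer
    return best, wait_time

agents = [SimpleAgent("a", rating=3.0), SimpleAgent("b", rating=4.5)]
agent, wait = pick_agent(agents); print(agent.name, wait)  # b 0  (both free, higher rating wins)
agent, wait = pick_agent(agents); print(agent.name, wait)  # a 0  (b is busy, a is still free)
agent, wait = pick_agent(agents); print(agent.name, wait)  # b 60 (both busy; rating breaks the tie)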
Given a dialect, returns the dialect type, which defines the engine/system that is used to communicate with the database/database implementation. Currently checks for Redshift/BigQuery dialects
def _get_dialect_type_module(dialect):
    if dialect is None:
        logger.warning(
            "No sqlalchemy dialect found; relying in top-level sqlalchemy types."
        )
        return sa
    try:
        # Redshift does not (yet) export types to top level; only recognize base SA types
        if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):
            return dialect.sa
    except (TypeError, AttributeError):
        pass
    # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
    try:
        if (
            isinstance(
                dialect,
                pybigquery.sqlalchemy_bigquery.BigQueryDialect,
            )
            and bigquery_types_tuple is not None
        ):
            return bigquery_types_tuple
    except (TypeError, AttributeError):
        pass
    return dialect
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self) -> Text:\n\n return \"detect_dialect\"", "def get_dialect(self):\n\t\tif self.is_single_col:\n\t\t\treturn None\n\n\t\tif self.delimiter and self.quotechar:\n\t\t\treturn Dialect(self.delimiter, self.quotechar,\n\t\t\t\t\t\tTrue if self.escapechar is None else False,\n\t\t\t\t\t\tself.escapechar)\n\n\t\text = os.path.basename(self.file_path).rsplit('.', maxsplit=1)\n\t\text = ext[1].lower() if len(ext) > 1 else None\n\n\t\tif ext in TSV_EXTENSIONS:\n\t\t\tself.delimiter = '\\t'\n\t\t\tself.quotechar = '\"'\n\n\t\telse:\n\t\t\tf = self._open()\n\t\t\tlines = f.read().splitlines()\n\t\t\tf.close()\n\n\t\t\tif lines:\n\t\t\t\tdialect = self._determine_dialect(lines)\n\t\t\telse:\n\t\t\t\tdialect = None\n\n\t\t\tif dialect is None:\n\t\t\t\tself.is_single_col = True\n\t\t\telse:\n\t\t\t\tself.delimiter = dialect.delimiter\n\t\t\t\tself.quotechar = dialect.quotechar\n\t\t\t\tself.escapechar = dialect.escapechar\n\n\t\treturn self.get_dialect()", "def _get_engine(**kwargs):\n engine_name = 'MySQL'\n return engine_name", "def get_engine_type(self):", "def get_db_engine(self, db_type: t.Union[DBType, str]):\n db_type = DBType(db_type)\n return {\n DBType.WEB: self.ch.web_db_engine,\n DBType.ORCH: self.ch.orch_db_engine\n }[db_type]", "def engine_type(self):\n return self._engine_type", "def row_number_supported(self, dialect, dialect_override=None):\n if dialect_override is not None:\n return dialect_override\n supported_dialects = [\"mssql\", \"postgresql\", \"oracle\", \"mysql\",\n \"mariadb\"]\n sqlite_ver = sqlite3.sqlite_version_info\n if sqlite_ver[0] > 3 or (sqlite_ver[0] == 3 and sqlite_ver[1] >= 25):\n supported_dialects.append(\"sqlite\") # pragma: no cover\n return dialect.lower() in supported_dialects", "def _determine_dialect(self, lines):\n\t\tpermuts = [(quotechar, escapechar)\n\t\t\t\tfor quotechar in CSV_QUOTECHARS\n\t\t\t\tfor escapechar in CSV_ESCAPECHARS]\n\n\t\tfor delim in CSV_DELIMITERS:\n\t\t\tcounts = [line.count(delim) for line in lines]\n\n\t\t\tif min(counts) == 0:\n\t\t\t\tcontinue\n\n\t\t\tfor quotechar, escapechar in permuts:\n\t\t\t\tdoublequote = True if escapechar is None else False\n\n\t\t\t\treader = csv.reader(lines, delimiter=delim, quotechar=quotechar,\n\t\t\t\t\t\t\t\tdoublequote=doublequote, escapechar=escapechar)\n\n\t\t\t\ttry:\n\t\t\t\t\tassert len(set([len(line) for line in reader])) == 1\n\t\t\t\texcept AssertionError:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue # no suitable quoting found\n\n\t\t\tbreak # found it!\n\n\t\telse:\n\t\t\treturn None\n\n\t\treturn Dialect(delim, quotechar, doublequote, escapechar)", "def get_database_engine(scheme):\n scheme_to_engine = {\n 'postgresql': 'django.db.backends.postgresql',\n 'postgres': 'django.db.backends.postgresql',\n 'mysql': 'django.db.backends.mysql',\n 'sqlite': 'django.db.backends.sqlite3',\n 'oracle': 'django.db.backends.oracle',\n }\n\n try:\n return scheme_to_engine[scheme]\n except KeyError:\n raise ValueError(\"Unsupported database '{}'\".format(scheme))", "def get_database_engine() -> Engine:\n return engine", "def engine_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_type\")", "def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")", "def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")", "def engine_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_type\")", "def engine_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, 
\"engine_type\")", "def _get_supported_grype_db_version() -> str:\n grype_wrapper = GrypeWrapperSingleton.get_instance()\n try:\n version_response = grype_wrapper.get_grype_version()\n except CommandException as exc:\n raise GrypeVersionCommandError() from exc\n try:\n return str(version_response[\"supportedDbSchema\"])\n except KeyError as exc:\n raise InvalidGrypeVersionResponse(json.dumps(version_response)) from exc", "def sql_type(dtype):\n if dtype.kind in (\"i\",\"u\",\"f\"):\n # It's a numeric type\n if dtype == np.int32:\n return \"integer\"\n elif dtype == np.int64:\n return \"bigint\"\n elif dtype == np.float32:\n return \"real\"\n elif dtype == np.float64:\n return \"float\"\n else:\n raise ValueError(\"Unsupported data type \"+str(dtype))\n elif dtype.kind == \"S\":\n # It's a string\n # Note: this assumes 1 byte = 1 character!\n return (\"char(%d)\" % dtype.itemsize)\n else:\n # Not numeric or string, don't know what to do with this!\n raise ValueError(\"Unsupported data type \"+str(dtype))", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def sql_server_license_type(self) -> Optional[str]:\n return pulumi.get(self, \"sql_server_license_type\")", "def get_sql_engine(cls, db_uri: str) -> Engine:\n return create_engine(db_uri)", "def get_engine_string(conn_type=\"mysql+pymysql\", DATABASE_NAME='msia423'):\n\n user = os.environ.get(\"MYSQL_USER\")\n password = os.environ.get(\"MYSQL_PASSWORD\")\n host = os.environ.get(\"MYSQL_HOST\")\n port = os.environ.get(\"MYSQL_PORT\")\n\n engine_string = \"{}://{}:{}@{}:{}/{}\".format(conn_type, user, password, host, port, DATABASE_NAME)\n\n logging.debug(\"engine string: %s\" % engine_string)\n return engine_string", "def _mysql_get_effective_sql_mode(engine):\n # Get the real effective SQL mode. 
Even when unset by\n # our own config, the server may still be operating in a specific\n # SQL mode as set by the server configuration.\n # Also note that the checkout listener will be called on execute to\n # set the mode if it's registered.\n row = engine.execute(\"SHOW VARIABLES LIKE 'sql_mode'\").fetchone()\n if row is None:\n return\n return row[1]", "def validate_engine_mode(engine_mode):\n\n VALID_DB_ENGINE_MODES = (\n \"provisioned\",\n \"serverless\",\n \"parallelquery\",\n \"global\",\n \"multimaster\",\n )\n\n if engine_mode not in VALID_DB_ENGINE_MODES:\n raise ValueError(\n \"DBCluster EngineMode must be one of: %s\" % \", \".join(VALID_DB_ENGINE_MODES)\n )\n return engine_mode", "def get_engine(self, connection_string):\n if connection_string not in sqlengines:\n sqlengines[connection_string] = create_engine(\n self.get_connection_string())\n return sqlengines[connection_string]", "def get_engine(self, connection_string):\n if connection_string not in sqlengines:\n sqlengines[connection_string] = create_engine(\n self.get_connection_string())\n return sqlengines[connection_string]", "def __str__(self):\n if self.dialect == \"sqlite\":\n db_conn = self._sqlite()\n elif self.dialect.startswith(\"postgres\"):\n db_conn = self._postgresql()\n elif self.dialect == \"mysql\":\n db_conn = self._mysql()\n else:\n raise ValueError(\"Database dialect not supported\")\n self._test_connection(db_conn)\n return db_conn", "def datastore_type(self) -> Optional[str]:\n return pulumi.get(self, \"datastore_type\")", "def _get_type(_type: TypeEngine) -> Type:\n types_map: Mapping[Type[TypeEngine], Type] = {\n sqltypes.Text: str,\n sqltypes.String: str,\n sqltypes.Integer: int,\n sqltypes.BigInteger: int,\n sqltypes.SmallInteger: int,\n sqltypes.Boolean: bool,\n sqltypes.Date: datetime.date,\n sqltypes.DateTime: datetime.datetime,\n sqltypes.Interval: datetime.timedelta,\n sqltypes.Time: datetime.time,\n sqltypes.Float: float,\n }\n\n try:\n return types_map[_type] # type: ignore ; # TODO: find out why\n except KeyError as e:\n raise UnsupportedTypeError(\n f\"Type {_type} is not currently supported by Scheems. Please make an issue at https://github.com/anand2312/scheems/issues to add it.\"\n ) from e", "def get_engine(self, db_name):\n pass", "def get_db_engine():\n # get database connection url\n connection_url = get_db_connection_url()\n\n # Create engine from connection url\n engine = create_engine(connection_url)\n\n return engine" ]
[ "0.65882957", "0.5873187", "0.58714145", "0.5629928", "0.56089145", "0.5572354", "0.5559554", "0.54928523", "0.5472916", "0.54684013", "0.5411899", "0.5407298", "0.5407298", "0.5366812", "0.5335421", "0.5184428", "0.5175622", "0.51554245", "0.51459706", "0.50762737", "0.50750774", "0.50494033", "0.5045315", "0.50195354", "0.50195354", "0.5017188", "0.49841008", "0.4973914", "0.4958576", "0.49432907" ]
0.7448258
0
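Illustrative only, not part of the row above: a minimal way to exercise a dialect-type lookup such as _get_dialect_type_module from a live SQLAlchemy engine. Only sqlalchemy itself is assumed to be installed.

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")  # in-memory SQLite, no server required
dialect = engine.dialect
print(dialect.name)  # "sqlite"

# Minimal stand-in for the lookup: prefer a dialect-specific type module when the
# dialect exposes one (Redshift's dialect does, via `.sa`), otherwise fall back to
# the top-level sqlalchemy types.
types_module = getattr(dialect, "sa", None) or sa
print(types_module.__name__)  # "sqlalchemy"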
Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the desired database. Also initializes the dialect to be used and configures usage statistics.
def __init__(
    self,
    name=None,
    credentials=None,
    data_context=None,
    engine=None,
    connection_string=None,
    url=None,
    batch_data_dict=None,
    create_temp_table=True,
    **kwargs,  # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
):
    super().__init__(name=name, batch_data_dict=batch_data_dict)
    self._name = name

    self._credentials = credentials
    self._connection_string = connection_string
    self._url = url
    self._create_temp_table = create_temp_table

    if engine is not None:
        if credentials is not None:
            logger.warning(
                "Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. "
                "Ignoring credentials."
            )
        self.engine = engine
    elif credentials is not None:
        self.engine = self._build_engine(credentials=credentials, **kwargs)
    elif connection_string is not None:
        self.engine = sa.create_engine(connection_string, **kwargs)
    elif url is not None:
        self.drivername = urlparse(url).scheme
        self.engine = sa.create_engine(url, **kwargs)
    else:
        raise InvalidConfigError(
            "Credentials or an engine are required for a SqlAlchemyExecutionEngine."
        )

    # Get the dialect **for purposes of identifying types**
    if self.engine.dialect.name.lower() in [
        "postgresql",
        "mysql",
        "sqlite",
        "oracle",
        "mssql",
    ]:
        # These are the officially included and supported dialects by sqlalchemy
        self.dialect_module = import_library_module(
            module_name="sqlalchemy.dialects." + self.engine.dialect.name
        )
    elif self.engine.dialect.name.lower() == "snowflake":
        self.dialect_module = import_library_module(
            module_name="snowflake.sqlalchemy.snowdialect"
        )
    elif self.engine.dialect.name.lower() == "redshift":
        self.dialect_module = import_library_module(
            module_name="sqlalchemy_redshift.dialect"
        )
    elif self.engine.dialect.name.lower() == "bigquery":
        self.dialect_module = import_library_module(
            module_name="pybigquery.sqlalchemy_bigquery"
        )
    else:
        self.dialect_module = None

    if self.engine and self.engine.dialect.name.lower() in [
        "sqlite",
        "mssql",
        "snowflake",
        "mysql",
    ]:
        # sqlite/mssql temp tables only persist within a connection so override the engine
        self.engine = self.engine.connect()

    # Send a connect event to provide dialect type
    if data_context is not None and getattr(
        data_context, "_usage_statistics_handler", None
    ):
        handler = data_context._usage_statistics_handler
        handler.send_usage_message(
            event="execution_engine.sqlalchemy.connect",
            event_payload={
                "anonymized_name": handler._execution_engine_anonymizer.anonymize(
                    self.name
                ),
                "sqlalchemy_dialect": self.engine.name,
            },
            success=True,
        )

    # Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values,
    # and set the instance "_config" variable equal to the resulting dictionary.
    self._config = {
        "name": name,
        "credentials": credentials,
        "data_context": data_context,
        "engine": engine,
        "connection_string": connection_string,
        "url": url,
        "batch_data_dict": batch_data_dict,
        "module_name": self.__class__.__module__,
        "class_name": self.__class__.__name__,
    }
    self._config.update(kwargs)
    filter_properties_dict(properties=self._config, inplace=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_engine(self, credentials, **kwargs) -> \"sa.engine.Engine\":\n # Update credentials with anything passed during connection time\n drivername = credentials.pop(\"drivername\")\n schema_name = credentials.pop(\"schema_name\", None)\n if schema_name is not None:\n logger.warning(\n \"schema_name specified creating a URL with schema is not supported. Set a default \"\n \"schema on the user connecting to your database.\"\n )\n\n create_engine_kwargs = kwargs\n connect_args = credentials.pop(\"connect_args\", None)\n if connect_args:\n create_engine_kwargs[\"connect_args\"] = connect_args\n\n if \"private_key_path\" in credentials:\n options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(\n drivername, credentials\n )\n else:\n options = sa.engine.url.URL(drivername, **credentials)\n\n self.drivername = drivername\n engine = sa.create_engine(options, **create_engine_kwargs)\n return engine", "def get_engine(db_credentials):\n\n url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format(\n user=db_credentials['user'], passwd=db_credentials['pwd'], host=db_credentials['host'], \n port=db_credentials['port'], db=db_credentials['db'])\n engine = create_engine(url, pool_size = 50)\n \n return engine", "def create_engine(self):\n connection_string = f'postgresql://{self.user}:{self.password}@{self.host}/{self.database_name}'\n return create_engine(connection_string)", "def __init__(\n self,\n sql_username,\n sql_password,\n sql_host,\n sql_port,\n sql_db,\n sql_url_template=(\"mysql+mysqldb://{username}:{password}@\"\n \"{host}:{port}/{db}?charset=utf8mb4\"),\n **kwargs\n ):\n\n # Internalize arguments.\n self.sql_username = sql_username\n self.sql_password = sql_password\n self.sql_host = sql_host\n self.sql_port = sql_port\n self.sql_db = sql_db\n self.sql_url_template = sql_url_template\n\n # Inspecting the presence of keyword arguments and (should they not be\n # defined) setting defaults.\n self.sql_engine_pool_size = kwargs.get(\"sql_engine_pool_size\", 1)\n self.sql_engine_pool_recycle = kwargs.get(\n \"sql_engine_pool_recycle\", 3600\n )\n self.sql_engine_echo = kwargs.get(\"sql_engine_echo\", False)\n self.mysqldb_sscursor = kwargs.get(\"mysqldb_sscursor\", False)\n self.expire_on_commit = kwargs.get(\"expire_on_commit\", False)\n\n # create DB engine.\n self.engine = self.connect()\n\n # create new session.\n self.session_factory = sqlalchemy.orm.sessionmaker(\n bind=self.engine,\n expire_on_commit=self.expire_on_commit\n )", "def create_engine(self, base):\n try:\n engine = create_engine(\n \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % (\n self._db_settings['DATABASE_USER'],\n self._db_settings['DATABASE_PASS'],\n self._db_settings['DATABASE_IP'],\n self._db_settings['DATABASE_PORT'],\n self._db_settings['DATABASE_NAME']),\n poolclass=NullPool)\n #pool_size=5,\n #max_overflow=10)\n base.metadata.create_all(engine)\n # Fix for forking\n #register_after_fork(engine, engine.dispose)\n return engine\n except ValueError as e: # Potentially corrupted DB config.\n self.error_handler.abort_framework(\n \"Database configuration file is potentially corrupted. 
Please check %s\\n[DB] %s\" %\n (self.config.get_val('DATABASE_SETTINGS_FILE'), str(e)))\n except KeyError: # Indicates incomplete db config file\n self.error_handler.abort_framework(\"Incomplete database configuration settings in %s\" %\n self.config.get_val('DATABASE_SETTINGS_FILE'))\n except exc.OperationalError as e:\n self.error_handler.abort_framework(\"[DB] %s\\nRun 'make db-run' to start/setup db\" % str(e))", "def connect_sqlalchemy(\n self,\n url=None,\n **kwargs\n ):\n if url is not None:\n self.engine = create_engine(url, **kwargs)\n else:\n self.engine = create_engine(\n \"oracle+cx_oracle://{}:{}@{}\".format(self.user_id, self.password, self.dsn), **kwargs\n )\n return self.engine", "def setup_engine():\n print(\"Setting up engine\")\n engine = create_engine('mysql+pymysql://{}:{}@{}/govhack2015'.format(\n username, password, ip_address))\n\n return engine", "def init_db(engine):\n base = declarative_base(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n base.metadata.reflect(bind = engine)\n connection = engine.raw_connection()\n cursor = connection.cursor()\n return session, base, connection, cursor", "def get_engine(settings: dict) -> sqlalchemy.engine.base.Engine:\n engine = create_engine(settings['sqlalchemy.url'], pool_recycle=3600)\n return engine", "def __init__(self, *args, **kwargs):\n # Since SQLAlchemyProvider can cater to multiple databases, it is important\n # that we know which database we are dealing with, to run database-specific\n # statements like `PRAGMA` for SQLite.\n if \"DATABASE\" not in args[2]:\n logger.error(f\"Missing `DATABASE` information in conn_info: {args[2]}\")\n raise ConfigurationError(\"Missing `DATABASE` attribute in Connection info\")\n\n super().__init__(*args, **kwargs)\n\n kwargs = self._get_database_specific_engine_args()\n\n self._engine = create_engine(make_url(self.conn_info[\"DATABASE_URI\"]), **kwargs)\n\n if self.conn_info[\"DATABASE\"] == Database.POSTGRESQL.value:\n # Nest database tables under a schema, so that we have complete control\n # on creating/dropping db structures. We cannot control structures in the\n # the default `public` schema.\n #\n # Use `SCHEMA` value if specified as part of the conn info. 
Otherwise, construct\n # and use default schema name as `DB`_schema.\n schema = (\n self.conn_info[\"SCHEMA\"] if \"SCHEMA\" in self.conn_info else \"public\"\n )\n\n self._metadata = MetaData(bind=self._engine, schema=schema)\n else:\n self._metadata = MetaData(bind=self._engine)\n\n # A temporary cache of already constructed model classes\n self._model_classes = {}", "def get_sql_engine(cls, db_uri: str) -> Engine:\n return create_engine(db_uri)", "def connect(self, url=None):\n\n # If no URL was provided then create one through `self.create_url`.\n if not url:\n url = self.create_url()\n\n # Create the engine.\n engine = sqlalchemy.create_engine(\n url,\n pool_size=self.sql_engine_pool_size,\n pool_recycle=self.sql_engine_pool_recycle,\n echo=self.sql_engine_echo,\n )\n\n # Connect to the database.\n engine.connect()\n\n return engine", "def sql_alch_engine(tunnel):\n\n port = str(tunnel.local_bind_port)\n\n # Create a database connection using sqlalchemy\n connection_addr = ('postgresql://'\n + config.dbreddit['user']\n + ':'\n + config.dbreddit['password']\n + '@localhost:'\n + port\n + '/'\n + config.dbreddit['dbname'])\n try:\n engine = create_engine(connection_addr)\n return engine\n except Exception as e:\n print(e)", "def create_engine(self):\n return create_engine('sqlite:///' + self.database_name, echo=True)", "def get_engine(db_params: Dict[str, str]) -> sa.engine:\r\n db_uri = get_uri(db_params)\r\n return sa.create_engine(db_uri)", "def init_database(cls):\n conn = config.db_connection_string(Settings)\n cls.Engine = create_engine(conn, echo=Settings.get('DEBUG'))\n cls.Session = sessionmaker(bind=cls.Engine)\n return cls", "def __init__(self, connection_url, echo=False):\n if not connection_url:\n raise ValueError('No database connection URL provided.')\n engine = create_engine(connection_url, echo=echo)\n PipelineRun.metadata.create_all(engine)\n self.session_factory = sessionmaker(bind=engine)", "def create_engine(uri=None, echo=None, **kwargs):\n conf = get_engine_conf()\n conf.update(kwargs)\n\n # replace 'dburi' with 'uri' for consistency\n if 'dburi' in conf:\n if not 'uri' in conf:\n conf['uri'] = conf['dburi']\n del conf['dburi']\n\n # override config with passed-in values\n conf['uri'] = uri or conf.get('uri')\n conf['echo'] = asbool(echo) or conf.get('echo')\n\n uri = conf.pop('uri')\n assert uri\n\n # call create_engine or fetch engine from cache\n\n ## use a sorted list of tuples since order isn't guaranteed\n ## in the dict\n conf_key = str(sorted(conf.items(), key=lambda x: x[0]))\n\n engine_key = '%s|%s' % (uri, conf_key)\n db_engines = pylons.config['pylons.db_engines']\n if engine_key in db_engines:\n engine = db_engines[engine_key]\n else:\n engine = db_engines[engine_key] = \\\n sqlalchemy.create_engine(uri, **conf)\n\n log.debug(\"Created engine using uri: %s with engine arguments %s\", uri, conf)\n return engine", "def __init__(self):\n user = os.getenv('URL_MYSQL_USER')\n passwd = os.getenv('URL_MYSQL_PWD')\n host = os.getenv('URL_MYSQL_HOST')\n database = os.getenv('URL_MYSQL_DB')\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(user, passwd, host, database))", "def engine(db_url=None):\n db_url = db_url or os.getenv(\"DB_URL\")\n if not db_url:\n raise ValueError(\"database URL is required\")\n print(f\"Returning an engine for {db_url}\")\n return create_engine(db_url)", "def __init__(self, config):\n\n engine = self.__my_create_engine(config)\n\n if not engine:\n raise Exception(\"No engine created\")\n\n engine.connect()\n 
#metadata = MetaData(bind=engine)\n Session = sessionmaker(bind=engine)\n\n # Set the objects to work with\n self.session = Session()", "def get_engine(username, password, ipaddress, database):\n #TODO(rnirmal):Based on permissions issues being resolved we may revert\n #url = URL(drivername='mysql', host='localhost',\n # query={'read_default_file': '/etc/mysql/my.cnf'})\n global ENGINE\n if ENGINE:\n return ENGINE\n if database:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306/%s\" %\n (username, password, ipaddress,database),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n else:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306\" %\n (username, password, ipaddress),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n return ENGINE", "def _set_database_engine(self, config):\n confi = config.copy()\n superuse = confi.pop(\"supdatabase\"), confi.pop(\"supusername\"), confi.pop(\"suppassword\")\n self.__engine = create_engine(URL(**confi))\n try:\n try:\n if self.__engine is not None:\n conn = self.__engine.connect()\n conn.close()\n except OperationalError:\n configdef = confi.copy()\n configdef[\"database\"] = superuse[0]\n self.__engine.dispose()\n self.__engine = create_engine(URL(**configdef))\n try:\n conn = self.__engine.connect()\n try:\n conn.execute(\"commit\")\n conn.execute(\"CREATE DATABASE %s;\" % config[\"database\"])\n finally:\n conn.close()\n except OperationalError:\n self.__engine.dispose()\n raise\n self.__engine.dispose()\n self.__engine = create_engine(URL(**confi))\n except ProgrammingError:\n raise", "def logic_db_engine(self):\n try:\n boto_session = boto3.Session(profile_name='loidsig')\n except:\n boto_session = boto3.Session()\n sm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n )\n get_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_DB')\n cred_dict = ast.literal_eval(get_secret_value_response['SecretString'])\n db_user, db_pass = cred_dict['username'], cred_dict['password']\n db_host, db_port, db_name = cred_dict['host'], cred_dict['port'], cred_dict['dbname']\n try:\n postgres_engine = create_engine(f'postgresql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}')\n except Exception as e:\n print(\"Unable to connect to postgres! 
Error: {}\".format(e))\n raise\n return postgres_engine", "def get_db_engine():\n # get database connection url\n connection_url = get_db_connection_url()\n\n # Create engine from connection url\n engine = create_engine(connection_url)\n\n return engine", "def __init__(self, dialect, database, username=None, password=None,\n host=None, port=None, query=None):\n db_url = URL(drivername=dialect, database=database, username=username,\n password=password, host=host, port=port, query=query)\n self.engine = create_engine(db_url)\n self.session = None\n self.get_session = sessionmaker(bind=self.engine,\n expire_on_commit=False)\n\n # create ORM objects for the tables\n self._define_tables()", "def initialize_engine( conn=environment.ENGINE ):\n if conn is not None:\n method = { 'sqlite': create_sqlite_engine,\n 'sqlite-file': _create_sqlite_file_engine,\n # 'mysql': _create_mysql_engine,\n # 'mysql_test': _create_mysql_test_engine\n }.get( conn )\n\n engine = method()\n # Base.metadata.create_all( engine )\n return engine\n\n raise ValueError", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))" ]
[ "0.7402342", "0.6850923", "0.6795974", "0.6642406", "0.6592185", "0.65612894", "0.64122355", "0.64055586", "0.63875616", "0.6348471", "0.6341745", "0.63360906", "0.6322239", "0.63080424", "0.62338716", "0.62057036", "0.6178479", "0.61481386", "0.61264646", "0.61171126", "0.6108398", "0.61009365", "0.6092388", "0.60877854", "0.6072697", "0.606867", "0.60512143", "0.6049943", "0.6049943", "0.6049943" ]
0.73738885
1
Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a private key path.
def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
    # Update credentials with anything passed during connection time
    drivername = credentials.pop("drivername")
    schema_name = credentials.pop("schema_name", None)
    if schema_name is not None:
        logger.warning(
            "schema_name specified creating a URL with schema is not supported. Set a default "
            "schema on the user connecting to your database."
        )

    create_engine_kwargs = kwargs
    connect_args = credentials.pop("connect_args", None)
    if connect_args:
        create_engine_kwargs["connect_args"] = connect_args

    if "private_key_path" in credentials:
        options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
            drivername, credentials
        )
    else:
        options = sa.engine.url.URL(drivername, **credentials)

    self.drivername = drivername
    engine = sa.create_engine(options, **create_engine_kwargs)
    return engine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_engine(db_credentials):\n\n url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format(\n user=db_credentials['user'], passwd=db_credentials['pwd'], host=db_credentials['host'], \n port=db_credentials['port'], db=db_credentials['db'])\n engine = create_engine(url, pool_size = 50)\n \n return engine", "def __init__(\n self,\n name=None,\n credentials=None,\n data_context=None,\n engine=None,\n connection_string=None,\n url=None,\n batch_data_dict=None,\n create_temp_table=True,\n **kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine\n ):\n super().__init__(name=name, batch_data_dict=batch_data_dict)\n self._name = name\n\n self._credentials = credentials\n self._connection_string = connection_string\n self._url = url\n self._create_temp_table = create_temp_table\n\n if engine is not None:\n if credentials is not None:\n logger.warning(\n \"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. \"\n \"Ignoring credentials.\"\n )\n self.engine = engine\n elif credentials is not None:\n self.engine = self._build_engine(credentials=credentials, **kwargs)\n elif connection_string is not None:\n self.engine = sa.create_engine(connection_string, **kwargs)\n elif url is not None:\n self.drivername = urlparse(url).scheme\n self.engine = sa.create_engine(url, **kwargs)\n else:\n raise InvalidConfigError(\n \"Credentials or an engine are required for a SqlAlchemyExecutionEngine.\"\n )\n\n # Get the dialect **for purposes of identifying types**\n if self.engine.dialect.name.lower() in [\n \"postgresql\",\n \"mysql\",\n \"sqlite\",\n \"oracle\",\n \"mssql\",\n ]:\n # These are the officially included and supported dialects by sqlalchemy\n self.dialect_module = import_library_module(\n module_name=\"sqlalchemy.dialects.\" + self.engine.dialect.name\n )\n\n elif self.engine.dialect.name.lower() == \"snowflake\":\n self.dialect_module = import_library_module(\n module_name=\"snowflake.sqlalchemy.snowdialect\"\n )\n elif self.engine.dialect.name.lower() == \"redshift\":\n self.dialect_module = import_library_module(\n module_name=\"sqlalchemy_redshift.dialect\"\n )\n elif self.engine.dialect.name.lower() == \"bigquery\":\n self.dialect_module = import_library_module(\n module_name=\"pybigquery.sqlalchemy_bigquery\"\n )\n else:\n self.dialect_module = None\n\n if self.engine and self.engine.dialect.name.lower() in [\n \"sqlite\",\n \"mssql\",\n \"snowflake\",\n \"mysql\",\n ]:\n # sqlite/mssql temp tables only persist within a connection so override the engine\n self.engine = self.engine.connect()\n\n # Send a connect event to provide dialect type\n if data_context is not None and getattr(\n data_context, \"_usage_statistics_handler\", None\n ):\n handler = data_context._usage_statistics_handler\n handler.send_usage_message(\n event=\"execution_engine.sqlalchemy.connect\",\n event_payload={\n \"anonymized_name\": handler._execution_engine_anonymizer.anonymize(\n self.name\n ),\n \"sqlalchemy_dialect\": self.engine.name,\n },\n success=True,\n )\n\n # Gather the call arguments of the present function (and add the \"class_name\"), filter out the Falsy values,\n # and set the instance \"_config\" variable equal to the resulting dictionary.\n self._config = {\n \"name\": name,\n \"credentials\": credentials,\n \"data_context\": data_context,\n \"engine\": engine,\n \"connection_string\": connection_string,\n \"url\": url,\n \"batch_data_dict\": batch_data_dict,\n \"module_name\": 
self.__class__.__module__,\n \"class_name\": self.__class__.__name__,\n }\n self._config.update(kwargs)\n filter_properties_dict(properties=self._config, inplace=True)", "def logic_db_engine(self):\n try:\n boto_session = boto3.Session(profile_name='loidsig')\n except:\n boto_session = boto3.Session()\n sm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n )\n get_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_DB')\n cred_dict = ast.literal_eval(get_secret_value_response['SecretString'])\n db_user, db_pass = cred_dict['username'], cred_dict['password']\n db_host, db_port, db_name = cred_dict['host'], cred_dict['port'], cred_dict['dbname']\n try:\n postgres_engine = create_engine(f'postgresql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}')\n except Exception as e:\n print(\"Unable to connect to postgres! Error: {}\".format(e))\n raise\n return postgres_engine", "def execute(credentials):", "def etl_command(credentials_file, inventory_file):\n with open(credentials_file, 'r') as f:\n credentials = yaml.load(f)\n with open(inventory_file, 'r') as f:\n inventory = yaml.load(f)\n\n engine = sqlalchemy.create_engine('postgresql://', connect_args=credentials)\n\n load_cj_data(engine, credentials, inventory, click)\n load_edu_data(engine, credentials, inventory, click)", "def create_engine(self):\n connection_string = f'postgresql://{self.user}:{self.password}@{self.host}/{self.database_name}'\n return create_engine(connection_string)", "def __init__(self):\n with open('config.json') as config:\n data = json.load(config)\n\n password = self.decode_password(data['db']['password'])\n db_conn_string = 'postgresql://' + data['db']['username'] + ':' + password + '@' + \\\n data['db']['hostname'] + ':' + data['db']['port'] + '/' + data['db']['database']\n\n self.engine = create_engine(db_conn_string)\n try:\n conn = self.engine.connect()\n if conn is not None:\n print(\"-I- Successful Database Connection\")\n except Exception as e:\n print(\"-W- \" + str(e))", "def __init__(self):\n user = os.getenv('URL_MYSQL_USER')\n passwd = os.getenv('URL_MYSQL_PWD')\n host = os.getenv('URL_MYSQL_HOST')\n database = os.getenv('URL_MYSQL_DB')\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(user, passwd, host, database))", "def __init__(self, provider, hostname, **kwargs):\r\n self.provider = provider\r\n self.hostname = hostname\r\n self.username = kwargs.get('username')\r\n self.password = kwargs.get('password')\r\n self.database = kwargs.get('database')\r\n\r\n if self.provider not in SupportedDatabase.__members__:\r\n db_list = ','.join(list(SupportedDatabase.__members__))\r\n raise Exception(\r\n provider + ', is not supported at this time. 
Following databases are only supported : ' + db_list)\r\n\r\n self.db = Database()\r\n self.db.bind(provider=SupportedDatabase[self.provider].value, user=self.username, password=self.password,\r\n host=self.hostname, database=self.database)", "def engine(db_url=None):\n db_url = db_url or os.getenv(\"DB_URL\")\n if not db_url:\n raise ValueError(\"database URL is required\")\n print(f\"Returning an engine for {db_url}\")\n return create_engine(db_url)", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(URL(**product_crawlers.settings.DATABASE))", "def get_engine(username, password, ipaddress, database):\n #TODO(rnirmal):Based on permissions issues being resolved we may revert\n #url = URL(drivername='mysql', host='localhost',\n # query={'read_default_file': '/etc/mysql/my.cnf'})\n global ENGINE\n if ENGINE:\n return ENGINE\n if database:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306/%s\" %\n (username, password, ipaddress,database),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n else:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306\" %\n (username, password, ipaddress),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n return ENGINE", "def setup(\r\n hosts,\r\n username=None,\r\n password=None,\r\n max_connections=10,\r\n default_keyspace=None,\r\n consistency='ONE',\r\n timeout=None):\r\n global _max_connections\r\n global connection_pool\r\n _max_connections = max_connections\r\n\r\n if default_keyspace:\r\n from cqlengine import models\r\n models.DEFAULT_KEYSPACE = default_keyspace\r\n\r\n _hosts = []\r\n for host in hosts:\r\n host = host.strip()\r\n host = host.split(':')\r\n if len(host) == 1:\r\n port = 9160\r\n elif len(host) == 2:\r\n try:\r\n port = int(host[1])\r\n except ValueError:\r\n raise CQLConnectionError(\"Can't parse port as int {}\".format(':'.join(host)))\r\n else:\r\n raise CQLConnectionError(\"Can't parse host string {}\".format(':'.join(host)))\r\n\r\n _hosts.append(Host(host[0], port))\r\n\r\n if not _hosts:\r\n raise CQLConnectionError(\"At least one host required\")\r\n\r\n connection_pool = ConnectionPool(_hosts, username, password, consistency, timeout)", "def __init__(self,db_user,db_pwd,db_host,db_port,db_name):\n self.std_error = \"Could not connect to database. 
Check that it is running correctly.\"\n self.engine = db.create_engine(f'mysql+pymysql://{db_user}:{db_pwd}@{db_host}:{db_port}/{db_name}')", "def _set_database_engine(self, config):\n confi = config.copy()\n superuse = confi.pop(\"supdatabase\"), confi.pop(\"supusername\"), confi.pop(\"suppassword\")\n self.__engine = create_engine(URL(**confi))\n try:\n try:\n if self.__engine is not None:\n conn = self.__engine.connect()\n conn.close()\n except OperationalError:\n configdef = confi.copy()\n configdef[\"database\"] = superuse[0]\n self.__engine.dispose()\n self.__engine = create_engine(URL(**configdef))\n try:\n conn = self.__engine.connect()\n try:\n conn.execute(\"commit\")\n conn.execute(\"CREATE DATABASE %s;\" % config[\"database\"])\n finally:\n conn.close()\n except OperationalError:\n self.__engine.dispose()\n raise\n self.__engine.dispose()\n self.__engine = create_engine(URL(**confi))\n except ProgrammingError:\n raise", "def connect(self, credentials):\n try:\n #dsn = credentials.host() + ':' + credentials.port()\n if credentials.port() is not None:\n self.db = msq.connect(host=credentials.host(), port=credentials.port(),\n user=credentials.username(),\n passwd=credentials.password(),\n db=credentials.database(),\n charset='utf8',\n use_unicode=True,\n cursorclass=MySQLdb.cursors.DictCursor)\n else:\n self.db = msq.connect(host=credentials.host(),\n user=credentials.username(),\n passwd=credentials.password(),\n db=credentials.database(),\n charset='utf8',\n use_unicode=True,\n cursorclass=MySQLdb.cursors.DictCursor)\n\n self.db.autocommit(True)\n self.dbc = self.db.cursor()\n self.status = 'Connected to: %s' % credentials.database()\n except MySQLdb.Error as e:\n print \"Connection error : %s\" % e\n raise", "def Engine_Connection(self):\n try:\n # Engine Connection\n engine = create_engine('mysql+mysqlconnector://{}:{}@{}/{}'.format(self.user,self.password,self.host,self.database))\n return['Engine created', engine]\n except engine.closed():\n return print(\"Failed to create engine\")", "def __init__(self, *, username: str = None, password: str = None) -> None:\n LOG.debug(f\"Authenticating to PostgreSQL database using {pg_environment()}\")\n\n connect_params = {\n \"cursor_factory\": NamedTupleCursor,\n \"fallback_application_name\": fallback_application_name(),\n\n **({\"user\": username} if username is not None else {}),\n **({\"password\": password} if password is not None else {}),\n }\n\n try:\n # connect() requires a DSN as the first arg even if the connection\n # details are fully-specified by the environment, but we don't need to\n # fill it with anything.\n self.connection = psycopg2.connect(\"\", **connect_params)\n except DatabaseError as error:\n LOG.error(f\"Authentication failed: {error}\")\n raise error from None\n\n LOG.info(f\"Connected to {self.session_info()}\")", "def __init__(self, sql_file, engine=\"SQLite\", user=None, password=None,\n host=\"localhost\", LOG=None, attach=None):\n\n # attach cases\n if attach is None:\n attach = {}\n else:\n attach = attach.copy()\n\n if isinstance(sql_file, str):\n\n for e in DatabaseCore._engines:\n if sql_file.startswith(e + \":::\"):\n engine = e\n sql_file = sql_file[len(e) + 3:]\n if \"###\" in sql_file:\n host, sql_file = sql_file.split(\"###\")\n break\n\n if \";\" in sql_file:\n li = [s.strip() for s in sql_file.split(\";\")]\n sql_file = li[0]\n rest = li[1:]\n for s in rest:\n ok = s.split(\",\")\n if len(ok) != 2:\n raise DBException( # pragma: no cover\n \"unable to find an alias in %r\" % s)\n nick = 
ok[0].strip()\n file = \",\".join(ok[1:])\n attach[nick] = file.strip()\n elif sql_file.startswith(\":\"):\n if sql_file != \":memory:\":\n raise FileNotFoundError( # pragma: no cover\n \"unable to interpret file: %r\" % sql_file)\n\n # some initialization\n self._password = password\n self._user = user\n self._host = host\n\n # the rest\n if LOG is None:\n def blind(*li, **p): # pragma: no cover\n pass\n LOG = blind # pragma: no cover\n self.LOG = LOG\n\n if isinstance(LOG, dict):\n raise TypeError( # pragma: no cover\n \"fLOG should be a function, not a dictionary\")\n if isinstance(self.LOG, dict):\n raise TypeError( # pragma: no cover\n \"LOG should be a function, not a dictionary\")\n\n if engine == \"SQLite\":\n self._sql_file = sql_file\n self._engine = engine\n\n elif engine == \"ODBCMSSQL\":\n raise DBException( # pragma: no cover\n \"Unable to connect to a SQL server.\")\n\n else:\n raise DBException( # pragma: no cover\n \"unfounded engine %s in %s\" %\n (engine, \", \".join(\n DatabaseCore._engines)))\n\n # write a file able to build a database summary\n if isinstance(sql_file, str) and not self.isMemory():\n folder = os.path.split(sql_file)[0]\n if len(folder) > 0 and not os.path.exists(folder):\n os.makedirs(folder)\n summary = os.path.join(folder, \"temp_quick_look_up.py\")\n if not os.path.exists(summary):\n #cwd = os.path.join (os.path.abspath (os.path.split (__file__) [0]), \"..\", \"..\")\n #fi = os.path.split (sql_file) [1]\n\n if hasattr(DatabaseCore, \"SCRIPT_LOOKUP\"):\n script = DatabaseCore.SCRIPT_LOOKUP\n lines = script.split(\"\\n\")\n lines = [li if \"__CWD__ =\" not in li else\n li.replace(\n \"(__file__)\",\n \"(r'%s')\" %\n os.path.realpath(__file__))\n for li in lines]\n script = \"\\n\".join(lines)\n script = script.replace(\n \"python quick_look_up.py\",\n \"%s quick_look_up.py\" %\n sys.executable)\n self.LOG(\"creating script \", summary)\n try:\n f = open(summary, \"w\")\n f.write(script)\n f.close()\n except IOError:\n self.LOG(\"unable to write \", summary)\n\n self._attach = attach\n self._buffer_insert = []\n self._buffer_insert_s = 0\n\n if isinstance(sql_file, str) and self.isMemory():\n self._connection = SQLite.connect(self._sql_file)\n elif isinstance(sql_file, SQLite.Connection):\n self._connection = sql_file\n self._sql_file = \":memory:\"", "def __init__(self, credentials=None, connection=None):\n if credentials is not None:\n self.connection = pymysql_driver.connect(\n user=credentials['user'],\n password=credentials['password'],\n host=credentials['host'],\n database=credentials['database'],\n autocommit=False,\n connect_timeout=2,\n cursorclass=pymysql_driver.cursors.DictCursor\n )\n elif connection is not None:\n self.connection = connection\n else:\n raise ValueError(\"Must provide either the database credentials or connection object\")", "def __init__(\n self,\n sql_username,\n sql_password,\n sql_host,\n sql_port,\n sql_db,\n sql_url_template=(\"mysql+mysqldb://{username}:{password}@\"\n \"{host}:{port}/{db}?charset=utf8mb4\"),\n **kwargs\n ):\n\n # Internalize arguments.\n self.sql_username = sql_username\n self.sql_password = sql_password\n self.sql_host = sql_host\n self.sql_port = sql_port\n self.sql_db = sql_db\n self.sql_url_template = sql_url_template\n\n # Inspecting the presence of keyword arguments and (should they not be\n # defined) setting defaults.\n self.sql_engine_pool_size = kwargs.get(\"sql_engine_pool_size\", 1)\n self.sql_engine_pool_recycle = kwargs.get(\n \"sql_engine_pool_recycle\", 3600\n )\n 
self.sql_engine_echo = kwargs.get(\"sql_engine_echo\", False)\n self.mysqldb_sscursor = kwargs.get(\"mysqldb_sscursor\", False)\n self.expire_on_commit = kwargs.get(\"expire_on_commit\", False)\n\n # create DB engine.\n self.engine = self.connect()\n\n # create new session.\n self.session_factory = sqlalchemy.orm.sessionmaker(\n bind=self.engine,\n expire_on_commit=self.expire_on_commit\n )", "def connect_sqlalchemy():\n username = os.getenv('db_user')\n password = os.getenv('db_password')\n database = os.getenv('db_name')\n host = os.getenv('db_host')\n\n if username is None or password is None or database is None or host is None:\n raise Exception(\"\"\"Cannot connect to SQLAlchemy Engine. Database configurations are not set in env.\n \\n Set env like following:\n \\t export db_host=example.com\n \\t export db_name=my_db_name\n \\t export db_user=my_db_user\n \\t export db_password=my_db_password\"\"\")\n engine = create_engine('mysql://%s:%s@%s/%s' % (username, password, host, database))\n return engine.connect()", "async def initialize(\n self,\n url: str,\n password: str | None,\n *,\n isolation_level: Optional[str] = None,\n ) -> None:\n if self._override_engine:\n self._session = await create_async_session(self._override_engine)\n else:\n self._engine = create_database_engine(\n url, password, isolation_level=isolation_level\n )\n self._session = await create_async_session(self._engine)", "def create_conn():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n f = open(os.path.join(dir_path, \"aws_keys\"), \"r\")\n keys = f.read().split(\"\\n\")\n\n return pymysql.connect(\n host= keys[2],\n user=keys[3],\n password=keys[4],\n port=int(keys[5]))", "def __init__(self, host, username, passwd):\n self.host = host\n self.username = username\n self.passwd = passwd\n self.engines = {}", "def connect(self, url=None):\n\n # If no URL was provided then create one through `self.create_url`.\n if not url:\n url = self.create_url()\n\n # Create the engine.\n engine = sqlalchemy.create_engine(\n url,\n pool_size=self.sql_engine_pool_size,\n pool_recycle=self.sql_engine_pool_recycle,\n echo=self.sql_engine_echo,\n )\n\n # Connect to the database.\n engine.connect()\n\n return engine" ]
[ "0.70442784", "0.6375183", "0.6234762", "0.61758363", "0.60751504", "0.6024599", "0.599556", "0.59493566", "0.5944619", "0.5925997", "0.5916051", "0.5916051", "0.5916051", "0.5916051", "0.58947533", "0.5882432", "0.5831161", "0.58047366", "0.5755691", "0.57500356", "0.5732881", "0.5727176", "0.5707187", "0.57049793", "0.5699837", "0.56898886", "0.5671391", "0.5663731", "0.5631863", "0.56310725" ]
0.7142299
0
For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail if bundling the metrics together is not possible.
def resolve_metric_bundle(
    self,
    metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]],
) -> dict:
    resolved_metrics = dict()

    # We need a different query for each domain (where clause).
    queries: Dict[Tuple, dict] = dict()
    for (
        metric_to_resolve,
        engine_fn,
        compute_domain_kwargs,
        accessor_domain_kwargs,
        metric_provider_kwargs,
    ) in metric_fn_bundle:
        if not isinstance(compute_domain_kwargs, IDDict):
            compute_domain_kwargs = IDDict(compute_domain_kwargs)
        domain_id = compute_domain_kwargs.to_id()
        if domain_id not in queries:
            queries[domain_id] = {
                "select": [],
                "ids": [],
                "domain_kwargs": compute_domain_kwargs,
            }
        queries[domain_id]["select"].append(
            engine_fn.label(metric_to_resolve.metric_name)
        )
        queries[domain_id]["ids"].append(metric_to_resolve.id)
    for query in queries.values():
        selectable, compute_domain_kwargs, _ = self.get_compute_domain(
            query["domain_kwargs"], domain_type="identity"
        )
        assert len(query["select"]) == len(query["ids"])
        try:
            res = self.engine.execute(
                sa.select(query["select"]).select_from(selectable)
            ).fetchall()
            logger.debug(
                f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}"
            )
        except OperationalError as oe:
            exception_message: str = "An SQL execution Exception occurred. "
            exception_traceback: str = traceback.format_exc()
            exception_message += f'{type(oe).__name__}: "{str(oe)}".  Traceback: "{exception_traceback}".'
            logger.error(exception_message)
            raise ExecutionEngineError(message=exception_message)
        assert (
            len(res) == 1
        ), "all bundle-computed metrics must be single-value statistics"
        assert len(query["ids"]) == len(
            res[0]
        ), "unexpected number of metrics returned"
        for idx, id in enumerate(query["ids"]):
            resolved_metrics[id] = convert_to_json_serializable(res[0][idx])
    return resolved_metrics
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resolve_metric_bundle(\n self,\n metric_fn_bundle: Iterable[MetricComputationConfiguration],\n ) -> Dict[Tuple[str, str, str], MetricValue]:\n resolved_metrics: Dict[Tuple[str, str, str], MetricValue] = {}\n\n res: List[pyspark.Row]\n\n aggregates: Dict[Tuple[str, str, str], dict] = {}\n\n aggregate: dict\n\n domain_id: Tuple[str, str, str]\n\n bundled_metric_configuration: MetricComputationConfiguration\n for bundled_metric_configuration in metric_fn_bundle:\n metric_to_resolve: MetricConfiguration = (\n bundled_metric_configuration.metric_configuration\n )\n metric_fn: Any = bundled_metric_configuration.metric_fn\n compute_domain_kwargs: dict = (\n bundled_metric_configuration.compute_domain_kwargs or {}\n )\n if not isinstance(compute_domain_kwargs, IDDict):\n compute_domain_kwargs = IDDict(compute_domain_kwargs)\n\n domain_id = compute_domain_kwargs.to_id()\n if domain_id not in aggregates:\n aggregates[domain_id] = {\n \"column_aggregates\": [],\n \"metric_ids\": [],\n \"domain_kwargs\": compute_domain_kwargs,\n }\n\n aggregates[domain_id][\"column_aggregates\"].append(metric_fn)\n aggregates[domain_id][\"metric_ids\"].append(metric_to_resolve.id)\n\n for aggregate in aggregates.values():\n domain_kwargs: dict = aggregate[\"domain_kwargs\"]\n df: pyspark.DataFrame = self.get_domain_records(domain_kwargs=domain_kwargs)\n\n assert len(aggregate[\"column_aggregates\"]) == len(aggregate[\"metric_ids\"])\n\n res = df.agg(*aggregate[\"column_aggregates\"]).collect()\n\n logger.debug(\n f\"SparkDFExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(domain_kwargs).to_id()}\"\n )\n\n assert (\n len(res) == 1\n ), \"all bundle-computed metrics must be single-value statistics\"\n assert len(aggregate[\"metric_ids\"]) == len(\n res[0]\n ), \"unexpected number of metrics returned\"\n\n idx: int\n metric_id: Tuple[str, str, str]\n for idx, metric_id in enumerate(aggregate[\"metric_ids\"]):\n # Converting DataFrame.collect() results into JSON-serializable format produces simple data types,\n # amenable for subsequent post-processing by higher-level \"Metric\" and \"Expectation\" layers.\n resolved_metrics[metric_id] = convert_to_json_serializable(\n data=res[0][idx]\n )\n\n return resolved_metrics", "def _resolve_queries(self, availability_cache: Dict = None) -> None:\n availability_cache = availability_cache or {}\n\n for query in self._data_queries:\n entity = query.entity\n if isinstance(entity, str) or isinstance(query, MeasureQueryInfo):\n # If we were unable to fetch entity (404/403) or if we're processing a measure processor\n continue\n query = query.query\n coord = query.coordinate\n entity_dimension = entity.data_dimension\n entity_id = entity.get_marquee_id()\n\n query_start = query.start\n query_end = query.end\n if isinstance(query_start, RelativeDate):\n key = get_entity_rdate_key_from_rdate(entity_id, query_start)\n query.start = self.rule_cache[key]\n\n if isinstance(query_end, RelativeDate):\n key = get_entity_rdate_key_from_rdate(entity_id, query_end)\n query.end = self.rule_cache[key]\n\n if entity_dimension not in coord.dimensions:\n if coord.dataset_id:\n # don't need to fetch the data set if user supplied it\n coord.set_dimensions({entity_dimension: entity.get_marquee_id()})\n query.coordinate = coord\n else:\n # Need to resolve the dataset from availability\n entity_id = entity.get_marquee_id()\n try:\n raw_availability = availability_cache.get(entity_id)\n if raw_availability is None:\n raw_availability: Dict = 
GsSession.current._get(f'/data/measures/{entity_id}/availability')\n availability_cache[entity.get_marquee_id()] = raw_availability\n query.coordinate = entity.get_data_coordinate(measure=coord.measure,\n dimensions=coord.dimensions,\n frequency=coord.frequency,\n availability=raw_availability)\n except Exception as e:\n _logger.info(\n f'Could not get DataCoordinate with {coord} for entity {entity_id} due to {e}')", "def compute(self, *args):\n computs = {0: self.compute_cov(), 1: self.compute_corr(),\n 2: self.compute_prec(), 3: self.compute_partial(),\n 5: self.compute_tangent()}\n measures_steps = {'correlations': [0, 1],\n 'partial correlations': [0, 2, 3],\n 'covariances': [0],\n 'precisions': [0, 2],\n 'tangent plane': [0, 5]}\n steps = [step for name in args for step in measures_steps[name]]\n steps = set(steps)\n for n_step in steps:\n computs[n_step]\n output = {'correlations': self.corr_,\n 'partial correlations': self.partial_corr_,\n 'covariances': self.cov_,\n 'precisions': self.prec_,\n 'tangent plane': self.tangent_}\n self.conn = {}\n for measure_name in args:\n self.conn[measure_name] = output[measure_name]\n return self", "def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect", "def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result", "def configure_metrics(self):\n allowed = list(METRIC_LOOKUP.keys()) + [None]\n metrics = nn.ModuleDict()\n for k, m in self.branch_metrics.items():\n for metric_name in m:\n if metric_name not in allowed:\n raise ValueError(\n f\"Illegal metric given. Got: {metric_name}. 
Allowed: {allowed}.\"\n )\n\n if metric_name is not None:\n metric = METRIC_LOOKUP[metric_name]()\n else:\n metric = None\n\n metrics[f\"{k}_{metric_name}\"] = metric\n\n return metrics", "def generate_metrics_querys(metrics: List, period: int = 30, stats: str = 'Sum') -> Tuple[List, Dict]:\r\n metricsquery = [] #type: List \r\n resultsquery = defaultdict(list) #type: DefaultDict\r\n for metric in metrics:\r\n identity = randomString() \r\n metricsquery.append({'Id': identity, 'MetricStat': {'Metric': metric, 'Period': period, 'Stat': stats} })\r\n resultsquery[identity].append({'query': {'MetricStat': {'Metric': metric, 'Period': period, 'Stat': stats}}})\r\n return metricsquery, dict(resultsquery)", "def compute_all(self) -> Any:\n self._check_for_increment(\"compute_all\")\n # The i!=0 accounts for the self._base_metric should be ignored\n res = [metric.compute() for i, metric in enumerate(self) if i != 0]\n try:\n if isinstance(res[0], dict):\n keys = res[0].keys()\n return {k: torch.stack([r[k] for r in res], dim=0) for k in keys}\n if isinstance(res[0], list):\n return torch.stack([torch.stack(r, dim=0) for r in res], 0)\n return torch.stack(res, dim=0)\n except TypeError: # fallback solution to just return as it is if we cannot succesfully stack\n return res", "def _report_intermediates_and_final(query_result: list[Any], metric: str, query: str, scale: float = 1.) -> tuple[float, list[float]]:\n if not query_result:\n raise ValueError('Invalid query. Results from benchmark is empty: ' + query)\n if len(query_result) > 1:\n query_result = random.choice(query_result)\n else:\n query_result = query_result[0]\n query_dict = cast(dict, query_result)\n for i in query_dict.get('intermediates', []):\n if i[metric] is not None:\n nni.report_intermediate_result(i[metric] * scale)\n nni.report_final_result(query_dict[metric] * scale)\n return query_dict[metric]", "def run_query(self, start_time, end_time, interval, metric_name, campaign, **kwargs):\n data = self._irl_artifacts_.run_query(start_time, end_time, interval, metric_name, campaign, **kwargs)\n metrics = data[0] \n times = data[1]\n \n \"\"\" Get the totals for campaign views and donations \"\"\"\n data = self._irl_totals_.run_query(start_time, end_time, interval, metric_name, campaign, **kwargs)\n metrics_total = data[0] \n times_total = data[1]\n self._results_ = data[2]\n \n \"\"\" Combine the results for the campaign totals with (banner, landing page, campaign) \"\"\"\n for key in metrics_total.keys():\n metrics[key] = metrics_total[key]\n times[key] = times_total[key]\n \n return [metrics, times]", "def fill_artifacts_at_runtime(self, args):\n for j in self.jobs:\n j.fill_artifacts_at_runtime(args)", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. 
Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def summerize_adapter_metrics(parsed_metrics: Dict[int, dict]) -> Dict[Tuple[str, str], dict]:\n\n summarized_metrics = {}\n for lane in parsed_metrics:\n # Iterate over all samples in lane\n summarized_metrics[lane] = summarized_metrics.get(lane, {})\n for value in parsed_metrics[lane].values():\n sample_id = value.get(\"Sample_ID\")\n summarized_metrics[lane][sample_id] = summarized_metrics[lane].get(sample_id, value)\n summarized_metrics[lane][sample_id][\n \"R\" + value.get(\"ReadNumber\") + \"_SampleBases\"\n ] = value.get(\"SampleBases\")\n\n return summarized_metrics", "def _get_parallel_arguments(self, metapaths, start_idxs, end_idxs, start_type,\n end_type, matrices, verbose, walks=False):\n mats_subset_start, mats_subset_end = self._subset_matrices(matrices, start_idxs,\n end_idxs, start_type, end_type)\n\n # Prepare functions for parallel processing\n arguments = []\n for mp in metapaths:\n to_multiply = mt.get_matrices_to_multiply(mp, self.metapaths,\n matrices, mats_subset_start, mats_subset_end)\n if not walks:\n edges = mt.get_edge_names(mp, self.metapaths)\n arguments.append({'edges': edges, 'to_multiply': to_multiply,\n 'start_idxs': start_idxs, 'end_idxs': end_idxs, 'verbose': False})\n else:\n arguments.append({'to_multiply': to_multiply})\n return arguments", "def compute_metrics(self, results: list) -> dict:", "def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate", "def run(self, inputIn):\n measureList = self.inputToInternal(inputIn)\n outputDict = {}\n assert(len(self.features) == len(measureList))\n for metricInstance in self.metricsDict.values():\n metricEngine = MetricDistributor.factory.returnInstance('MetricDistributor', metricInstance)\n for cnt in range(len(self.targets)):\n nodeName = (str(self.targets[cnt]) + '_' + str(self.features[cnt])).replace(\"|\",\"_\")\n varName = metricInstance.name + '|' + nodeName\n output = metricEngine.evaluate(measureList[cnt], weights=self.weight, multiOutput=self.multiOutput)\n outputDict[varName] = np.atleast_1d(output)\n return outputDict", "def solve_filter_metrics(self):\n\n if 'metrics' in self.filter_request:\n filter_metrics = self.filter_request['metrics']\n metrics_request = {}\n\n temp = 
filter_metrics.split(',')\n for i in temp:\n metrics_request[i.strip()] = None\n\n for i in range(len(self.list_pack)):\n self.apply_filter_metrics(i, metrics_request.copy())", "def collect_metrics() -> Tuple[Dict[str, Dict[str, Any]], Dict[str, List[str]]]:\n metric_docs: Dict[str, Dict[str, Any]] = {}\n metrics_by_integration: DefaultDict[str, List[str]] = defaultdict(list)\n # Reverse to keep backwards-compatible behavior with old script that kept\n # the last metric seen.\n for metric_yaml_file in sorted(INTEGRATIONS_PATH.glob(\"*/metrics.yaml\")):\n if \"Example\" in str(metric_yaml_file):\n continue\n\n for metric_name, metric in (yaml.safe_load(metric_yaml_file.read_text(encoding=\"utf-8\")) or {}).items():\n metrics_by_integration[metric_yaml_file.parent.name].append(metric_name)\n\n if metric_name in metric_docs:\n # print(f\"WARNING metric {metric_name} is duplicated, info will be taken from first one processed only\")\n continue\n\n desc = \"\"\n if \"description\" in metric:\n desc = metric[\"description\"]\n del metric[\"description\"]\n metric_docs[metric_name] = {\"yaml\": metric, \"markdown\": desc}\n return metric_docs, dict(metrics_by_integration)", "def build_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang):\n # query the metrics table\n build_metrics_start_time = time.time()\n metrics, metrics_columns = get_metrics(session, fill_id, population_id, properties_id, aggregations_id, label_lang)\n build_metrics_query_end_time = time.time()\n\n # make a nested dictionary represented the metrics\n metrics_response, represented_biases = build_gap_response(properties_id, metrics, metrics_columns, label_lang,\n session)\n build_metrics_grouping_end_time = time.time()\n\n # timing\n query_metrics_seconds_taken = build_metrics_query_end_time - build_metrics_start_time\n group_metrics_seconds_taken = build_metrics_grouping_end_time - build_metrics_query_end_time\n log.debug(f\"Querying metrics repsponse took {'%.3f' % query_metrics_seconds_taken} seconds\")\n log.debug(f\"Grouping metrics repsponse took {'%.3f' % group_metrics_seconds_taken} seconds\")\n return metrics_response, represented_biases", "def core(self, ids, start_date, end_date, metrics, dimensions=None,\r\n sort=None, filters=None, segment=None, start_index=None,\r\n max_results=None, fields=None, prettyPrint=None, userIp=None,\r\n quotaUser=None, access_token=None, key=None):\r\n params = base.get_params(None, locals(),\r\n translate_param=translate_param)\r\n request = http.Request('GET', self.get_url('ga'), params)\r\n\r\n return request, parsers.parse_json", "def get_multitask_metrics(metric_tasks = ()):\n\n @flax.struct.dataclass\n class MultiTaskMetric(metrics.Metric):\n \"\"\"MultiTaskMetric.\n\n This metric aggregates sub-metrics in the metric_dict and return the metrics\n of all of them by calling them separately.\n\n Attributes:\n tasks: A sequence of tasks to compute metrics over.\n \"\"\"\n tasks: Tasks = metric_tasks\n\n @classmethod\n @gin_utils.allow_remapping(name='get_multitask_metrics')\n def from_model_output(cls, outputs,\n labels):\n \"\"\"Accumulates model outputs for evaluation.\n\n Args:\n outputs: A dictionary with the following structure:\n key name: Task name.\n value content: A dictionary to corresponding task specific outputs.\n labels: A dictionary with the following structure:\n key name: Task name.\n value content: A dictionary corresponding task specific labels.\n\n Returns:\n A metric object initialized from the outputs and labels.\n\n Raises:\n 
KeyError: Missing task-specific outputs or labels.\n \"\"\"\n new_tasks = []\n for task in cls.tasks:\n task_outputs, task_labels = (\n task.filter_by_task(outputs), task.filter_by_task(labels))\n if not task_outputs:\n raise KeyError(f'No task outputs for task: {task.name}!')\n if task_labels is None:\n raise KeyError(f'No task labels for task: {task.name}!')\n\n metric = task.metric.from_model_output(task_outputs, task_labels)\n new_tasks.append(type(task)(metric=metric))\n\n return cls(tasks=new_tasks)\n\n def merge(self, other):\n new_tasks = []\n assert len(self.tasks) == len(other.tasks)\n for task, other_task in zip(self.tasks, other.tasks):\n metric = task.metric.merge(other_task.metric)\n new_tasks.append(type(task)(metric=metric))\n\n return type(self)(tasks=new_tasks)\n\n def reduce(self):\n new_tasks = []\n for task in self.tasks:\n metric = task.metric.reduce()\n new_tasks.append(type(task)(metric=metric))\n\n return type(self)(tasks=new_tasks)\n\n def compute(self):\n output_metric = {}\n for task in self.tasks:\n task_metric = task.metric.compute()\n output_metric.update(task.prepend_by_task(task_metric))\n\n return output_metric\n\n return MultiTaskMetric", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def _(\n self,\n metric_type: str,\n metrics: Metrics,\n column,\n data_frame_list,\n *args,\n **kwargs,\n ):\n col_metric = None\n for data_frame in data_frame_list:\n col_metric = metrics(column).dl_query(data_frame)\n if not col_metric:\n return None\n return {metrics.name(): col_metric}", "def optimize_metrics(self,\n metrics: list = None,\n verbose: bool = True):\n\n if metrics is None:\n metrics = self._supported_metrics\n else:\n metrics = [metric.lower() for metric in metrics]\n assert all(metric in self._supported_metrics for metric in metrics)\n for i in metrics:\n super(ThresholdOptimizer, self).__getattribute__(f'get_best_{i}_metrics')(verbose=verbose)", "def prepare_multiple_perf_metrics(run_dict):\n multiple_perf_metrics = {}\n for run_label, run_name in run_dict.items():\n output_parser = OutputParser(run_name, use_most_recent=False)\n perf_metrics = performance_calculations.performance_metrics(output_parser)\n multiple_perf_metrics[run_label] = perf_metrics\n return multiple_perf_metrics", "def init_metric_dict(self, metrics=[\"\"], phases=[\"train\", \"val\"]):\n metric_dict = {phase: {metric: [] for metric in metrics} for phase in phases}\n return metric_dict", "def _resolve_multi(self, interpreter, requirements, find_links):\n python_setup = PythonSetup.global_instance()\n python_repos = PythonRepos.global_instance()\n distributions = {}\n fetchers = python_repos.get_fetchers()\n fetchers.extend(Fetcher([path]) for path in find_links)\n\n for platform in python_setup.platforms:\n requirements_cache_dir = os.path.join(python_setup.resolver_cache_dir,\n str(interpreter.identity))\n distributions[platform] = resolve(\n requirements=[req.requirement for req in requirements],\n interpreter=interpreter,\n fetchers=fetchers,\n platform=None if platform == 'current' else platform,\n context=python_repos.get_network_context(),\n cache=requirements_cache_dir,\n 
cache_ttl=python_setup.resolver_cache_ttl)\n\n return distributions", "def calculate_metrics(jobs, metrics_names):\n metrics_def_dict = {mn: {'metric': mn.split('_')[0], 'agg': mn.split('_')[1], 'data': [], 'value': -1} for mn in metrics_names}\n\n for job in jobs:\n if job['category'] == 'run' and job['jobstatus'] == 'finished':\n for mn, mdata in metrics_def_dict.items():\n if 'per' in mdata['metric']:\n if mdata['metric'].split('per')[0] in job and mdata['metric'].split('per')[1] in job and job[mdata['metric'].split('per')[1]] > 0:\n mdata['data'].append(job[mdata['metric'].split('per')[0]]/(1.0*job[mdata['metric'].split('per')[1]]))\n elif mdata['metric'] in job and job[mdata['metric']]:\n mdata['data'].append(job[mdata['metric']])\n\n for mn, mdata in metrics_def_dict.items():\n if 'avg' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])/(1.0*len(mdata['data'])) if len(mdata['data']) > 0 else -1\n if 'sum' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])\n\n metrics = {}\n for mn, mdata in metrics_def_dict.items():\n if mdata['value'] > 0:\n if 'percent' in mdata['agg']:\n metrics[mn] = round(mdata['value'] * 100.0, 2)\n else:\n metrics[mn] = round(mdata['value'], 2)\n\n return metrics" ]
[ "0.5766406", "0.53083754", "0.5228133", "0.520503", "0.52048165", "0.5184909", "0.5151796", "0.5131219", "0.5057866", "0.5052298", "0.5040867", "0.50277853", "0.5003099", "0.49977538", "0.49886754", "0.49603522", "0.49507537", "0.49436116", "0.4934606", "0.49135163", "0.49083295", "0.4904018", "0.49010938", "0.4874759", "0.48598823", "0.48520008", "0.483729", "0.48113585", "0.4809755", "0.48059162" ]
0.6327868
0
Convert the values in the named column to the given date_format, and split on that
def _split_on_converted_datetime(
    self,
    table_name: str,
    column_name: str,
    batch_identifiers: dict,
    date_format_string: str = "%Y-%m-%d",
):
    return (
        sa.func.strftime(
            date_format_string,
            sa.column(column_name),
        )
        == batch_identifiers[column_name]
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_date(X, date_column):\r\n X.copy()\r\n X[date_column] = pd.to_datetime(X[date_column])\r\n X['Month'] = X[date_column].dt.month\r\n X['Day'] = X[date_column].dt.day\r\n X['Year'] = X[date_column].dt.year\r\n X = X.drop(columns=date_column)\r\n return X", "def split_on_converted_datetime(\n df,\n column_name: str,\n batch_identifiers: dict,\n date_format_string: str = \"yyyy-MM-dd\",\n ) -> pyspark.DataFrame:\n matching_string = batch_identifiers[column_name]\n res = (\n df.withColumn(\n \"date_time_tmp\", F.from_unixtime(F.col(column_name), date_format_string)\n )\n .filter(F.col(\"date_time_tmp\") == matching_string)\n .drop(\"date_time_tmp\")\n )\n return res", "def separate_date(x):\n x[\"SALE DAY\"] = x.apply(get_day, axis=1)\n x[\"SALE MONTH\"] = x.apply(get_month, axis=1)\n x[\"SALE YEAR\"] = x.apply(get_year, axis=1)", "def __split_date(self, date: str) -> list:\n parts = date.split(\"-\")\n return parts", "def split_date(df):\n df_date = df['week_ending'].str.split('-', expand=True)\n df['Year'] = df_date[0]\n df['Month'] = df_date[1]\n df['Day'] = df_date[2]\n\n # sort data set by date\n df = df.sort_values(by = ['Year','Month','Day'],ascending=(False,False,False))\n\n # convert date type\n df['Year']=df['Year'].astype(int)\n df['Month']=df['Month'].astype(int)\n df['Day']=df['Day'].astype(int)\n\n # convert date to timestamp\n #df['week_ending']=df['week_ending'].apply(lambda x:datetime.datetime.strptime(x,'%Y-%m-%d'))\n df['week_ending']=pd.to_datetime(df['week_ending'])\n return(df)", "def date_formatting(df):\n from datetime import datetime\n sub_df = df.iloc[:, 1:]\n for i in range(0, len(sub_df.columns)):\n if i <= 2:\n pass\n else:\n date_string = sub_df.columns[i]\n d1 = datetime.date(datetime.strptime(date_string, '%m/%d/%y'))\n d2 = str(d1)\n sub_df.rename(columns={date_string: d2}, inplace=True)\n return sub_df", "def split_date(value):\n if not is_valid_date(value):\n return ('', '', '')\n\n splited = value.split('-')\n\n try:\n year = splited[0]\n except IndexError:\n year = ''\n\n try:\n month = splited[1]\n except IndexError:\n month = ''\n\n try:\n day = splited[2]\n except IndexError:\n day = ''\n\n return (year, month, day)", "def split_on_year_and_month_and_day(\n self,\n df: pyspark.DataFrame,\n column_name: str,\n batch_identifiers: dict,\n ) -> pyspark.DataFrame:\n return self.split_on_date_parts(\n df=df,\n column_name=column_name,\n batch_identifiers=batch_identifiers,\n date_parts=[DatePart.YEAR, DatePart.MONTH, DatePart.DAY],\n )", "def _parse_date_columns(data_frame, parse_dates):\n # handle non-list entries for parse_dates gracefully\n if parse_dates is True or parse_dates is None or parse_dates is False:\n parse_dates = []\n\n if not hasattr(parse_dates, '__iter__'):\n parse_dates = [parse_dates]\n\n for col_name in parse_dates:\n df_col = data_frame[col_name]\n try:\n fmt = parse_dates[col_name]\n except TypeError:\n fmt = None\n data_frame[col_name] = _handle_date_column(df_col, format=fmt)\n\n return data_frame", "def updatetotimeformat(tweetdf, colname):\r\n for i in range(len(tweetdf)):\r\n tweetdf.loc[i,colname] = parser.parse(tweetdf.loc[i,colname])\r\n \r\n return tweetdf", "def get_dates(raw_table) -> \"list of dates\":\n dates = []\n found_first = False\n for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):\n if dstr:\n if len(dstr.split(\"/\")) == 3:\n d = datetime.datetime.strptime(dstr, '%m/%d/%Y')\n elif len(dstr.split(\"-\")) == 3:\n d = datetime.datetime.strptime(dstr, '%Y-%m-%d')\n else:\n # Not 
necessarily an error, could just be a non-date cell\n logging.debug(\"unknown date-format: {}\".format(dstr))\n continue\n dates.append(d)\n if not found_first:\n found_first = True\n logging.debug(\"Found first date: '{}' at i: {}\".format(d.isoformat(), i))\n elif found_first:\n logging.debug(\"Last date: {}\".format(d))\n break\n return dates", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def split_on_date_parts(\n self,\n df: pyspark.DataFrame,\n column_name: str,\n batch_identifiers: dict,\n date_parts: Union[List[DatePart], List[str]],\n ) -> pyspark.DataFrame:\n self._validate_date_parts(date_parts)\n\n date_parts = self._convert_date_parts(date_parts)\n\n column_batch_identifiers: dict = batch_identifiers[column_name]\n\n date_parts_dict: dict = (\n self._convert_datetime_batch_identifiers_to_date_parts_dict(\n column_batch_identifiers, date_parts\n )\n )\n\n for date_part, date_part_value in date_parts_dict.items():\n df = df.filter(\n getattr(F, self._convert_date_part_to_spark_equivalent(date_part))(\n F.col(column_name)\n )\n == date_part_value\n )\n return df", "def __parse_dates(df):\n\t\tdf['release_date'] = pd.to_datetime(df['release_date'])\n\t\tdf['release_date'] = df['release_date'].fillna(df['release_date'].median())\n\t\tdf['year'] = df['release_date'].dt.year\n\t\tdf['month'] = df['release_date'].dt.month\n\t\tdf['day'] = df['release_date'].dt.weekday\n\t\tdf = pd.get_dummies(df, columns=['month', 'day'])\n\t\treturn df", "def convert_column_str2dates(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.str2date(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in items])\n elif output == 'list':\n return [self.str2date(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def get_dates(self, sr_df):\n return [\n date_obj.strftime(self.DATE_FORMAT) for date_obj in sr_df.index\n ]", "def csv_handle_changedate(self,col_name,col_type):\n table = self.csv_dataframe\n if col_type == 'date':\n table[col_name] = pd.to_datetime(table[col_name]).dt.date\n elif col_type == 'datetime':\n table[col_name] = pd.to_datetime(table[col_name]).dt.to_pydatetime()\n elif col_type == 'year':\n table[col_name] = pd.to_datetime(table[col_name].apply(lambda x: str(x)+'/1/1')).dt.date", "def date_parser(dates):\n return([item.split()[0] for item in dates])\n pass", "def parse_dates(df, args=(\"%Y-%m-%dT%H:%M:%S.%f\",)):\n try:\n datetime_col = df['reported_date'].apply(dt.datetime.strptime, args=args)\n except ValueError:\n datetime_col = pd.to_datetime(df.reported_date)\n\n return df.assign(reported_date=pd.Series(datetime_col).values)", "def datetime_column(filepath, skiprows, skipcolumns):\n df = pd.read_csv(filepath, skiprows=skiprows)\n df = df.drop(columns = skipcolumns)\n# df = df.head(10)\n \n# return df\n\n def try_parse(df):\n# print(df.iloc[1, :])\n # try parsing some rows from each column as date\n head = df.head()\n tail = df.tail()\n for column in df.columns:\n try:\n# print(dateutil.parser.parse(df[column].iloc[-1]))\n dt_head = 
dateutil.parser.parse(head[column].iloc[-1])\n dt_tail = dateutil.parser.parse(tail[column].iloc[-1])\n# print('possible datetime')\n# if not date.time() == datetime.time():\n if not dt_head.time() == dt_tail.time():\n if not dt_head.date() == dt_tail.date():\n # time seems to be present (not default parser value)\n return column\n except:\n continue\n return None\n \n # try without modifying values\n rv = try_parse(df=df)\n if rv:\n return rv\n \n # try modifying values\n chars = ['-', '_', '/', '#']\n for char in chars:\n dfc = df.copy()\n for col in dfc.columns:\n try:\n dfc[col] = dfc[col].str.split(char).str.join(' ')\n except:\n pass # will only work for str type\n# print(char, dfc.iloc[1, :])\n rv = try_parse(df=dfc)\n if rv:\n return rv", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def split_fr_date(date_str):\n day=date_str[:2]\n month=date_str[3:5]\n try:\n #date coded on 4 digits\n year=date_str[6:]\n except:\n #date coded on 2 digits\n year=date_str[4:]\n\n return year, month, day", "def parse_to_date_column(df: DataFrame, columnName: str, dateFmt: str, newColumnName: Union[str, None] = None) -> DataFrame:\n\n return df.withColumn(newColumnName if newColumnName else columnName,\n to_date(col(columnName), dateFmt))", "def validate_date(column_name, value, date_format, column_data_type=\"date\"):\n value = value.replace(\"T\", \" \")\n dtpart = value.split(\" \")\n value = dtpart[0]\n try:\n datetime.strptime(value, date_format)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def _convert_column_to_date(dframe, column):\n try:\n return dframe[column].apply(parse_date)\n except AttributeError:\n # it is already a datetime\n pass\n except ValueError:\n # it is not a correctly formatted date\n pass\n except OverflowError:\n # it is a number that is too large to be a date\n pass", "def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format", "def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df", "def date_wrangler(self, date_col):\n data = self.copy() # Create a copy of the DataFrame\n\n # Convert target column to datetime\n data[date_col] = pd.to_datetime(\n data[date_col], infer_datetime_format=True\n )\n\n # Split column into datetime components\n data[f\"{date_col}_year\"] = data[date_col].dt.year\n data[f\"{date_col}_year\"] = data[date_col].dt.month\n data[f\"{date_col}_year\"] = data[date_col].dt.day\n\n # Drop original column\n data = data.drop(columns=date_col)\n\n return data", "def fix_dates(line, date_names, headers):\n date_idxs = [headers.index(date_name) for date_name in date_names]\n for date_idx in date_idxs:\n val = line[date_idx]\n if val:\n # Forget times if they appear\n val = val.split(' ')[0]\n\n # Sometimes, miraculously, the val is *not* in American format:\n try:\n datetime.datetime.strptime(val, '%Y-%m-%d')\n # In the correct format!\n 
line[date_idx] = val\n continue\n except ValueError:\n # In the American format\n pass\n\n try:\n val = datetime.datetime.strptime(val, '%m/%d/%Y')\n except ValueError:\n # No idea what format this is in. Warn and return None\n print(\"Unreadable date {}\".format(val))\n line[date_idx] = None\n continue\n\n # Sometimes people write dates like 4/1/15. Bump the years to the modern era\n if val.year < 50:\n val = datetime.datetime(val.year + 2000, val.month, val.day)\n elif val.year < 100:\n val = datetime.datetime(val.year + 1900, val.month, val.day)\n val = val.strftime('%Y-%m-%d')\n line[date_idx] = val", "def get_parsed_date(row):\n input_years = row.get('date', '').strip()\n return parse_year(input_years)" ]
[ "0.6063431", "0.59881413", "0.59291905", "0.5892997", "0.58773583", "0.5724132", "0.56537443", "0.5642548", "0.55780613", "0.5577633", "0.55532646", "0.54812294", "0.54616386", "0.5457874", "0.5456375", "0.5441208", "0.5415469", "0.5410341", "0.54027736", "0.53965163", "0.5336607", "0.53349316", "0.5282297", "0.52821213", "0.5281", "0.5275838", "0.5273803", "0.52406925", "0.51636", "0.515621" ]
0.6002781
1
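The negatives in the record above all demonstrate the same pandas idiom of decomposing a date column into year/month/day parts. A minimal self-contained sketch of that pattern (the column names here are invented for illustration):

import pandas as pd

def split_date_column(df: pd.DataFrame, date_col: str) -> pd.DataFrame:
    # Parse the column as datetime, expose its parts, then drop the original column.
    out = df.copy()
    out[date_col] = pd.to_datetime(out[date_col])
    out["Year"] = out[date_col].dt.year
    out["Month"] = out[date_col].dt.month
    out["Day"] = out[date_col].dt.day
    return out.drop(columns=date_col)

# Example with two toy rows of ISO dates.
print(split_date_column(pd.DataFrame({"sale_date": ["2021-03-05", "2020-12-31"]}), "sale_date"))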
Divide the values in the named column by `divisor`, and split on that
def _split_on_divided_integer(
    self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict
):
    return (
        sa.cast(sa.column(column_name) / divisor, sa.Integer)
        == batch_identifiers[column_name]
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_on_divided_integer(\n df, column_name: str, divisor: int, batch_identifiers: dict\n ):\n matching_divisor = batch_identifiers[column_name]\n res = (\n df.withColumn(\n \"div_temp\",\n (F.col(column_name) / divisor).cast(pyspark.types.IntegerType()),\n )\n .filter(F.col(\"div_temp\") == matching_divisor)\n .drop(\"div_temp\")\n )\n return res", "def divideAll(self, divisor):\n divisor = float(divisor)\n for key in self:\n self[key] /= divisor", "def divideAll(self, divisor):\n divisor = float(divisor)\n for key in self:\n self[key] /= divisor", "def df_division(data, col_name, n_group=5, ascending=False):\n assert col_name in data.columns, '{} is not in columns of data!'.format(col_name)\n assert data[col_name].dtype == 'float' or data[col_name].dtype == 'int', \\\n 'type of {} is not comparable!'.format(col_name)\n\n data.reset_index(drop=True, inplace=True)\n rows = data.shape[0]\n rows_each_group = rows // n_group\n data.sort_values(by=col_name, ascending=ascending, inplace=True)\n data.reset_index(drop=True, inplace=True)\n\n division = []\n for i in range(n_group):\n if not i == n_group-1:\n division.append(data.iloc[i * rows_each_group: (i+1) * rows_each_group, :])\n else:\n division.append(data.iloc[i * rows_each_group:, :])\n\n return division", "def div(a, x):\n return [a[i]/x for i in range(2)]", "def div_value(self, lv, rv):", "def split(data):\n data = sorted(data, key=lambda x: x[0])\n half = len(data)//2\n return (data[half][0]+data[half + 1][0])/2\n print(data)", "def calcGainRatioSplitByColumn(self, data, structure, colIName):\n splitInfo, colIndex = 0, structure[colIName]['index']\n for value in structure[colIName]['values']:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data) if len(newData) != 0 else 1\n splitInfo += (-1) * p * log2(p)\n splitInfo = 1 if splitInfo == 0 else splitInfo\n return round(self.calcInfoGainByColumnSplit(data, structure, colIName) / splitInfo, 3)", "def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out", "def __divideset(rows, column, value):\n split_function = None #Initialize a variable split function.\n if isinstance(value, int) or isinstance(value, float): #Check if value is a number\n #True = the observation's value >= to the splitting criteria. 
False otherwise\n split_function = lambda row: row[column] >= value\n else:\n #If value is a string, True is where the observation's value == the criteria\n split_function = lambda row:row[column] == value\n \n #Divide the rows into two sets and return them\n set1 = [row for row in rows if split_function(row)]\n set2 = [row for row in rows if not split_function(row)]\n return (set1, set2)", "def divide(self,*datas):\n\t\tdatas = list(datas)\n\t\tresult = datas.pop(0)\n\t\tfor data in datas :\n\t\t\tresult /= data\n\n\t\treturn result", "def __dividePandas(df, column, value):\n if isinstance(value, int) or isinstance(value, float): #Check if value is a #\n #Divide the rows into two sets and return them\n set1 = df[df[column] >= value] #Observations greater than or equal to value\n set2 = df[df[column] < value] #Observations less than value are in set2\n else:\n set1 = df[df[column] == value] #Observations equal to value are in set 1\n set2 = df[df[column] != value] #Observations not equal to value are in set2 \n return (set1, set2)", "def __floordiv__(self, other: Any) -> ColumnOperators:\n return self.operate(floordiv, other)", "def divide(numbers):\n quot = numbers[0]\n for i in numbers[1:]:\n quot = quot / i\n return quot", "def ratio(self, dims, divider_dim, min_value=0.1):\n divider = self.get_cols(divider_dim)[0].copy()\n if min_value:\n divider[divider < min_value] = min_value\n new_data = self.data.copy()\n idx = [self.dims.index(dim) for dim in dims]\n new_data[:,idx] = new_data[:,idx] / np.array([divider]).T\n return DataTable(new_data, self.dims, self.legends, self.tags.copy())", "def safe_div(numerator, denominator, name='safe_div'):\n return array_ops.where(\n math_ops.equal(denominator, 0),\n array_ops.zeros_like(numerator),\n math_ops.div(numerator, denominator),\n name=name)", "def divide(self, divisor: float) -> float:\n if self.check_type_not_complex(number=divisor):\n try:\n self.__memory /= divisor\n return self.__memory\n except ZeroDivisionError:\n print(\"Division by zero is not allowed\")\n return self.__memory\n return self.__memory", "def divideList(L):\n for x in range(len(L)):\n L[x] = L[x]/100.0\n return L", "def divide(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = result / n\n return result", "def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets", "def divide_numbers(value_a, value_b):\n # this is a shorthand way to create the division object and added it history in one line\n Calculator.add_calculation_to_history(Division.create(value_a, value_b))\n return Calculator.get_result_of_last_calculation_added_to_history()", "def divide(self, val):\n ancien_pri = 999999\n ancien_chunck = 1\n for pri in prime_array:\n if val % pri == 0 and pri >= self.MINIMUM_NUMBER_OF_CHUNK and val / pri < self.MAXIMUM_SIZE_PER_CHUNK:\n ancien_pri = int(pri)\n ancien_chunck = int(val / pri)\n print({\"size\": ancien_pri, \"chunck\": ancien_chunck})\n self.divide(ancien_chunck)\n\n return {\"size\": ancien_pri, \"chunck\": ancien_chunck}", "def __div__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"/\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"/ scalar)\"\n x /= value\n return x", "def normalize_data(df):\r\n return df/df.ix[0,:]", "def split(self, amount):\n split_objs = list(self.all())\n if not 
split_objs:\n raise NoSplitsFoundForRecurringCost()\n\n portions = [split_obj.portion for split_obj in split_objs]\n\n split_amounts = ratio_split(amount, portions)\n return [\n (split_objs[i], split_amount)\n for i, split_amount\n in enumerate(split_amounts)\n ]", "def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])", "def seperate_list(list, division_part):\n avg = len(list) / float(division_part)\n out = []\n last = 0.0\n\n while last < len(list):\n out.append(list[int(last):int(last + avg)])\n last += avg\n return out", "def mixed_divide_by_events_lenght(data_df:pd.DataFrame, path_column, sizes_filename=None):\n sizes = None\n if sizes_filename is not None:\n if os.path.exists(sizes_filename):\n with open(sizes_filename, 'rb') as sizes_handler:\n sizes = pickle.load(sizes_handler)\n if sizes is None:\n sizes = dict()\n aux = 0\n for index, row in data_df.iterrows():\n sys.stderr.write('\\rdone {0:%}'.format(aux / len(data_df)))\n with open(row[path_column], 'rb') as file_handler:\n try:\n values = pickle.load(file_handler)\n except Exception as e:\n print(row[path_column])\n print(\"test\")\n print(e)\n raise ValueError()\n if len(values) not in sizes.keys():\n sizes[len(values)] = []\n sizes[len(values)].append(row['episode'])\n aux += 1\n if sizes_filename is not None:\n with open(sizes_filename, 'wb') as sizes_handler:\n pickle.dump(sizes, sizes_handler)\n return sizes", "def divide(numerator, denominator):\n ensure_divisibility(numerator, denominator)\n return numerator // denominator", "def divide(self):\n return self._do_calc(self.divider)" ]
[ "0.7624918", "0.63281494", "0.63281494", "0.6198992", "0.59379506", "0.5877097", "0.5860313", "0.5730168", "0.5702097", "0.5687075", "0.56626314", "0.5631788", "0.5629911", "0.5626099", "0.5592552", "0.55592173", "0.55007976", "0.5481256", "0.54458344", "0.5437595", "0.5420304", "0.54015005", "0.5393597", "0.53754824", "0.53750604", "0.5371639", "0.5353598", "0.5328522", "0.5318524", "0.5277258" ]
0.7020373
1
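A small usage sketch for the integer-division split in the record above, assuming SQLAlchemy 1.4-style select(); the events table and its id column are hypothetical:

import sqlalchemy as sa

metadata = sa.MetaData()
events = sa.Table("events", metadata, sa.Column("id", sa.Integer))

# Keep only rows that fall in the bucket floor(id / 10) == 3, mirroring the predicate above.
predicate = sa.cast(events.c.id / 10, sa.Integer) == 3
print(sa.select(events).where(predicate))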
Split on the hashed value of the named column
def _split_on_hashed_column(
    self,
    table_name: str,
    column_name: str,
    hash_digits: int,
    batch_identifiers: dict,
):
    return (
        sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)
        == batch_identifiers[column_name]
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_on_hashed_column(\n df,\n column_name: str,\n hash_digits: int,\n batch_identifiers: dict,\n hash_function_name: str = \"sha256\",\n ):\n try:\n getattr(hashlib, hash_function_name)\n except (TypeError, AttributeError):\n raise (\n gx_exceptions.ExecutionEngineError(\n f\"\"\"The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.\n Reference to {hash_function_name} cannot be found.\"\"\"\n )\n )\n\n def _encrypt_value(to_encode):\n hash_func = getattr(hashlib, hash_function_name)\n hashed_value = hash_func(to_encode.encode()).hexdigest()[-1 * hash_digits :]\n return hashed_value\n\n encrypt_udf = F.udf(_encrypt_value, pyspark.types.StringType())\n res = (\n df.withColumn(\"encrypted_value\", encrypt_udf(column_name))\n .filter(F.col(\"encrypted_value\") == batch_identifiers[\"hash_value\"])\n .drop(\"encrypted_value\")\n )\n return res", "def _split_on_column_value(\n self, table_name: str, column_name: str, batch_identifiers: dict\n ):\n\n return sa.column(column_name) == batch_identifiers[column_name]", "def split(value, key):\n return str(value).split(key)", "def split_column(df,col_name,reg_ex=',',keep=False):\n # https://stackoverflow.com/a/51680292/5847441\n df = df.select(col_name,posexplode(split(col_name,reg_ex)).alias('pos','val'))\\\n .select(col_name,concat(lit(col_name),col('pos').cast('string')).alias('name'),'val')\\\n .groupBy(col_name).pivot('name').agg(first('val'))\n if keep:\n return df\n else:\n return df.drop(col_name)", "def tidy_split(df, column='Members', sep=', '):\n\n indexes = []\n new_values = []\n for i, presplit in enumerate(df[column].astype(str)):\n for value in presplit.split(sep):\n indexes.append(i)\n new_values.append(value)\n new_df = df.iloc[indexes, :].copy() # the .copy() Prevents a warning\n new_df[column] = new_values\n df = new_df.reset_index(drop=True)\n return df", "def split(value, delimiter):\n return value.split(delimiter)", "def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n return (None, None)\n tableName, primKey = util.save_split(path.strip(\"/\"), \"/\", 1)\n # _logger.debug(\"'%s' -> ('%s', '%s')\" % (path, tableName, primKey))\n return (tableName, primKey)", "def tidy_split(df, column, sep='|', keep=False):\r\n indexes = list()\r\n new_values = list()\r\n df = df.dropna(subset=[column])\r\n for i, presplit in enumerate(df[column].astype(str)):\r\n values = presplit.split(sep)\r\n if keep and len(values) > 1:\r\n indexes.append(i)\r\n new_values.append(presplit)\r\n for value in values:\r\n indexes.append(i)\r\n new_values.append(value)\r\n new_df = df.iloc[indexes, :].copy()\r\n new_df[column] = new_values\r\n return new_df", "def _split(self, sql):\n\n placeholder = \"\\ufffc\" # unicode object replacement character\n\n if self._delimiter == ';':\n return sqlparse.split(sql)\n\n # We must find a string that original sql does not contain.\n # Most likely, our placeholder is enough, but if not, keep looking\n while placeholder in sql:\n placeholder += placeholder[0]\n sql = sql.replace(';', placeholder)\n sql = sql.replace(self._delimiter, ';')\n\n split = sqlparse.split(sql)\n\n return [\n stmt.replace(';', self._delimiter).replace(placeholder, ';')\n for stmt in split\n ]", "def split(self, X):", "def word_splitter(df):\n \n df['temp_column']= df['Tweets'].str.lower() #temporary column that contains the tweets in lowercase\n df['Split Tweets']= df['temp_column'].str.rsplit() #splitting the words of temporal column into a new column 'Split tweets'\n df 
= df.drop('temp_column', 1) #delete temporal column\n\n return df", "def split_id(self):\n return self._split_id", "def splitline (self, line):\n\t\treturn line.split('\\t')", "def split_on_column(df, index_column, split_column, new_index_column, new_split_column, split_char=';'):\n # Produce a Series with index from index_column and values from split_column (split by split_char).\n # Each series constructor is called with (value, [split1, split2, split3]) which produces [(value, split1), (value, split2), (value, split3)]\n split = pd.concat([pd.Series(row[index_column], row[split_column].split(split_char))\n for _, row in df.iterrows()]).reset_index()\n # Add column names\n split.columns = [new_split_column, new_index_column]\n # Return reversed columns\n return split[[new_index_column, new_split_column]]", "def splitkv(s):\n a=re.split('(\\w*)\\s*=\\s*\"([^=\"]*)\"\\s*', s)\n a=[ t for t in a if t!='']\n return a", "def split_on_column_value(\n df, column_name: str, batch_identifiers: dict\n ) -> pyspark.DataFrame:\n return df.filter(F.col(column_name) == batch_identifiers[column_name])", "def _splitFieldValue(self, line):\n found = self.FIELDVALUE.findall(line)\n if found:\n fieldName, value = found[0]\n if fieldName in self.C.ADAPTER_COMMAFIELDS:\n value = self.COMMASPLIT.findall(value)[:-1] # Split and remove last empty part\n return fieldName, value\n return None, None # No field name match on this line.", "def _split_on_divided_integer(\n self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict\n ):\n\n return (\n sa.cast(sa.column(column_name) / divisor, sa.Integer)\n == batch_identifiers[column_name]\n )", "def smart_split(x):\n return R_SPLIT_DELIM.split(x)", "def word_splitter(df):\n #Create column, split and make strings lowercase.\n df['Split Tweets'] = df['Tweets'].str.lower().str.split() \n return df", "def split_text_id_columns(line: str, id_column: bool = False) -> str:\n pattern = re.compile(r\"(\\d{3,5}\\s+)(.+)\")\n match = re.match(pattern, line)\n assert match is not None\n if id_column:\n return match.group(1)\n return match.group(2)", "def split_table_name(table_name):\n try:\n return table_name.split(\";\")\n except ValueError:\n raise IncorrectTableNameException(\n \"Given table name '{0}' is incorrect. Table\"\n \" name should have form: '<app_label>;<model_name>'\".format(\n table_name\n )\n )", "def word_splitter(df):\n result = []\n l1 = df['Tweets']\n for tweet in l1:\n result.append(tweet.lower().split(' '))\n df['Split Tweets'] = result\n return df", "def word_splitter(df):\n df['Split Tweets'] = [i.lower().split() for i in df['Tweets']]\n return df", "def split_columns(l):\n return [l[:3], l[3:7], l[7:12], l[12:16], l[16:]]", "def split_into_tokens(dataset, delimiter=\"\"):\n pass", "def Split(S):\n # for each char do\n # if c splits S into s1 and s2\n # then return {s1, s2}\n \n # return S", "def split_into_columns(s):\n\ts = re.sub(',,,', ',0,0,', s)\n\ts = re.sub(',,', ',0,', s)\n\treturn s.split(',')", "def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()", "def _split_on_mod_integer(\n self, table_name: str, column_name: str, mod: int, batch_identifiers: dict\n ):\n\n return sa.column(column_name) % mod == batch_identifiers[column_name]" ]
[ "0.65764296", "0.5979117", "0.58276886", "0.576672", "0.5747084", "0.5704968", "0.56568956", "0.5436421", "0.5386944", "0.53809637", "0.5376536", "0.53194606", "0.52639484", "0.5231733", "0.52234346", "0.52208877", "0.52128637", "0.5200018", "0.5196556", "0.5189515", "0.51760787", "0.5159063", "0.51504296", "0.5146065", "0.5143616", "0.5143371", "0.5137477", "0.51185477", "0.5113853", "0.51108354" ]
0.70502675
0
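The hashed-column split in the record above assumes the backend exposes md5() and right() (e.g. PostgreSQL). A sketch against a hypothetical users table:

import sqlalchemy as sa

metadata = sa.MetaData()
users = sa.Table("users", metadata, sa.Column("email", sa.String))

# Keep rows whose md5(email) ends in the two hex digits "a3" -- roughly 1/256 of the table.
predicate = sa.func.right(sa.func.md5(users.c.email), 2) == "a3"
print(sa.select(users).where(predicate))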
Take the mod of named column, and only keep rows that match the given value
def _sample_using_mod(
    self,
    column_name,
    mod: int,
    value: int,
):
    return sa.column(column_name) % mod == value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_column(col, row):\n return col == column", "def split_on_mod_integer(df, column_name: str, mod: int, batch_identifiers: dict):\n matching_mod_value = batch_identifiers[column_name]\n res = (\n df.withColumn(\n \"mod_temp\", (F.col(column_name) % mod).cast(pyspark.types.IntegerType())\n )\n .filter(F.col(\"mod_temp\") == matching_mod_value)\n .drop(\"mod_temp\")\n )\n return res", "def _split_on_mod_integer(\n self, table_name: str, column_name: str, mod: int, batch_identifiers: dict\n ):\n\n return sa.column(column_name) % mod == batch_identifiers[column_name]", "def extract_relevant_rows(df, column_name, column_value, not_equal=False):\n\n if not_equal:\n return df.loc[df[column_name] != column_value]\n\n return df.loc[df[column_name] == column_value]", "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]", "def remove(df, pattern):\n return df[~df.index.isin(df.query(pattern).index)]", "def filter_row(col, rw):\n return rw == row", "def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)", "def filter_column(self, column=None, values=[]):\n if column is None:\n return self.df\n\n columns = self.df.columns\n if column not in columns:\n raise ColumnNameError(\"Column does not exist in the dataframe!\")\n\n return self.df.filter(col(column).isin(values))", "def delete_entries(df, column, values):\n for val in values:\n dropindex = df[df[column] == val].index\n df.drop(index = dropindex, inplace = True)", "def filter_table_by_column(\n self, table: Table, column: Column, operator: str, value: Any\n ):\n self._requires_table(table)\n\n condition = to_condition(operator, value)\n\n before = len(table)\n table.filter_by_column(column, condition)\n after = len(table)\n\n self.logger.info(\"Filtered %d rows\", after - before)", "def split_on_column_value(\n df, column_name: str, batch_identifiers: dict\n ) -> pyspark.DataFrame:\n return df.filter(F.col(column_name) == batch_identifiers[column_name])", "def pattern_search(pattern, dataset, column):\n # Filter\n dataset = dataset[dataset[column].str.contains(pattern, regex=True)]\n # Reset index\n dataset = dataset.reset_index(drop=True)\n # Return\n return dataset", "def _split_on_column_value(\n self, table_name: str, column_name: str, batch_identifiers: dict\n ):\n\n return sa.column(column_name) == batch_identifiers[column_name]", "def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True", "def drop_uniform_slice_from_dataframe(df, value, axis=0):\n\n if axis == 0:\n dropped = (df == value).all(axis=0)\n if any(dropped):\n print('Removed {} column index(ices) whose values are all {}.'.\n format(dropped.sum(), value))\n return df.ix[:, ~dropped]\n\n elif axis == 1:\n dropped = (df == value).all(axis=1)\n if any(dropped):\n print('Removed {} row index(ices) whose values are all {}.'.format(\n dropped.sum(), value))\n return df.ix[~dropped, :]", "def keep(self, columns: List[str]):\n self._check_columns(columns)\n return self._fromdata(\n {\n self.dtype.fields[i].name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n for i in range(self._data.children_size())\n if self.dtype.fields[i].name in columns\n },\n self._mask,\n )", "def search_column_with_constraint(db, table, column, condition_col, condition_val):\n condition = condition_col + \" = '\" + str(condition_val) + \"'\"\n result = 
select_columns(db, table, column, condition=condition)\n\n return result", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def filter_by_names(df, names_list):\n return df.ix[names_list]", "def where_helper(temp_table, all_columns, where):\n\ttry:\n\t\tcomparison = where.tokens[2]\t\t\t\t\t\t# comparison = \"A=8\";\n\t\tcomparison.tokens = [x for x in comparison.tokens if not x.is_whitespace()]\t\t# No more white spaces\t\t\t\n\t\tkey = str(comparison.tokens[0])\t\t\t\t\t\t# key = \"A\"\n\t\t\n\t\tif '.' not in key:\n\t\t\tkey = check_overlapping_fields(all_columns, key)\n\t\ttry:\n\t\t\tvalue = int(str(comparison.tokens[2]))\t\t\t# whether it is an integer value on RHS of comparison or some column\n\t\t\ttemp_table.delete_rows_by_int(key, value, str(comparison.tokens[1]))\n\t\texcept:\n\t\t\tvalue = str(comparison.tokens[2])\n\t\t\tif '.' not in value:\n\t\t\t\tvalue = check_overlapping_fields(all_columns, value)\n\t\t\ttemp_table.delete_rows_by_col(key, value, str(comparison.tokens[1]))\n\texcept:\n\t\traise SqlException(\"Invalid Syntax\")\n\treturn temp_table", "def filter_values(df, value=0, axis=0):\n \n if axis:\n return df.loc[:, (df != value).any(axis=1-axis)]\n else:\n return df.loc[(df != value).any(axis=1-axis)]", "def filter_table_with_keyword(self, table: Table, name: str, *args):\n self._requires_table(table)\n\n def condition(row: Row) -> bool:\n return BuiltIn().run_keyword(name, row, *args)\n\n before = len(table)\n table.filter_all(condition)\n after = len(table)\n\n self.logger.info(\"Removed %d row(s)\", before - after)", "def findExtraColumnMatch(self, column_name):\n column_name = column_name.upper()\n con = self.getMetadataDatabaseConnection()\n matches = []\n results = con.cursor()\n con.cursor().callproc('qiime_assets.find_extra_column_match', [column_name, results])\n #for row in results:\n for row in results:\n matches.append(row[0])\n \n return matches", "def filter_rows(self, **kwargs):\n filtered = self._data.copy()\n for colname, values in kwargs.items():\n values = [values] if type(values) == str else values\n filtered = filtered[filtered[colname].isin(values)]\n return self._copy(filtered)", "def where_number(self, column_name: str, comparison: str, value: jsii.Number) -> \"SpaceDelimitedTextPattern\":\n return jsii.invoke(self, \"whereNumber\", [column_name, comparison, value])", "def filter_by(df, constraints):\n indexer = [constraints[name] if name in constraints else slice(None)\n for name in df.index.names]\n return df.loc[tuple(indexer)] if len(df.shape) == 1 else df.loc[tuple(indexer),]", "def column_wildcard(self) -> Optional[pulumi.Input['DataCellsFilterColumnWildcardArgs']]:\n return pulumi.get(self, \"column_wildcard\")", "def _filter_valid_id(df, col):\n df = df[(df[col].str.isnumeric().replace({np.nan: False})) &\n (df[col] != '0') &\n (df[col] != 0)]\n return df", "def filter_dataframe_by_composition(df, composition, formula_column=\"Composition\"):\n # Get elements in formula, composition, then filter\n chemsys = set(Composition(composition).keys())\n all_comps = df[formula_column].apply(Composition)\n indices_to_include = [ind for ind, comp in all_comps.items()\n if comp.keys() <= chemsys]\n return df.loc[indices_to_include]" ]
[ "0.6376289", "0.61524767", "0.59794253", "0.5769592", "0.5636871", "0.5633086", "0.558487", "0.5578941", "0.55405074", "0.5523299", "0.54107213", "0.5403629", "0.5289054", "0.52319825", "0.5160314", "0.5159226", "0.51499134", "0.51131636", "0.507911", "0.5066149", "0.50261253", "0.50255114", "0.5023594", "0.4975759", "0.49628094", "0.49590427", "0.4951848", "0.49497837", "0.49406216", "0.49287972" ]
0.6648762
0
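The modulo sampler in the record above builds a free-standing column expression, so it can be attached to any SELECT. A compact sketch using SQLAlchemy's lightweight table()/column() constructs (names hypothetical):

import sqlalchemy as sa

orders = sa.table("orders", sa.column("id"))

# Sample roughly one row in seven: keep those where id % 7 == 0.
print(sa.select(orders).where(sa.column("id") % 7 == 0))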
Match the values in the named column against value_list, and only keep the matches
def _sample_using_a_list(
    self,
    column_name: str,
    value_list: list,
):
    return sa.column(column_name).in_(value_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def value_in(table_rows, values=[], col_name=\"\", col_num=-1):\n key = col_name\n if(key==\"\"):\n key = table_rows[0].keys[col_num]\n rst = True\n lst = []\n for i in range(len(table_rows)):\n value = table_rows[i].get_d_value(key)\n if(value not in values):\n rst = False\n lst.append(\"(col:{0},row:{1}:value:{2}\".format(\n key, i, value\n ))\n return rst,\",\".join(lst)", "def create(df,column,list_):\n return df[df[column].isin(list_)]", "def _split_on_multi_column_values(\n self, table_name: str, column_names: List[str], batch_identifiers: dict\n ):\n\n return sa.and_(\n *[\n sa.column(column_name) == column_value\n for column_name, column_value in batch_identifiers.items()\n ]\n )", "def _list(self, val, fld):\n if isinstance(val, (list, tuple)):\n if len(val) == 1:\n return fld == val[0]\n else:\n return fld.in_(val)\n else:\n return fld == val", "def match_list(column, patterns):\n for pattern in patterns:\n if pattern.match(column):\n return True\n return False", "def list_should_contain_value(self,list_,value,msg=None):\r\n\r\n default =\"%s contains value '%s'\" %(seq2str(list_),value)\r\n _verify_condition(vlaue not in list_,default,msg)", "def column_values_in_list(col, test_list):\n test = np.array([c_i in test_list for c_i in col])\n return test", "def remove_values_from_list(self,list_,*values):\r\n for value in values:\r\n while value in list_:\r\n list_.remove(value)", "def filter_names(self, qs, name, value):\n return qs.filter(name__in=value)", "def a_list(test_val: object, test_col: object, valid_values: object) -> object:\n tv_upper = test_val.upper()\n rc: bool = True\n # noinspection PyTypeChecker\n value_list = [x[test_col] for x in valid_values]\n value_list_upper = [x.upper() for x in value_list]\n if tv_upper not in value_list_upper:\n print(f'{test_val} is invalid. 
Valid values are {str(value_list)}')\n rc = False\n return rc", "def search_all(self, word_list):\n return [k for k,v in self.data_values.iteritems() \n if all(w.lower() in v.lower() for w in word_list)]", "def validateListValue(self, list_name, list_value):\n try:\n con = self.getMetadataDatabaseConnection()\n results = 0\n results = con.cursor().callproc('qiime_assets.validate_list_value', [list_name, list_value, results])\n return results[2]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def _expand_wildcards(self, value_list):\n result = {}\n do_proportion = False\n for v in set(value_list):\n if v.startswith(\"!\"):\n v = v[1:]\n if not v:\n # FIXME(aloga): check this message\n raise exception.CollectorException(\n message=\"Cannot just negate a match!\")\n if \"*\" in v:\n index = \"NOT CONTAINS\"\n else:\n index = \"NOT IN\"\n elif v == \"**\":\n do_proportion = True\n continue\n else:\n if \"*\" in v and v != \"*\":\n index = \"CONTAINS\"\n else:\n index = \"IN\"\n\n if v in set().union(*result.values()):\n # FIXME(aloga): check this message\n raise exception.CollectorException(rule=v)\n result.setdefault(index, set()).add(v)\n return do_proportion, result", "def map_values_to_value_list(value_list, values):\n return [value_list.index(x) for x in values]", "def filter_list(data: List[dict], field: str, selected: List[str]):\n if len(selected):\n return [x for x in data if x[field] in selected]\n else:\n return data", "def isin(self, values: Union[list, dict, IColumn]):\n if isinstance(values, list):\n return self._fromdata(\n {\n self.dtype.fields[i]\n .name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n .isin(values)\n for i in range(self._data.children_size())\n },\n self._mask,\n )\n if isinstance(values, dict):\n self._check_columns(values.keys())\n return self._fromdata(\n {n: c.isin(values[n]) for n, c in self._field_data.items()}\n )\n if isinstance(values, IDataFrame):\n self._check_columns(values.columns)\n return self._fromdata(\n {n: c.isin(values=list(values[n])) for n, c in self._field_data.items()}\n )\n else:\n raise ValueError(\n f\"isin undefined for values of type {type(self).__name__}.\"\n )", "def search_any(self, word_list):\n # Same as search_all except uses the built-in any()\n return [k for k,v in self.data_values.iteritems() \n if any(w.lower() in v.lower() for w in word_list)]", "def find_all_items(items: WebElements, value_list: List[str]=PARAMS_LEAGUES) -> WebElements:\n items_list = []\n for item in items:\n if any([True if word in item.text.lower() else False for word in value_list]):\n items_list.append(item)\n return items_list", "def filter_rows(self, **kwargs):\n filtered = self._data.copy()\n for colname, values in kwargs.items():\n values = [values] if type(values) == str else values\n filtered = filtered[filtered[colname].isin(values)]\n return self._copy(filtered)", "def query_by_value_like(self, table_name, fields_value_mappings):\n\n if self.engine:\n result = []\n if fields_value_mappings:\n table = self.sql_metadata.tables[table_name]\n if table is not None:\n _class = self.mapper.class_\n _query = self.session.query(_class)\n\n for field_value_mapping in fields_value_mappings:\n for attr, value in field_value_mapping.items():\n _result = _query.filter(getattr(_class, attr).like(\"%%%s%%\" % value))\n\n if _result is None:\n continue\n else:\n for _i in _result.all():\n result.append(_i.__dict__)\n\n return result", 
"def filter_by_isin(df: pd.DataFrame, column: str, values: Iterable) -> pd.DataFrame:\n # First, create a \"map\" series from all possible values in the column => whether they should pass the filter\n all_ids = df[column].unique()\n is_id_relevant = pd.Series(np.zeros(len(all_ids)), index=all_ids).astype('bool') # Default false\n is_id_relevant.loc[values] = True\n\n # Create a boolean mask for column, based on the mapping above. Grab the raw array.\n mask = is_id_relevant[df[column]].values\n # Apply mask\n return df[mask]", "def make_where_in(cls, key, value_list):\n\n return \"%s IN (%s)\" % (\n cls.to_attr_str(key), \", \".join(cls.to_value_str_list(value_list)))", "def make_where_not_in(cls, key, value_list):\n\n return \"%s NOT IN (%s)\" % (\n cls.to_attr_str(key), \", \".join(cls.to_value_str_list(value_list)))", "def assert_list(self, ref, predicate, values):\n for obj in self.graph.objects(ref, predicate):\n if unicode(obj) in values:\n values.remove(unicode(obj))\n\n self.assertTrue(len(values) == 0, \"Not all expected values were found in graph. remaining: \"\n + \", \".join(values))", "def task_3_find_item_via_value(data: DT, value) -> DT:\n return [dic for dic in data if value in dic.values()]", "def __column_intersect(df, list_):\n return set(list_).intersection(set(df.columns.tolist()))", "def valid_value(self, value):\n for val in value.split(','):\n valid = super(MultiSelectField, self).valid_value(val)\n if not valid:\n return False\n return True", "def values(self, items_list):\n return [self.resolve(value) for value in items_list]", "def remove_values(self, values: Collection[Hashable]) -> bool:\n\t\tany_values_removed = False\n\n\t\tfor value in values:\n\t\t\tif value in self._potential_values:\n\t\t\t\tself._potential_values.remove(value)\n\t\t\t\tany_values_removed = True\n\n\t\treturn any_values_removed" ]
[ "0.6528692", "0.6075162", "0.6061627", "0.59260726", "0.59116757", "0.5900305", "0.5770534", "0.57195044", "0.5671975", "0.5632688", "0.55851847", "0.55442405", "0.55013937", "0.55002934", "0.54953384", "0.549166", "0.5421752", "0.5407499", "0.53741693", "0.536061", "0.5339668", "0.5322994", "0.5313418", "0.52839684", "0.5281418", "0.52539074", "0.52500135", "0.52442837", "0.52332354", "0.52205926" ]
0.68063223
0
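Likewise, the list sampler in the record above reduces to a single .in_() clause; for example (state codes chosen arbitrarily):

import sqlalchemy as sa

cities = sa.table("cities", sa.column("state"))

# Keep only rows whose state appears in the given value list.
print(sa.select(cities).where(sa.column("state").in_(["CA", "OR", "WA"])))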
[0x58, 0x59, 0x01, 0x00, 0x00] => "0x58, 0x59, 0x01, 0x00, 0x00"
def bytes_arr_to_hex_str(bytes_arr: List[int]) -> str:
    return ", ".join("0x%02x" % b for b in bytes_arr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upp_stringer(input_list): #input a characteristics list\r\n\toutput_list=[]\r\n\tfor item in input_list:\r\n\t\toutput_list.append(str(stellagama.pseudo_hex(item)))\r\n\treturn ''.join (output_list) #output a string\r", "def colors_to_string(colors):\n return ''.join(['%02x%02x%02x' % (r,g,b) for r,g,b in colors])", "def render_list_as_hex(self, data):\n s = '[ '\n for c in data:\n s += '%02x ' % c\n s += ']'\n return s", "def _bitlist_to_string(self, data):\n result = []\n pos = 0\n c = 0\n while pos < len(data):\n c += data[pos] << (7 - (pos % 8))\n if pos % 8 == 7:\n result.append(c)\n c = 0\n pos += 1\n return bytes(result)", "def convert_to_string(_bytes: bytes)-> str:\n # print('input bytes: ', _bytes)\n # print('string: ', binascii.hexlify(_bytes))\n # print('string2: ', _bytes.hex())\n # print('string3: ', \" \".join([\"{:02x}\".format(x) for x in _bytes]))\n return \" \".join([\"{:02x}\".format(x) for x in _bytes])", "def unicoder(string):\n\treturn \"\\x00\".join(string) + \"\\x00\"", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def hex_list(self):\r\n return [''.join(['{:02X}'.format(b) for b in data]) for data in self.buffers()]", "def hex_dump(string):\n return ' '.join([\"%0.2X\" % ord(x) for x in string])", "def hexify(buffer):\n return ''.join('%02x' % ord(c) for c in buffer)", "def bit_array_to_string(array: Iterable) -> str:\n\n res = ''.join(\n [chr(int(y, 2)) for y in [''.join([str(x) for x in _bytes])\n for _bytes in Des.n_split(array, 8)]])\n return res", "def list_to_string(list):\n if len(list) == 1:\n string = '{}x1'.format(list[0])\n elif list[1:] == list[:-1]:\n string = '{}x{}'.format(list[1], len(list))\n else:\n string = ''\n for i in range(len(list) - 1):\n string += str(list[i]) + ','\n string += str(list[-1])\n return string", "def format_hex(self, list_converted):\n dict_hex = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}\n list_converted = [dict_hex[n] if n in dict_hex.keys() else str(n) for n in list_converted]\n return list_converted", "def uInt32HexListStr(uInt32List):\n \n outputStr = \"\"\n for value in uInt32List:\n outputStr += \"\\n\\t\" + uInt32HexStr(value)\n outputStr += \"\\n\"\n return outputStr", "def hx(i):\n a = hex(i)[2:]\n if len(a)<2: a = ''.join(['0',a])\n return a", "def dump( n ):\n\n s = '%x' % n\n if len(s) & 1:\n s = '0' + s\n return s.decode('hex')", "def ByteToHex( bins ):\r\n\r\n return ''.join( [ \"%02X\" % x for x in bins ] ).strip()", "def get_byte_string(self):\n return \"\".join(['%02X' % i for i in self._data]).decode('hex')", "def to_hex_str(dec_array):\r\n\r\n as_hex = [f'{i:02x}' for i in dec_array]\r\n\r\n return ''.join(as_hex)", "def numList2String(l):\n\treturn ''.join(map(chr, l))", "def array2hex(array):\n hexstr = \"\"\n for i in range(4):\n hexstr += ''.join('{:02x}'.format(x) for x in array[i])\n return hexstr", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def b2hex(b):\n return \"\".join([\"%02x\"%_ for _ in b])", "def acgt_to_string(s: list[list[str]]) -> list[list[str]]:\r\n s_out = [[\"\"] for i in range(len(s))]\r\n for i in range(len(s) - 1):\r\n h = \"\"\r\n for j in range(len(s[i])):\r\n if s[i][j] == 0:\r\n h += \"00\"\r\n if s[i][j] == 1:\r\n h += \"01\"\r\n if s[i][j] == 2:\r\n h += \"10\"\r\n if s[i][j] == 3:\r\n h += \"11\"\r\n s_out[i][0] = h\r\n return s_out", "def bytes2hexstr(bytes_buffer, sep=''):\t\n\treturn 
sep.join(map(lambda x: '{0:02X}'.format(x), bytes_buffer))", "def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))", "def _reg_encode_utf16_list(self, xlist):\n t = '' \n for x in xlist: \n t += self._reg_encode_utf16(x + u'\\u0000') # null term \n t += self._reg_encode_utf16(u'\\u0000') # end of list (double null) \n return t", "def bits2string(b=None):\n return ''.join([chr(int(x, 2)) for x in b])", "def MakeReadableString(val):\n printable = string.digits + string.letters + string.punctuation + ' ' + '\\t'\n out = []\n for c in val:\n if c in printable:\n out.append(' %c ' % c)\n else:\n out.append('0x%02x ' % ord(c))\n return ''.join(out)", "def ListToStr(val):\n return ''.join(['%c' % c for c in val])" ]
[ "0.71455836", "0.66164225", "0.6604635", "0.65370417", "0.65258", "0.65230733", "0.6488749", "0.6480381", "0.64735025", "0.6464688", "0.6441291", "0.6422277", "0.639696", "0.63435936", "0.63080764", "0.62981135", "0.6286795", "0.625519", "0.6242381", "0.6226093", "0.6198905", "0.6193561", "0.61770624", "0.6135634", "0.6122604", "0.6117582", "0.61158854", "0.6108902", "0.6105596", "0.6097811" ]
0.67328227
1
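The formatter in the record above can be checked directly against the example given in its own query string; a self-contained round-trip check (the assert is illustrative, not part of the dataset):

from typing import List

def bytes_arr_to_hex_str(bytes_arr: List[int]) -> str:
    # Render each byte as 0xNN and join with ", ".
    return ", ".join("0x%02x" % b for b in bytes_arr)

assert bytes_arr_to_hex_str([0x58, 0x59, 0x01, 0x00, 0x00]) == "0x58, 0x59, 0x01, 0x00, 0x00"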
"0x58, 0x59, 0x01, 0x00, 0x00" => [0x58, 0x59, 0x01, 0x00, 0x00]
def hex_str_to_bytes_arr(bytes_str: str) -> List[int]:
    return eval(f"[{bytes_str}]")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hex_list(self):\r\n return [''.join(['{:02X}'.format(b) for b in data]) for data in self.buffers()]", "def buf_to_list(buf):\r\n buf_stripped = buf.raw.decode().rstrip('\\x00')\r\n# for ch in buf_stripped:\r\n# if (ch == '0') or (ch == '\\t') or (ch == '\\n'):\r\n# name = name.rstrip(',')\r\n# if len(name) > 0:\r\n# namelist.append(name)\r\n# name = ''\r\n# if ch == '\\000':\r\n# break\r\n# else:\r\n# name += ch\r\n#\r\n# return namelist\r\n return buf_stripped.split(', ')", "def decompose_byte(data: str, nibble: bool = False) -> list:\n _bytes = int(len(sanatize_hex(data)) / 2)\n mem_size = 8\n if nibble:\n mem_size = 4\n binary_data = format(int(str(data), 16), f\"0{_bytes*8}b\")\n return [\n format(int(binary_data[mem_size * x : mem_size * (x + 1)], 2), f\"#0{int(mem_size/2)}x\")\n for x in range(0, int(len(binary_data) / mem_size))\n ]", "def _string_to_bitlist(self, data):\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n # bit-wise operation\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n return result", "def StrToList(val):\n return [ord(c) for c in val]", "def bytes_to_uuid_list(byte_array):\n result = []\n for i in range(0, len(byte_array)//16):\n result.append(uuid.UUID(bytes=bytes(byte_array[i*16:i*16+16])))\n return result", "def string_block(data):\n strings = []\n while True:\n crc = unpack(\"<I\", data)\n if 255 > crc > 0:\n break\n strings.append(de_string(data).decode('utf-8').split(':'))\n return strings", "def _bytestringToValuelist(bytestring, numberOfRegisters):\n _checkInt(numberOfRegisters, minvalue=1, description='number of registers')\n numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n _checkString(bytestring, 'byte string', minlength=numberOfBytes, maxlength=numberOfBytes)\n\n values = []\n for i in range(numberOfRegisters):\n offset = _NUMBER_OF_BYTES_PER_REGISTER * i\n substring = bytestring[offset : offset + _NUMBER_OF_BYTES_PER_REGISTER]\n values.append(_twoByteStringToNum(substring))\n\n return values", "def string_to_bit_array(text_string: str) -> list:\n\n array = list()\n for char in text_string:\n # Get the char value on one byte\n bin_val = Des.bin_value(char, 8)\n # Add the bits to the final list\n array.extend([int(x) for x in list(bin_val)])\n return array", "def Read2000256List(self):\n items = []\n for i in range(0, 2000):\n data = self.ReadBytes(64)\n ba = bytearray(binascii.unhexlify(data))\n ba.reverse()\n items.append(ba.hex().encode('utf-8'))\n return items", "def _get_string_list(property_value):\n property_value = property_value.strip(b'\\x00').decode('utf-8')\n property_value = property_value.split('\\x00')\n return property_value, ''", "def string_to_bit_array(text):\n array = list()\n for char in text:\n bin_val = bin_value(char, 8) # Get value of char in one byte\n array.extend([int(x) for x in list(bin_val)]) # Add the bits to the list\n return array", "def int_list(data: bytes) -> list:\n byte_data = BytesIO(data)\n byte_list = []\n single_byte = byte_data.read(1)\n while single_byte != b\"\" and single_byte != \"\":\n single_int = byte_to_int(single_byte)\n byte_list.append(single_int)\n single_byte = byte_data.read(1)\n return byte_list", "def hex_to_spins(self, hex_spins):\n \n # purely alphanumeric strings only\n assert(hex_spins.isalnum())\n \n binary = '{:0{}b}'.format(int(hex_spins,16), self.size)\n spins = bitarray.bitarray(binary)\n\n return spins", "def bin_to_nibbles(s):\n return [hti[c] for c in encode_hex(s)]", 
"def read_bytes_to_list(path):\n vstup = []\n index = 0\n with open(path, \"rb\") as f:\n\n byte = f.read(1)\n while byte != '':\n index = index + 1\n vstup.append(struct.unpack('b', byte)[0])\n byte = f.read(1)\n\n if not byte:\n break\n return vstup", "def test_deserialize_list():\n input = bytes([\n *UnsignedInt.to_bytes(5),\n *UnsignedInt.to_bytes(1),\n *UnsignedInt.to_bytes(2),\n *UnsignedInt.to_bytes(3),\n *UnsignedInt.to_bytes(4),\n *UnsignedInt.to_bytes(5),\n ])\n assert [1, 2, 3, 4, 5] == List(UnsignedInt).read(input)", "def getHexwords(msg):\n hexwords = []\n for i in range(0, len(msg), 8):\n msgBlock = msg[i:i+8]\n m = stringToHex(msgBlock)\n hexwords.append(m)\n\n last = hexwords[-1]\n hexwords[-1] += ''.join(['0'] * (16-len(last)))\n return hexwords", "def hex2dec_on_list(lst):\n data = []\n for i, val in enumerate(lst):\n data.append(hex2dec(val))\n return data", "def hex_to_RGB(hex_code: str) -> list:\n\n hex_code = hex_code.lstrip('#')\n return [int(hex_code[i:i + 2], 16) for i in (0, 2, 4)]", "def getpalette(data):\n\tpalette = []\n\tstring = StringIO(data)\n\twhile True:\n\t\ttry:\n\t\t\tpalette.append(unpack(\"<4B\", string.read(4)))\n\t\texcept StructError:\n\t\t\tbreak\n\treturn palette", "def byte2array(bytes):\n array = []\n for i, byte in enumerate(bytes):\n if i % 4 == 0:\n array.append([byte])\n else:\n array[i // 4].append(byte)\n return array", "def convert_unicode_field(string):\n values = []\n for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:\n values.append(u\"\".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))\n return values", "def convert_uint32_to_array(value):\n return [\n (value >> 0 & 0xFF),\n (value >> 8 & 0xFF),\n (value >> 16 & 0xFF),\n (value >> 24 & 0xFF)\n ]", "def decode(self, s):\n lststr = s.split(',')\n if s=='': return []\n rst = []\n for i in range(len(lststr)):\n rst.append(lststr[i])\n return rst", "def bits(data):\n\treturn [format(ord(c),'08b') for c in data]", "def strToList(x):\n if type(x)==str:\n return [int(i) for i in x[1:-1].split(\", \")]", "def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs", "def decode_int_list(L):\n return [] if L == '[]' else [int(a) for a in L[1:-1].split(\",\")]", "def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]" ]
[ "0.65582275", "0.63307226", "0.6290027", "0.6262765", "0.61640704", "0.6160979", "0.6156085", "0.61130404", "0.610784", "0.60554963", "0.6033458", "0.5946255", "0.5939836", "0.58945185", "0.5894092", "0.5832303", "0.5822892", "0.58158875", "0.58045244", "0.580421", "0.5776219", "0.57684356", "0.5756212", "0.5755542", "0.5742121", "0.57156235", "0.56978893", "0.5680594", "0.56789136", "0.56716496" ]
0.6725189
0
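The parser in the record above relies on eval(); an alternative sketch (not part of the record) that parses each token explicitly gives the same result on the query's example:

from typing import List

def hex_str_to_bytes_arr_safe(bytes_str: str) -> List[int]:
    # Split on commas and parse each "0xNN" token as a base-16 integer.
    return [int(tok, 16) for tok in bytes_str.split(",") if tok.strip()]

assert hex_str_to_bytes_arr_safe("0x58, 0x59, 0x01, 0x00, 0x00") == [0x58, 0x59, 0x01, 0x00, 0x00]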
load csv into data frame
def load_data(csv_path):
    df = pd.read_csv(csv_path)
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def read_csv():", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def _load_csv_into_df(csv_file: Any, csv_name: str) -> pd.DataFrame:\n try:\n df = pd.read_csv(csv_file, sep=\"|\", header=0, dtype=str, encoding=\"UTF-8\")\n except ValueError as e:\n print(f\"ERROR! Could not read the file {csv_name}: {e}\")\n raise\n return df", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def csv_to_df(self, path=None):\n # reads the csv file and puts it to the dataframe\n df = pd.read_csv(path)\n return df", "def load_csv():\n df = pd.read_csv(datafolder+filename, decimal=decimal).astype(\n {'min': 'float', 'max': 'float'})\n return df", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def loadCSV(input_file):", "def read_csv(csv_path):\n \n df = pd.read_csv(csv_path)\n\n return df", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")", "def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):\n print \"Loading %s and creating DataFrame..\" % catalog\n df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)\n print \"..Done\\n----------\"\n return df_imported", "def create_dataframe_from_csv(path_to_csv_file):\r\n df = pd.read_csv(path_to_csv_file)\r\n return df", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. 
Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def load_to_dataframe(self) -> DataFrame:\n return read_csv(self._csv_path, converters={\n # Check if embedding size is the empty string,\n # as it would be for Count models\n \"Embedding size\": lambda v: int(float(v)) if len(v) > 0 else nan\n })", "def read_data_from_csv(filename):\n df = pd.read_csv(filename)\n return df", "def read_from_csv(path):\n if not os.path.exists(path):\n return None\n if not path.endswith('.csv'):\n return None\n\n with open(path, 'r') as file:\n data = pd.read_csv(file, header=0)\n\n return data", "def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def _csv_to_df(csv_path, headers):\n\n # Assume all columns are strings\n columns_types = {i: str for i, header in enumerate(headers)}\n\n temp_df = pd.read_csv(csv_path, converters=columns_types, skip_blank_lines=False)\n # TODO: check that there are only two columns of type string, then convert to our format\n temp_df.columns = headers\n # Add the column split, this is all training data\n temp_df['annotation_unit_id'] = None\n return temp_df", "def read_csv(path):\n return pd.read_csv(path)", "def _load_stored_csv(path: Union[Path, str]) -> Union[pd.DataFrame, pd.Series]:\n data = pd.read_csv(path, index_col=0, parse_dates=[0]).round(12)\n data.index = data.index.tz_convert(REFERENCE_TZ)\n return data", "def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print (\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df" ]
[ "0.8076421", "0.7554986", "0.7526359", "0.74822295", "0.7439858", "0.74330825", "0.7395306", "0.73376906", "0.7332784", "0.7308946", "0.7283677", "0.7266445", "0.7250622", "0.72173434", "0.7191797", "0.71897143", "0.71263695", "0.71054494", "0.71051294", "0.70921", "0.70683056", "0.7062756", "0.7036574", "0.7033014", "0.7014973", "0.70010865", "0.69997203", "0.69875467", "0.6979487", "0.696777" ]
0.7674975
1
split survivals results out of df
def split_outcomes(df):
    outcomes = df['Survived']
    df = df.drop('Survived', axis=1)
    return outcomes, df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_phases(df):\n return(\n tuple([df.groupby('Phase').get_group(p) for p in df.Phase.unique()])\n )", "def show_diverse_recs(res, threshold):\n rec_ids = [] # result list\n while len(rec_ids) < threshold:\n for clust in res[\"CENTROID\"].unique():\n cluster_rec = res[res[\"CENTROID\"] == clust]\n if len(rec_ids) < threshold:\n for i in cluster_rec.index:\n if i in rec_ids:\n continue\n else:\n rec = i\n if rec not in rec_ids:\n rec_ids.append(rec) # add unique rec\n break\n else:\n continue\n else:\n break\n # return subset of df with re-arranged items\n return res.loc[rec_ids]", "def split_data(df):\n # drop any instances that have missing values\n df = df.dropna()\n\n # define features\n features = df[['pitch_type', 'release_speed', 'release_spin_rate',\n 'if_fielding_alignment', 'launch_angle', 'launch_speed',\n 'hc_x', 'hc_y', 'stand', 'type', 'RH']]\n\n # make dummies for categorical features\n features = pd.get_dummies(features)\n\n # define label\n label = df['hit']\n\n # split data into test and training\n features_train, features_test, label_train, label_test = \\\n train_test_split(features, label, test_size=0.3)\n\n standard = StandardScaler()\n\n features_train = standard.fit_transform(features_train)\n features_test = standard.transform(features_test)\n\n return features_train, features_test, label_train, label_test", "def DivideDF(all_data):\n return all_data.iloc[:890], all_data.iloc[891:].drop(\"Survived\", axis=1)", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def _partitionize(df, settings, grids, frag):\n column = settings['feature']\n if len(df) > 0:\n init, end, end2 = grids\n tmp = df.apply(lambda row: _inblock(row, column, init, end), axis=1)\n tmp = df.loc[tmp]\n\n if len(frag) > 0:\n frag = pd.concat([frag, tmp])\n else:\n frag = tmp\n return frag", "def get_possible_splits( df , attribute ):\n ds = df.loc[:,attribute]\n \n # First sort the values \n ds = ds.sort_values().drop_duplicates()\n \n # Compute averages of consecutive values \n ds = ds.rolling(2).sum().divide(2)\n splits = ds[1:].tolist()\n \n # return the possible splits \n return splits", "def group(df, dvmin, dvmax, step):\n\tr = step/2\n\tres = []\n\n\tfor ticker in range(dvmin, dvmax, step):\n\t\t#select values by left-right difference in sum in range (x-r, x+r). x is the middle value of a bucket. \n\t\tsubgroup = df.loc[(df['diff']>ticker-r) & (df['diff']<ticker+r)\n\t\t\t& (df['choice'] != 0.5)]\n\t\t#count frequency of choosing left\n\t\tnum = subgroup['choice'].sum()\n\t\t#total number of datapoints in the bucket\n\t\tdenom = subgroup.shape[0]\n\t\t#calculate and append the prob. 
append 0 if empty bucket\n\t\tres.append(num/denom) if denom else res.append(0)\n\treturn res", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard", "def reduce(df_slice):\n reduc = df_slice[[\"precinct\", \"party\", \"candidate\", \"votes\", \"county\", \"office\"]]\n result = reduc.set_index(['office', 'precinct', 'county', 'candidate'])['votes'].unstack()\n return result.reset_index()", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def split(df, stratify_by=None):\n \n if stratify_by == None:\n train, test = train_test_split(df, test_size=.3, random_state=123)\n train, validate = train_test_split(df, test_size=.3, random_state=123)\n else:\n train, test = train_test_split(df, test_size=.2, random_state=123, stratify=df[stratify_by])\n train, validate = train_test_split(df, test_size=.3, random_state=123, stratify=train[stratify_by])\n \n return train, validate, test", "def potential_splits(self, potential_xj):\r\n \r\n self.cur.execute(\"SELECT DISTINCT \" + potential_xj + \" FROM \" + self.table_name + \";\")\r\n potential_splits = [ii[0] for ii in self.cur.fetchall()]\r\n return potential_splits", "def split_dataframe(df, split_elements_list):\n y = df.filter(split_elements_list)\n x = df.drop(split_elements_list, axis=1)\n\n return x, y", "def split_data(df):\n\n df['ranked_latest'] = df.groupby(['userId'])['timestamp'].rank(method='first', ascending=False)\n train_df = df[df['ranked_latest'] != 1]\n test_df = df[df['ranked_latest'] == 1]\n\n train_df = train_df[['userId', 'movieId', 'rating']]\n test_df = test_df[['userId', 'movieId', 'rating']]\n\n return train_df, test_df", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df", "def split_on_whole_table(\n df: pyspark.DataFrame,\n ) -> pyspark.DataFrame:\n return df", "def __dividePandas(df, column, value):\n if isinstance(value, int) or isinstance(value, float): #Check if value is a #\n #Divide the rows into two sets and return them\n set1 = df[df[column] >= value] #Observations greater than or equal to value\n set2 = df[df[column] < value] #Observations less than value are in set2\n else:\n set1 = df[df[column] == value] #Observations equal to value are in set 1\n set2 = df[df[column] != value] #Observations not equal to value are in set2 \n 
return (set1, set2)", "def df_group_opbreken(df_in):\n df_lijst_als_groter_dan=[]\n for df in df_in.itertuples():\n df_lijst_als_groter_dan.append(df)\n\n return df_lijst_als_groter_dan", "def clean(df):", "def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b", "def cluster_by_split(filtered_df):\n global features_in_range\n global table\n # make a copy of the entire data set\n unfiltered_df = table\n # get total number of robot faces in data set\n total_rows = len(unfiltered_df)\n\n # drop any column that is not included in our list of 11 features\n # 11 features = 16 features with no dependencies filtered via 20-80% range\n for col in unfiltered_df:\n if not unfiltered_df[col].name in features_in_range:\n unfiltered_df = unfiltered_df.drop(unfiltered_df[col].name, 1)\n\n # iterate over the dataframe of columns generated by the range\n for col in filtered_df:\n try:\n # for each column, call groupby() and calculate percentage\n check_for_20 = unfiltered_df.groupby(col).size().reset_index(name='count')\n check_for_20['as_percent'] = 100 * check_for_20['count'] / float(total_rows)\n # ignore feature values that represent less than 20% of all faces\n cluster_by_feature = check_for_20[check_for_20['as_percent'] >= 20]\n # if feature has values over 20%, iterate over\n # each feature_value and generate clusters\n if not cluster_by_feature.empty:\n # iterate over every value of the feature\n for index, row in cluster_by_feature.iterrows():\n # use feature value to call groupby() on the entire data set\n results = unfiltered_df[unfiltered_df[col] == row[0]]\n results = results \\\n .groupby(list(unfiltered_df)) \\\n .size() \\\n .reset_index(name='count')\n # calculate count as a percentage\n results['as_percent'] = 100 * results['count'] / float(total_rows)\n results = results.sort_values(by='as_percent', ascending=False)\n # store results in a .tsv file\n filename = str(col) + \"_\" + str(row[0]) + '_feature_cluster.tsv'\n results.to_csv(filename.replace(\"/\", \"-\"), header=True, sep='\\t')\n print(\"results written to file\")\n except:\n # 'count' and 'percentage' columns will generate errors\n # since they don't exist in the original data set\n pass", "def disagg(vec:gpd.GeoDataFrame):\n\t\t# Split GeometryCollections\n\t\tno_coll = []\n\t\tfor i, row in vec.iterrows():\n\t\t\tgeom = row.geometry\n\t\t\tif geom.type == 'GeometryCollection':\n\t\t\t\tfor part in geom:\n\t\t\t\t\trow2 = row.copy()\n\t\t\t\t\trow2.geometry = part\n\t\t\t\t\tno_coll.append(row2)\n\n\t\t\telse:\n\t\t\t\t\tno_coll.append(row) \n\n\t\t# Split Multi geomries\n\t\tres = []\n\t\tfor row in no_coll:\n\t\t\tgeom = row.geometry\n\t\t\tif geom.type.startswith('Multi'):\n\t\t\t\tfor part in geom:\n\t\t\t\t\trow2 = row.copy()\n\t\t\t\t\trow2.geometry = part\n\t\t\t\t\tres.append(row2)\n\t\t\telse:\n\t\t\t\t\tres.append(row)\n\n\t\treturn gpd.GeoDataFrame(res, crs=vec.crs).reset_index(drop=True)", "def split_by_user(df: pd.DataFrame, train_ratings_num: int) -> Tuple[pd.DataFrame, pd.DataFrame]:\n df = index_items(df)\n train = df[df.item_index < train_ratings_num].drop(columns=['item_index'])\n test = df[df.item_index >= train_ratings_num].drop(columns=['item_index'])\n return train, test", "def split(interactions: pd.DataFrame, p: float = 0.25) -> Tuple[pd.DataFrame, pd.DataFrame]:\n test = interactions.groupby('track_id').sample(frac=p)\n rows = set((a, b) for _, (a, b, _) in 
test.iterrows())\n train_mask = [i for i, (_, (a, b, _)) in tqdm(enumerate(interactions.iterrows()), desc=\"Constructing train-set\",\n total=len(interactions)) if (a, b) not in rows]\n train = interactions.iloc[train_mask]\n\n return train, test", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def split_parliament(parliament_df):\n pro_independence = get_pro_independence_parties()\n votes = parliament_df[VOTES].to_dict()\n deputies = parliament_df[SEATS].to_dict()\n const_votes = sum([v for k, v in votes.items() if k not in pro_independence])\n indep_votes = sum([v for k, v in votes.items() if k in pro_independence])\n const_diput = sum([v for k, v in deputies.items() if k not in pro_independence])\n indep_diput = sum([v for k, v in deputies.items() if k in pro_independence])\n return {NO_INDEPENDENCE: {VOTES: const_votes, SEATS: const_diput},\n PRO_INDEPENDENCE: {VOTES: indep_votes, SEATS: indep_diput}}", "def __split_df(self, df:pd.DataFrame, ratio:float, rem_day4:bool, shuffle:bool, n_vec: int=1) -> Tuple[list, list, list, list]:\n X_test = []\n X_train = [] \n y_test = [] \n y_train = [] \n\n header = df['label'].tolist()\n responses = df['response'].tolist()\n # Removing Day 4\n trails = set()\n for i in range(len(header)):\n if rem_day4 and responses[i] == \"0\":\n pass\n else:\n trails.add(header[i])\n \n header = trails\n\n # Getting all the matrices from the trials\n for trial in header:\n # geting rows with (day, Trail)-label\n rows = df.loc[df['label'] == trial].to_numpy()\n # getting response label\n response = rows[0][-1]\n # getting the actual data from the matrix\n rows = np.delete(rows, np.s_[0,1,-1], axis=1)\n if shuffle:\n # shuffle PC-Matrix\n np.random.shuffle(rows)\n\n if n_vec == 1:\n pass\n else:\n new_rows = []\n # taking samples\n while len(rows) > n_vec:\n vecs = rows[:n_vec]\n # deleting vectors that are already taken\n rows = rows[n_vec:]\n # Concat vectors to one\n new_rows.append(np.concatenate(vecs))\n rows = new_rows\n\n # Splitting into Test and training\n cut = int(ratio*len(rows))\n for i in range(len(rows)):\n if i < cut or ratio == 0.0:\n X_train.append(rows[i])\n y_train.append(response)\n else:\n X_test.append(rows[i])\n y_test.append(response)\n\n return X_train, X_test, y_train, y_test", "def related_df_shaper(df): \n id_related=list()\n id_primary=list()\n id_relation_type=list()\n for id_term in df.id_term:\n \n related_id_list=df.loc[df.id_term==id_term,'related_terms'].values[0]\n id_relation_type_list=df.loc[df.id_term==id_term,'id_relation_type'].values[0]\n for i in range(len(related_id_list)):\n id_related.append(related_id_list[i])\n id_relation_type.append(id_relation_type_list[i])\n id_primary.append(id_term)\n \n df_rs=pd.DataFrame({'id_term':id_primary,'id_term_related':id_related,'id_relation_type':id_relation_type})\n now=pd.to_datetime(datetime.datetime.now())\n df_rs=df_rs.assign(datetime_created=now)\n df_rs=df_rs.assign(datetime_updated=now)\n df_rs=df_rs.assign(id_user_created=7)\n df_rs=df_rs.assign(id_user_updated=7)\n \n return df_rs" ]
[ "0.60274947", "0.59297115", "0.57942295", "0.567151", "0.56509566", "0.56292737", "0.5605837", "0.5603", "0.5589303", "0.55613124", "0.5553913", "0.54861426", "0.5476571", "0.5466991", "0.5439459", "0.53859234", "0.5374307", "0.5348394", "0.53154784", "0.5299357", "0.5229916", "0.52175814", "0.5215912", "0.52135265", "0.5196133", "0.5190242", "0.51827097", "0.51814616", "0.5145087", "0.51442796" ]
0.62963355
0
Read schedule configuration from file and load the json.
def get_schedules():
    path = config.get('schedule', 'paths', './schedule.json')
    with open(path) as schedule_file:
        return json.load(schedule_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_config(self):\n with open(self.TEMPERATURE_CONFIG_FILE_PATH, 'r') as file:\n self.config = json.load(file)", "def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)", "def load_irrigation_schedule():\n global irrigation_schedule\n\n # Read and parse the properties file.\n if not os.path.exists(FILE_PROPERTIES):\n return\n\n f = open(FILE_PROPERTIES)\n try:\n data = json.loads(f.read())\n except JSONDecodeError:\n data = {}\n finally:\n f.close()\n\n if PROP_SCHEDULE in data:\n irrigation_schedule = data[PROP_SCHEDULE]", "def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)", "def load_conf(self, filename):\n\n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename) as file:\n self.conf = json.loads(file.read())", "def reload_schedule():\n global jsonConfig\n global curSchedule\n\n jsonConfig = None\n curSchedule = None\n\n # Clear currently scheduled bells.\n schedule.clear(\"current\")\n\n logging.debug(\"Reloading schedule...\")\n with open(jsonFile) as jsonFileHandle:\n jsonConfig = json.load(jsonFileHandle)\n\n # Check that default structure for json config is respected.\n if \"calendar\" not in jsonConfig or \"default\" not in jsonConfig[\"calendar\"]:\n logging.error(\"Malformed json config. Invalid calendar table.\")\n return\n elif \"schedules\" not in jsonConfig:\n logging.error(\"Malformed json config. Invalid schedules table.\")\n return\n elif \"patterns\" not in jsonConfig:\n logging.error(\"Malformed json config. Invalid patterns table.\")\n return\n\n # Check to see if this date has a specific schedule.\n curDate = datetime.datetime.today().strftime(\"%Y-%m-%d\")\n if curDate in jsonConfig[\"calendar\"]:\n curSchedule = jsonConfig[\"calendar\"][curDate]\n else:\n # If this isn't a special day, we look up the schedule by day of the week.\n curDayOfWeek = datetime.datetime.now().strftime(\"%A\")\n if curDayOfWeek in jsonConfig[\"calendar\"][\"default\"]:\n curSchedule = jsonConfig[\"calendar\"][\"default\"][curDayOfWeek]\n else:\n logging.debug(\"No schedule found for date.\")\n return\n\n # Now that we have the schedule to use, does it exist?\n if curSchedule not in jsonConfig[\"schedules\"]:\n logging.error(\"Schedule\" + curSchedule + \" not found in json config. 
Aborting.\")\n return\n\n # Add bells for this schedule.\n for bellTime in jsonConfig[\"schedules\"][curSchedule]:\n schedule.every().day.at(bellTime).do(ring_bells).tag(\"current\")\n logging.debug(\"Scheduled bells using pattern '\" + jsonConfig[\"schedules\"][curSchedule][bellTime] + \"' at \" + bellTime)", "def read(self):\r\n try:\r\n with open(self.filename, 'r') as f:\r\n self.__config = json.load(f)\r\n except (IOError, OSError) as e:\r\n # File reading error\r\n if not os.path.exists(self.filename):\r\n self.__config = {}\r\n else:\r\n raise\r\n except ValueError:\r\n # JSON decoding error\r\n raise", "def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config", "def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)", "def load_config():\n here = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(here, 'config.json')\n with open(config_path, encoding='utf-8') as f:\n return json.load(f)", "def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)", "def _load_config(path) -> dict:\n with open(path, \"r\") as F:\n return json.load(F)", "def load(filepath):\n with open(filepath) as f:\n return Config(json.load(f))", "def Load(self, filename):\n if os.path.exists(filename):\n\n norm_file_path = os.path.normpath(filename)\n\n if self.verbose:\n\n print \"Loading schedule '%s'\" % norm_file_path\n \n try:\n \n self._schedule_data = yaml.load(open(norm_file_path,'rb'))\n \n except yaml.YAMLError, exc: \n\n raise errors.ScheduleError(\"Failed to load schedule '%s' from file: %s\" % (filename, exc))\n\n else:\n\n self._schedule_loaded = False\n \n raise errors.ScheduleError(\"Schedule file '%s' doesn't exist\" % filename)\n\n try:\n\n self.ParseSchedule(self._schedule_data)\n\n except errors.ScheduleError, e:\n\n print \"%s\" % e\n\n self._schedule_loaded = False\n\n self._schedule_loaded = True", "def load_config():\n global config\n\n with open(\"config.json\") as f:\n json_config = f.read()\n f.close()\n config = json.loads(json_config)", "def __load_config(self) -> dict:\n file = open(\"config.json\")\n config_file = json.load(file)\n file.close()\n return config_file", "def _read_config_file(self):\r\n\r\n try:\r\n with open(self.config, 'r') as f:\r\n config_data = json.load(f)\r\n except FileNotFoundError:\r\n config_data = {}\r\n\r\n return config_data", "def _load_schedule(self, filename):\n with open(filename, 'rt', encoding='utf-8') as f:\n xml = f.read()\n\n # Compose the message for the controller.\n message = ScheduleDefinitionMessage(xml, os.path.realpath(filename))\n\n # Send the message to the controller.\n self._zmq_controller.send_pyobj(message)\n\n # Await the response from the controller.\n response = self._zmq_controller.recv_json()\n\n if response['ret'] == 0:\n self._io.log_verbose(response['message'])\n else:\n self._io.error(response['message'])\n\n return response['ret'] == 0", "def read(self,filename):\n with open(str(filename),\"r\") as f:\n data = f.read()\n #check if the loaded file is json\n try:\n datajson = json.loads(data)\n except Exception as e:\n if mer == True:\n merrors.error('could not load '+str(filename)+', add a basic entry to the config like {\"name\":\"Example\"}. Python error: '+str(e))\n quit()\n else:\n print(\"could not load \"+str(filename)+\". 
Python error: \"+str(e))\n quit()\n self.datajson = datajson\n self.filename = filename\n f.close()", "def load_from_file(config_path):\n return load_json_file(config_path)", "def _load_config_file(self, config_type):\n cloudwatch_config = self.provider_config[\"cloudwatch\"]\n json_config_file_section = cloudwatch_config.get(config_type, {})\n json_config_file_path = json_config_file_section.get(\"config\", {})\n json_config_path = os.path.abspath(json_config_file_path)\n with open(json_config_path) as f:\n data = json.load(f)\n return data", "def load(self):\n with sppasPathSettings() as sp:\n config = os.path.join(sp.etc, \"sppas.json\")\n if os.path.exists(config) is False:\n raise OSError(\"No such file or directory: {:s}\".format(config))\n else:\n with open(config) as cfg:\n self.__dict__ = json.load(cfg)", "def _load_config():\n fname = _get_config_fname()\n if fname is None or not op.isfile(fname):\n return dict()\n with open(fname, 'r') as fid:\n config = json.load(fid)\n return config", "def read_config():\n with open(CONFIG_PATH) as config_file:\n return json.load(config_file)", "def config_from_json(self, filename):\n with open(filename, 'r') as f:\n config = json.load(f)\n config = self._process_config_imports(config)\n self.config.update(config)", "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "def open_config_file(file):\n with open(file, \"r\") as file_open:\n return json.load(file_open)", "def config():\n with open(config_path) as config_file:\n data = json.load(config_file)\n return data", "def load_config(config_file):\n try:\n with open('settings.json', 'r') as f:\n return json.loads(f.read())\n except (IOError, Exception) as e:\n print '%s' % e\n exit()" ]
[ "0.7224128", "0.7197243", "0.7081696", "0.7046951", "0.70186424", "0.6930678", "0.69084924", "0.68250173", "0.68214375", "0.67013276", "0.66890836", "0.66843784", "0.66651", "0.6656593", "0.6648547", "0.6635397", "0.6628288", "0.65703183", "0.6557144", "0.65432274", "0.65169567", "0.65048707", "0.6496402", "0.6456956", "0.64544624", "0.64536804", "0.64536804", "0.641231", "0.6408092", "0.6402381" ]
0.7330782
0
Check all schedule configurations to start and stop instances
def schedule():
    for profile in schedules['profiles']:
        instances = _get_instances(profile['instance_tags'], profile['region'])
        start_stop_instances(instances, profile['schedule'])
        reregister_elb_instances(profile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"", "def __run_schedules():\n while True:\n __scheduler.run()", "def test_retrieve_instances_schedule_state(self):\n pass", "def test_autoscaling_schedules_unset(self) -> None:\n if self.prod_env:\n schedules = self.autoscaling.describe_scheduled_actions(AutoScalingGroupName='saints-xctf-server-prod-asg')\n self.assertTrue(len(schedules.get('ScheduledUpdateGroupActions')) == 0)\n else:\n self.assertTrue(all([\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-morning',\n recurrence='30 11 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-morning',\n recurrence='30 13 * * 1-5', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-afternoon',\n recurrence='30 22 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-night',\n recurrence='30 3 * * 2-6', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekend', recurrence='30 11 * * 0,6',\n max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekend', recurrence='30 3 * * 0,1',\n max_size=0, min_size=0, desired_size=0)\n ]))", "def test_update_instances_schedule_state(self):\n pass", "def checkUpstreamScheduler():", "def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()", "async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no 
tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def _create_schedules(self):\n\n ''''''", "def test_list_schedules(self):\n pass", "def schedule_monitor(schedule):\n if schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True", "def all():\n schedule = Scheduler()\n schedule.committees()\n schedule.legislators()\n schedule.bills()", "def run(self):\n for req, resp in self.servings:\n resp.check_timeout()", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def run_scheduled_tasks(self) -> None:\n self.scheduler.run(False)", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def 
initialize_scheduler():\n\n with SCHED_LOCK:\n\n # Check if scheduler should be started\n start_jobs = not len(SCHED.get_jobs())\n\n # Update check\n github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0\n\n schedule_job(versioncheck.checkGithub, 'Check GitHub for updates',\n hours=0, minutes=github_minutes, seconds=0)\n\n # Our interval should never be less than 30 seconds\n monitor_seconds = CONFIG.MONITORING_INTERVAL if CONFIG.MONITORING_INTERVAL >= 30 else 30\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(plextv.get_real_pms_url, 'Refresh Plex server URLs',\n hours=12, minutes=0, seconds=0)\n schedule_job(pmsconnect.get_server_friendly_name, 'Refresh Plex server name',\n hours=12, minutes=0, seconds=0)\n\n schedule_job(activity_pinger.check_recently_added, 'Check for recently added items',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.NOTIFY_RECENTLY_ADDED))\n schedule_job(activity_pinger.check_server_response, 'Check for Plex remote access',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.MONITOR_REMOTE_ACCESS))\n schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',\n hours=12 * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)\n\n # If we're not using websockets then fall back to polling\n if not CONFIG.MONITORING_USE_WEBSOCKET or POLLING_FAILOVER:\n schedule_job(activity_pinger.check_active_sessions, 'Check for active sessions',\n hours=0, minutes=0, seconds=monitor_seconds)\n\n # Refresh the users list and libraries list\n user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12\n library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12\n\n if CONFIG.PMS_TOKEN:\n schedule_job(plextv.refresh_users, 'Refresh users list',\n hours=user_hours, minutes=0, seconds=0)\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(pmsconnect.refresh_libraries, 'Refresh libraries list',\n hours=library_hours, minutes=0, seconds=0)\n\n backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6\n\n schedule_job(database.make_backup, 'Backup PlexPy database',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n schedule_job(config.make_backup, 'Backup PlexPy config',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n\n # Start scheduler\n if start_jobs and len(SCHED.get_jobs()):\n try:\n SCHED.start()\n except Exception as e:\n logger.info(e)\n\n # Debug\n #SCHED.print_jobs()", "async def run_scheduler(self):\n while True:\n interval = 60\n for s in await self.get_service('data_svc').locate('schedules'):\n now = datetime.now().time()\n diff = datetime.combine(date.today(), now) - datetime.combine(date.today(), s.schedule)\n if interval > diff.total_seconds() > 0:\n self.log.debug('Pulling %s off the scheduler' % s.name)\n sop = copy.deepcopy(s.task)\n sop.set_start_details()\n await self._services.get('data_svc').store(sop)\n self.loop.create_task(self.run_operation(sop))\n await asyncio.sleep(interval)", "def delete_schedule(sender, instance, **kwargs):\n try:\n instance.schedule_on.delete()\n except (AssertionError, AttributeError) as e:\n print('No on schedule')\n try:\n instance.schedule_off.delete()\n except (AssertionError, AttributeError) as e:\n print('No off schedule')\n try:\n instance.schedule_on.crontab.delete()\n except (AssertionError, AttributeError) as e:\n print('No Crontab on')\n try:\n instance.schedule_off.crontab.delete()\n 
except (AssertionError, AttributeError) as e:\n print('No Crontab off')", "def found_schedules(self) -> bool:\n return self._schedule_list != []", "def check_configs(self):\n\n pass", "def start_monitor():\n monitor_enabled = config_json[env]['MONITOR_ENABLED']\n monitor_trigger_interval_s = int( config_json[env]['MONITOR_TRIGGER_INTERVAL_S'] )\n\n # IF SCHEDULE IS ENABLED IN CONFIG:\n if monitor_enabled == \"1\":\n\n print(\"\\nSpace Weather Service Monitor: ENABLED (running every %s seconds)\" % monitor_trigger_interval_s)\n\n # RUN INITIAL CHECK SPACE WEATHER\n processes.process_check_space_weather()\n\n # CREATE SCHEDULER W/ INTERVAL TRIGGER AND START\n scheduler = BackgroundScheduler()\n scheduler.add_job(\n func = processes.process_check_space_weather,\n trigger = IntervalTrigger( seconds = monitor_trigger_interval_s ),\n id = 'check_space_weather',\n name = 'Checking Space Weather Every 30 Seconds')\n scheduler.start()\n atexit.register( lambda: scheduler.shutdown() )\n else:\n print(\"\\nSpace Weather Service Monitor: DISABLED\")", "def schedule_start(self):\n print(\"Scheduler for monitoring request is running\")\n self.initialize_scheduler()", "async def test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def test_reports_enabled_hosts_as_up(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def stopSchedule(self):\n DPxStopDinSched()", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number" ]
[ "0.71326065", "0.65044415", "0.64524704", "0.6353409", "0.6326504", "0.6221054", "0.61959714", "0.6026816", "0.60222584", "0.59739757", "0.59491706", "0.592607", "0.5874639", "0.5816534", "0.57862383", "0.5757011", "0.573736", "0.5729255", "0.5709102", "0.5708846", "0.57071704", "0.5674211", "0.56741244", "0.5672439", "0.5656557", "0.56179106", "0.55975294", "0.55974936", "0.55790335", "0.55688196" ]
0.6757293
1
Get boto ec2 instance objects by provided tags
def _get_instances(instance_tags, region):
    return ec2_conn[region].get_all_instances(filters={"tag:Name": instance_tags})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances_by_tags(self, tags):\n return self.get_only_instances(filters={'tag:{}'.format(key): val for key, val in tags.items()})", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def get_ec2(self, name: str) -> list:\n filters = [\n {\n 'Name': 'tag:Name',\n 'Values': [name]\n },\n {\n 'Name': 'instance-state-name',\n 'Values': ['running']\n }\n ]\n\n return list(self.ec2.instances.filter(Filters=filters).all())", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", 
"def test_can_query_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, get_instances_by_tag_type, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n tag1 = {'fsimcluster': 'testcluster'}\n type = 'f1.2xlarge'\n\n # create an instance with only a single tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags=tag1)\n instances.should.have.length_of(1)\n\n tag2 = { 'secondtag': 'secondvalue' }\n # create an instance with additional tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={**tag1, **tag2})\n instances.shouldnt.be.empty\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)\n\n # get_instances_by_tag_type with both tags should only return one instance\n instances = get_instances_by_tag_type({**tag1, **tag2},type)\n list(instances).should.have.length_of(1)\n\n # and that instance should be the one with both tags\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n operation_params = {\n 'InstanceIds': ids\n }\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.equal({**tag1, **tag2})\n\n # get_instances_by_tag_type with only the original tag should return both instances\n instances = get_instances_by_tag_type(tag1,type)\n list(instances).should.have.length_of(2)", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None", "def get_images(owner, tagvalue):\n try:\n images = ec2(credentials).describe_images(Owners=[owner],Filters=[{'Name':'tag-value', 'Values':[tagvalue]}])\n return images\n except Exception as e:\n print(\"Error: cannot get the list of images. 
%s\" % e)", "def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')", "def start(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.start_instances(InstanceIds=[instance_id])\n print(f\"Start instances {instance_id}\")\n except ClientError as exc:\n 
ec2_exception(\"instance\", instance_id, exc)", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def load_instances_tags(instance_id=None):\n loader = TagLoader(override_instance_id=instance_id)\n return loader.load_tags()", "def get_tags_for_instance(self, instance_id):\n try:\n response = self.ec2.describe_instances(InstanceIds=[instance_id])\n except Exception as e:\n logger.info(e)\n return []\n for reservation in response['Reservations']:\n for instance in reservation['Instances']:\n if instance['InstanceId'] == instance_id:\n return instance['Tags']\n return []", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n 
region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def __init__(self, looking_for_tags: dict):\n self.looking_for_tags = looking_for_tags\n self.ec2 = boto3.resource('ec2')", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def GetInstanceTags(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def ec2_list(ctx):\n\n from opstools.aws import ec2_list as this_ec2_list\n this_ec2_list.main()" ]
[ "0.7924737", "0.7872538", "0.76130366", "0.70465094", "0.6994373", "0.6934099", "0.692257", "0.6913484", "0.6890489", "0.6859421", "0.6760221", "0.6676493", "0.6670747", "0.6642421", "0.6551454", "0.65225965", "0.65085363", "0.6453371", "0.6426066", "0.6425665", "0.6411169", "0.63397914", "0.62947345", "0.6238381", "0.6222572", "0.61844486", "0.60882026", "0.6042407", "0.6042277", "0.6031871" ]
0.80162674
0
Start and stop the instances given a schedule
def start_stop_instances(instances, schedule):
    for reservation in instances:
        for instance in reservation.instances:
            region = instance.placement
            if instance.state == 'running' and _get_desired_state(schedule) == 'stop':
                print "Should stop " + instance.id + "."
                instance.stop()
            elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':
                print "Should start " + instance.id + "."
                instance.start()
            else:
                print "Nothing to do."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schedule():\n for profile in schedules['profiles']:\n instances = _get_instances(profile['instance_tags'], profile['region'])\n start_stop_instances(instances, profile['schedule'])\n reregister_elb_instances(profile)", "def stopSchedule(self):\n DPxStopDinSched()", "async def test_stop(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Set schedule interval\n interval_schedule = IntervalSchedule()\n interval_schedule.exclusive = False\n interval_schedule.name = 'sleep1'\n interval_schedule.process_name = \"sleep1\"\n interval_schedule.repeat = datetime.timedelta(seconds=1) # Set frequency of\n\n await scheduler.save_schedule(interval_schedule) # Save schedule updates\n await asyncio.sleep(10)\n\n await self.stop_scheduler(scheduler)", "def __run_schedules():\n while True:\n __scheduler.run()", "def start_stop(now, start, stop, temporary_user, config, tz):\n if now.time() >= start and now.time() < stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'stopped', now, tz)\n action_on_instances(temporary_user.start_instances, action_required_ids, 'Start')\n elif now.time() >= stop:\n action_required_ids, no_action_required_ids = get_instance_ids(temporary_user, config, 'running', now, tz)\n action_on_instances(temporary_user.stop_instances, action_required_ids, 'Stop')", "def startSchedule(self):\n DPxStartDinSched()", "def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])", "def stop(self):\n schedule = self._schedules[self._index]\n schedule.stop()\n self._stopped.set()\n self._started.clear()", "def run_scheduled_sprinkle():\n logger.info('Running scheduled sprinkles')\n now = timezone.now()\n scheduled_devices = SprinkleSchedule.objects.filter(next_schedule__lte=now)\n run_success = scheduled_sprinkle(scheduled_devices)\n save_scheduled_tasks(run_success)\n logger.info('run_success')\n SprinkleSchedule.objects.filter(id__in=[run['device'] for run in run_success]).update(last_run=now)", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "async def run_scheduler(self):\n while True:\n interval = 60\n for s in await self.get_service('data_svc').locate('schedules'):\n now = datetime.now().time()\n diff = datetime.combine(date.today(), now) - datetime.combine(date.today(), s.schedule)\n if interval > diff.total_seconds() > 0:\n 
self.log.debug('Pulling %s off the scheduler' % s.name)\n sop = copy.deepcopy(s.task)\n sop.set_start_details()\n await self._services.get('data_svc').store(sop)\n self.loop.create_task(self.run_operation(sop))\n await asyncio.sleep(interval)", "def run_scheduled_tasks(self) -> None:\n self.scheduler.run(False)", "def _create_schedules(self):\n\n ''''''", "def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()", "def stop_instances(self, ids):\n self.conn.stop_instances(instance_ids=ids)", "def schedule_start(self):\n self.initialize_scheduler()", "async def test_create_interval(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # assert that the schedule type is interval\n interval_schedule = IntervalSchedule()\n assert interval_schedule.schedule_type == Schedule.Type.INTERVAL\n\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n\n await scheduler.save_schedule(interval_schedule)\n\n await self.stop_scheduler(scheduler)", "async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def schedule_run_instance(self, context, request_spec,\n admin_password, injected_files,\n requested_networks, is_first_time,\n filter_properties, legacy_bdm_in_spec):\n instance_uuids = request_spec.get('instance_uuids')\n for num, instance_uuid in 
enumerate(instance_uuids):\n request_spec['instance_properties']['launch_index'] = num\n try:\n #LOG.info(\"jach:context = %(context)s\" % {'context': context.__dict__})\n #LOG.info(\"jach:request_spec = %(request_spec)s\" % locals())\n #LOG.info(\"jach:filter_properties = %(filter_properties)s\" % locals())\n \n host = self._schedule(context, CONF.compute_topic,\n request_spec, filter_properties)\n updated_instance = driver.instance_update_db(context,\n instance_uuid)\n self.compute_rpcapi.run_instance(context,\n instance=updated_instance, host=host,\n requested_networks=requested_networks,\n injected_files=injected_files,\n admin_password=admin_password,\n is_first_time=is_first_time,\n request_spec=request_spec,\n filter_properties=filter_properties,\n legacy_bdm_in_spec=legacy_bdm_in_spec)\n except Exception as ex:\n # NOTE(vish): we don't reraise the exception here to make sure\n # that all instances in the request get set to\n # error properly\n driver.handle_schedule_error(context, ex, instance_uuid,\n request_spec)", "def stop(self):\n if self._stop is not None:\n LOGGER.info(\n \"Stopping schedule[%s], index[%s]\",\n self._description,\n self._index)\n self._stop.set()\n instruction = self._instructions[self._index]\n instruction.stop()", "def start_instances(self, ids):\n self.conn.start_instances(instance_ids=ids)", "def run(self, eventbus):\n LOGGER.info(\"Running schedule: %s\", self._description)\n self._stop = Event()\n while self._run_next(eventbus) and not self._stop.is_set():\n continue\n self._stop = None\n self._index = 0\n LOGGER.info(\"Finished running schedule: %s\", self._description)", "def delete_schedule(sender, instance, **kwargs):\n try:\n instance.schedule_on.delete()\n except (AssertionError, AttributeError) as e:\n print('No on schedule')\n try:\n instance.schedule_off.delete()\n except (AssertionError, AttributeError) as e:\n print('No off schedule')\n try:\n instance.schedule_on.crontab.delete()\n except (AssertionError, AttributeError) as e:\n print('No Crontab on')\n try:\n instance.schedule_off.crontab.delete()\n except (AssertionError, AttributeError) as e:\n print('No Crontab off')", "def Stop_Instances(ids=Get_Running_Instances()):\n ec2 = boto3.client('ec2')\n #call the features client from the boto3 library\n if not ids:\n #if the list of Ec2 instances returned is empty.\n print(\"No Instance in the state Running or pending\")\n else:\n ec2.stop_instances(InstanceIds=ids)\n #stop the instances using their id\n ec2.get_waiter('instance_stopped').wait(InstanceIds=ids)\n #wait for the state of the instances to change to stopped.\n print('instance {} was shutdown'.format(ids))", "async def test_interval_none_repeat(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # assert that the schedule type is interval\n interval_schedule = IntervalSchedule()\n assert interval_schedule.schedule_type == Schedule.Type.INTERVAL\n\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(1)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await asyncio.sleep(12)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def 
stop_instance(InstanceId=None, Force=None):\n pass", "def run(self):\n self.timer.start()\n \n while not Status.is_final(self.status):\n if self.request:\n self.handle_request()\n \n if self.status == Status.RUNNING:\n # Clean up orphaned schedules and undead schedulers.\n # Schedule.objects.orphaned().update(scheduler=None)\n # CronSchedule.objects.orphaned().update(scheduler=None)\n \n cron = CronSchedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n simple = Schedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n for schedule in itertools.chain(cron, simple):\n self.log.info('Claiming %s.' % schedule)\n schedule.scheduler = self\n schedule.save()\n self.add(schedule)\n if not Status.is_final(self.status):\n self.wait()\n self.request = Scheduler.objects.get(pk=self.pk).request", "def test_update_instances_schedule_state(self):\n pass", "def stop_instances(self, instance_ids=None, force=False):\r\n params = {}\r\n if force:\r\n params['Force'] = 'true'\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('StopInstances', params,\r\n [('item', Instance)], verb='POST')" ]
[ "0.6934514", "0.6739192", "0.6622401", "0.6519092", "0.64685136", "0.61098325", "0.6099489", "0.609771", "0.60722625", "0.6011122", "0.5967029", "0.5966578", "0.5929629", "0.5904095", "0.58967966", "0.57705957", "0.57697916", "0.57670844", "0.57324207", "0.57235146", "0.57193524", "0.57123333", "0.5694907", "0.56845516", "0.56842804", "0.5664949", "0.5637595", "0.56293964", "0.5624721", "0.5597961" ]
0.7838685
0