Dataset fields:
  query            string  (length 9 to 9.05k)
  document         string  (length 10 to 222k)
  metadata         dict
  negatives        list    (length 30)
  negative_scores  list    (length 30)
  document_score   string  (length 4 to 10)
  document_rank    string  (2 distinct values)
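A minimal loading sketch, assuming the corpus is distributed as a Hugging Face dataset (the repository name below is hypothetical and only for illustration):

from datasets import load_dataset  # Hugging Face `datasets` library

# Hypothetical repository name; substitute the real dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

record = ds[0]
print(record["query"])            # natural-language query
print(record["document"][:120])   # positive code snippet (truncated for display)
print(len(record["negatives"]))   # 30 hard negatives per row
print(record["document_score"], record["document_rank"])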
Applies a random color mask to the given input image.
import random

import numpy as np


def random_colour_masks(image: np.ndarray):
    """Applies a random colour to the foreground (value 1) pixels of a binary mask."""
    colours = [
        [0, 255, 0],
        [0, 0, 255],
        [255, 0, 0],
        [0, 255, 255],
        [255, 255, 0],
        [255, 0, 255],
        [80, 70, 180],
        [250, 80, 190],
        [245, 145, 50],
        [70, 150, 250],
        [50, 190, 190],
    ]
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    # randrange(0, 10) picks one of the first ten colours at random.
    (r[image == 1], g[image == 1], b[image == 1]) = colours[random.randrange(0, 10)]
    coloured_mask = np.stack([r, g, b], axis=2)
    return coloured_mask
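A minimal usage sketch, continuing from the snippet above (the 4x4 binary mask is synthetic, purely for illustration):

# Illustrative only: a synthetic binary mask with a small foreground block.
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1

rgb_mask = random_colour_masks(mask)
print(rgb_mask.shape)   # (4, 4, 3)
print(rgb_mask[1, 1])   # the randomly chosen RGB colour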
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_mask(im, mask, color=(1, 0, 0)):\n masked = np.zeros(im.shape)\n for x, y in mask: masked[x][y] = color\n return masked", "def addMaskImage(img):\r\n [h, w, c] = img.shape\r\n h_start = np.random.randint(h/2,h-1)\r\n w_start = np.random.randint(w/2, w-1)\r\n img[h_start:h-1, :,0]= np.random.randint(0,120)\r\n img[h_start:h-1, :,1]= np.random.randint(0,120) \r\n img[h_start:h-1, :,2]= np.random.randint(0,120) \r\n img[:,w_start:w-1,0]= np.random.randint(0,120)\r\n img[:,w_start:w-1,1]= np.random.randint(0,120) \r\n img[:,w_start:w-1,2]= np.random.randint(0,120) \r\n img = np.uint8(img)\r\n return img, h_start, w_start", "def mask_image(image):\n pass", "def apply_mask(image, mask, color):\r\n for c in range(3):\r\n image[:, :, c] = np.where(mask == 1,\r\n image[:, :, c] + color[c],\r\n image[:, :, c])\r\n return image", "def apply_mask(image, mask, color, alpha=0.5):\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n # cv2.imshow(\"TEST\",image.astype(np.uint8))\n # print(color)\n return image", "def dynamic_masking(image):\n image = img_as_float(image)\n background = gaussian_filter(median_filter(image,3),1)\n image[background > threshold_otsu(background)/5.0] = 0.0\n \n return image", "def apply_mask(image,mask,color,alpha=0.5):\n for c in range(3):\n image[:,:,c] = np.where(mask==1,image[:,:,c]*(1-alpha)+alpha*color[c]*255,image[:,:,c])\n return image", "def apply_mask(image, mask, color, alpha=0.5):\n for c in range(3):\n image[:, :, c] = np.where(\n mask == 1,\n np.array(image[:, :, c], np.float32) * (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image.astype(np.uint8)", "def apply_mask(image, mask, color, alpha=0.5):\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c],\n image[:, :, c])\n return image", "def apply_mask(image, mask, color, alpha=0.5):\n for c in range(3):\n image[:, :, c] = np.where(\n mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image", "def draw_mask(im, mask, alpha=0.5, color=None):\n if color is None:\n color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]\n color = np.asarray(color, dtype=np.float32)\n im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),\n im * (1 - alpha) + color * alpha, im)\n im = im.astype('uint8')\n return im", "def generateRandomMask(size, p=0.5):\n mask_array = (np.random.random(size) > p).astype(int)\n mask = sitk.GetImageFromArray(mask_array) \n return mask", "def apply_mask_image(img: PIL.Image.Image, mask: np.ndarray) -> PIL.Image.Image:\n img_arr = np.array(img)\n\n if mask.ndim == 2 and img_arr.ndim != 2:\n masked_image = np.zeros(img_arr.shape, \"uint8\")\n n_channels = img_arr.shape[2]\n for channel_i in range(n_channels):\n masked_image[:, :, channel_i] = img_arr[:, :, channel_i] * mask\n else:\n masked_image = img_arr * mask\n return np_to_pil(masked_image)", "def mask_img(img, mask):\n masked_img = np.zeros(img.shape)\n masked_img[mask] = img[mask]\n return masked_img", "def apply(self, im, random_seed=None, mask=False):\n seed(random_seed)\n\n n_dim = im.ndim\n\n alpha = convert_tuple_of_tuples(self._alpha, n_dim)\n sigma = convert_tuple_of_tuples(self._sigma, n_dim)\n\n np.random.seed(int(2147483647 * random_seed))\n\n shape = im.shape\n\n # print(alpha)\n # print(sigma)\n\n d = list(gaussian_filter((np.random.rand(*shape) * 2 - 1), s) * a for a, s in zip(alpha, sigma))\n\n ran = 
list(range(n_dim))\n ran[0], ran[1] = ran[1], ran[0]\n x = np.meshgrid(*[np.arange(shape[i]) for i in ran])\n x[0], x[1] = x[1], x[0]\n indices = [np.reshape(xi + di, (-1, 1)) for di, xi in zip(d, x)]\n\n op = change_params(self._opt_params)\n out = map_coordinates(im, indices, order=1, **op).reshape(shape)\n return out", "def generate_mask(\n self,\n noise_background,\n noise_value,\n generated_points_x,\n generated_points_y,\n xsize,\n ysize,\n ):\n\n # background of noise mask\n img_mask = np.random.randint(\n noise_background[0],\n noise_background[1] + 1,\n (ysize, xsize),\n )\n\n # mask of random value\n img_mask_random = np.random.randint(\n low=noise_value[0],\n high=noise_value[1] + 1,\n size=(ysize, xsize),\n )\n\n # insert random value into background\n img_mask[generated_points_y, generated_points_x] = img_mask_random[generated_points_y, generated_points_x]\n\n return img_mask.astype(\"uint8\")", "def apply_mask(image, mask, color, alpha=0.5):\n for n, c in enumerate(color):\n image[:, :, n] = np.where(\n mask == 1,\n image[:, :, n] * (1 - alpha) + alpha * c,\n image[:, :, n]\n )\n return image", "def apply_mask(image, mask, color, alpha=0.5):\r\n for n, c in enumerate(color):\r\n image[:, :, n] = np.where(\r\n mask == 1,\r\n image[:, :, n] * (1 - alpha) + alpha * c,\r\n image[:, :, n]\r\n )\r\n return image", "def chooseRandPixel(mask):\n array = np.transpose(np.nonzero(mask)) # Get the indices of nonzero elements of mask.\n index = random.randint(0,len(array)-1) # Select a random index\n return array[index]", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def random_crop(img, mask):\n if str(img.dtype) != 'uint8':\n img = (img * 255).astype(np.uint8)\n if str(mask.dtype) != 'uint8':\n mask = (mask * 255).astype(np.uint8)\n img = Image.fromarray(img)\n mask = Image.fromarray(mask)\n x, y = img.size\n matrix = 256\n img_list = []\n label_list = []\n for i in range(CROP_NUM):\n x1 = randrange(0, x - matrix)\n y1 = randrange(0, y - matrix)\n img_list.append(img.crop((x1, y1, x1 + matrix, y1 + matrix)))\n label_list.append(mask.crop((x1, y1, x1 + matrix, y1 + matrix)))\n\n return img_list, label_list", "def process_mask(self, image):\n image = np.array(image)\n image[image == 5] = 1 # set un-classified to undestroyed\n return Image.fromarray(image)", "def apply_mask(image, mask, cls2color=cityscapes_cls2color, alpha=0.5):\n masks = []\n for c in range(3):\n mask_copy = mask.copy()\n for k, v in cls2color.items():\n mask_copy[mask == k] = v[c]\n mask_copy = np.expand_dims(mask_copy, 2)\n masks.append(mask_copy)\n mask = np.concatenate(masks, axis=-1)\n if image is not None:\n ret = image*(1-alpha)+alpha*mask/255.0\n else:\n ret = mask/255.0\n\n return ret", "def apply_mask(image, mask):\n image = image.astype(np.uint8)\n image = np.array(image)\n \n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n cv2.blur(image[:, :, c],(40,40)),\n image[:, :, c])\n return image", "def mask_overlay(image, mask, color=(0, 1, 0)):\n mask = np.dstack((mask, mask, mask)) * np.array(color)\n weighted_sum = cv2.addWeighted(mask, 0.5, image, 0.5, 0.)\n img = image.copy()\n ind = mask[:, :, 1] > 0\n img[ind] = weighted_sum[ind] \n return img", "def get_image_mask_from_ui(image_path, image_size, mask_color, n_channels=3):\n masked_img = np.zeros(image_size, dtype='uint8')\n \n img = Image.open(image_path).load()\n img = np.asarray(img, dtype='int32')\n img = img.reshape(-1, n_channels)\n 
ignore = np.where(img == mask_color)\n \n masked_img[ignore] = 1\n return masked_img", "def mask_overlay(image, mask, color=(0, 255, 0)):\n mask = np.dstack((mask, mask, mask)) * np.array(color)\n mask = mask.astype(np.uint8)\n weighted_sum = cv2.addWeighted(mask, 0.5, image, 0.5, 0.)\n img = image.copy()\n ind = mask[:, :, 1] > 0\n img[ind] = weighted_sum[ind]\n return img", "def mask_overlay(image, mask, color=(255, 255, 0)):\n mask = np.dstack((mask, mask, mask)) * np.array(color)\n mask = mask.astype(np.uint8)\n weighted_sum = cv2.addWeighted(mask, 0.5, image, 0.5, 0.)\n img = image.copy()\n ind = mask[:, :, 1] > 0 \n img[ind] = weighted_sum[ind] \n return img", "def crop_images_color(dataset_dir, is_mask=True):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*_labelIds.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.uint8)\n img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n if is_mask:\n mask = np.ones((img.shape[0], img.shape[1]), dtype=np.uint8) * 255\n\n idx_person = np.where(np.all(img == [220, 20, 60, 255], axis=-1))\n #idx_rider = np.where(np.all(img == [255, 0, 0, 255], axis=-1))\n #idx_void = np.where(np.all(img == [0, 0, 0, 255], axis=-1))\n\n #indices = np.concatenate((idx_person, idx_rider, idx_void), axis=1)\n indices = idx_person\n # mask[indices[0], indices[1], :] = (0, 0, 0, 255)\n mask[indices[0], indices[1]] = 0\n mask = np.reshape(mask, (256, 512))\n\n #scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/CITYSCAPES_crop_random/' + filePath.split('/')[-1],\n # img[offs_h[index]:offs_h_end[index], offs_w[index]:offs_w_end[index] :])\n scipy.misc.imsave('/home/andy/dataset/CITYSCAPES/for_wonderful_chou/image/' + filePath.split('/')[-1],\n img[0:192, :])\n #break", "def random_masks(self):\n # initialize mask\n mask = np.ones((3, self.dim, self.dim))\n\n # generate one of 4 random masks\n choose = 1 # np.random.randint(0, 1)\n if choose == 0:\n mask[:, :self.dim // 2] = 0\n elif choose == 1:\n mask[:, :, :self.dim // 2] = 0\n elif choose == 2:\n mask[:, :, self.dim // 2:] = 0\n elif choose == 3:\n mask[:, self.dim // 2:] = 0\n\n return mask" ]
[ "0.69744325", "0.68479043", "0.6653597", "0.6587677", "0.653253", "0.644138", "0.6433334", "0.6425598", "0.63975954", "0.6355787", "0.6343776", "0.6325185", "0.63067794", "0.62949806", "0.62843984", "0.62772554", "0.62622845", "0.6229754", "0.62034327", "0.61940217", "0.6122129", "0.60672903", "0.6052657", "0.6045998", "0.60409325", "0.6016408", "0.5960845", "0.59590524", "0.5956943", "0.59240764" ]
0.71775967
0
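The metadata's objective field marks each row for triplet-style training over (query, document, negatives). A library-agnostic sketch of expanding one record into triplets, reusing the `record` variable from the loading sketch above:

def expand_to_triplets(record):
    # Pair the query and its positive document with every hard negative.
    return [
        (record["query"], record["document"], negative)
        for negative in record["negatives"]
    ]

triplets = expand_to_triplets(record)  # 30 (anchor, positive, negative) triplets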
Visualizes predicted classes, bounding boxes, and masks over the source image and exports the result to the output folder.
import os

import cv2


def visualize_prediction(
    image: str,
    masks,
    boxes,
    pred_cls,
    rect_th: float = 3,
    text_size: float = 3,
    text_th: float = 3,
    file_name: str = "inference_result.png",
):
    # note: despite the `str` annotation, `image` is used below as an RGB numpy array
    # create output folder if not present (create_dir is a project helper)
    create_dir("output/")
    # add bbox and mask to image if present
    if len(masks) > 0:
        for i in range(len(masks)):
            rgb_mask = random_colour_masks(masks[i])
            image = cv2.addWeighted(image, 1, rgb_mask, 0.6, 0)
            cv2.rectangle(
                image, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th
            )
            cv2.putText(
                image,
                pred_cls[i],
                boxes[i][0],
                cv2.FONT_HERSHEY_SIMPLEX,
                text_size,
                (0, 255, 0),
                thickness=text_th,
            )
    # save inference result
    save_path = os.path.join("output/", file_name)
    cv2.imwrite(save_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
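A hypothetical call with synthetic values, assuming the project's create_dir and random_colour_masks helpers are in scope; boxes are given as [(x1, y1), (x2, y2)] corner pairs and pred_cls as label strings:

import numpy as np

# Illustrative only: synthetic image, one mask, one box, one label.
image = np.zeros((480, 640, 3), dtype=np.uint8)
mask = np.zeros((480, 640), dtype=np.uint8)
mask[100:200, 100:200] = 1
boxes = [[(100, 100), (200, 200)]]
pred_cls = ["person"]

visualize_prediction(image, [mask], boxes, pred_cls, file_name="demo.png")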
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _visualize_prediction(self, input, output, target):\n out_b1 = binary(output)\n out_b1 = impose_labels_on_image(input[0, 0, :, :], target[0, :, :], out_b1[0, 1, :, :])\n self.writer.add_image('output', make_grid(out_b1, nrow=8, normalize=False))", "def visualize_image_prediction(image,\n prediction,\n label_map=None,\n **kwargs):\n boxes = prediction[:, 1:5]\n classes = prediction[:, 6].astype(int)\n scores = prediction[:, 5]\n\n return visualize_image(image, boxes, classes, scores, label_map, **kwargs)", "def generate_predictions_on_folder(folder_path, unet, img_size):\n \n testing_dir = folder_path\n\n testing_img_paths = [os.path.join(testing_dir, fname) \n for fname in os.listdir(testing_dir)\n if (fname.endswith(\".png\") or fname.endswith(\".jpg\"))]\n\n x = np.zeros((len(testing_img_paths),) + img_size + (3,), dtype=\"float32\")\n\n for j, path in enumerate(testing_img_paths):\n img = load_img(path)\n # cropping images from 900x720 to 512x512\n img = img.crop(box=(313,99,825,611))\n # resizing image from 512x512 to 256x256\n img = img.resize(img_size)\n x[j] = img\n\n testing_preds = unet.model.predict(x)\n\n def display_mask(i):\n \"\"\"Quick utility to display a model's prediction.\"\"\"\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)\n \n def display_cropped_img(i):\n \"\"\" Utility to display the original image. \"\"\"\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)\n\n # displaying all predictions for images in a folder\n for i in range(0,len(testing_img_paths)):\n # Display input image\n display_cropped_img(i)\n # Display mask predicted by our model\n display_mask(i)", "def visualize(\n samples, predictions, pred_poses, im_ind, crop_size, output_scale,\n model_store, renderer, vis_dir):\n tf.logging.info('Visualization for: {}'.format(\n samples[common.IMAGE_PATH][0].decode('utf8')))\n\n # Size of a visualization grid tile.\n tile_size = (300, 225)\n\n # Extension of the saved visualizations ('jpg', 'png', etc.).\n vis_ext = 'jpg'\n\n # Font settings.\n font_size = 10\n font_color = (0.8, 0.8, 0.8)\n\n # Intrinsics.\n K = samples[common.K][0]\n output_K = K * output_scale\n output_K[2, 2] = 1.0\n\n # Tiles for the grid visualization.\n tiles = []\n\n # Size of the output fields.\n output_size =\\\n int(output_scale * crop_size[0]), int(output_scale * crop_size[1])\n\n # Prefix of the visualization names.\n vis_prefix = '{:06d}'.format(im_ind)\n\n # Input RGB image.\n rgb = np.squeeze(samples[common.IMAGE][0])\n vis_rgb = visualization.write_text_on_image(\n misc.resize_image_py(rgb, tile_size).astype(np.uint8),\n [{'name': '', 'val': 'input', 'fmt': ':s'}],\n size=font_size, color=font_color)\n tiles.append(vis_rgb)\n\n # Visualize the ground-truth poses.\n if FLAGS.vis_gt_poses:\n\n gt_poses = []\n for gt_id, obj_id in enumerate(samples[common.GT_OBJ_IDS][0]):\n q = samples[common.GT_OBJ_QUATS][0][gt_id]\n R = transform.quaternion_matrix(q)[:3, :3]\n t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))\n gt_poses.append({'obj_id': obj_id, 'R': R, 't': t})\n\n vis_gt_poses = vis.visualize_object_poses(rgb, K, gt_poses, renderer)\n vis_gt_poses = visualization.write_text_on_image(\n 
misc.resize_image_py(vis_gt_poses, tile_size),\n [{'name': '', 'val': 'gt poses', 'fmt': ':s'}],\n size=font_size, color=font_color)\n tiles.append(vis_gt_poses)\n\n # Visualize the estimated poses.\n if FLAGS.vis_pred_poses:\n vis_pred_poses = vis.visualize_object_poses(rgb, K, pred_poses, renderer)\n vis_pred_poses = visualization.write_text_on_image(\n misc.resize_image_py(vis_pred_poses, tile_size),\n [{'name': '', 'val': 'pred poses', 'fmt': ':s'}],\n size=font_size, color=font_color)\n tiles.append(vis_pred_poses)\n\n # Ground-truth object labels.\n if FLAGS.vis_gt_obj_labels and common.GT_OBJ_LABEL in samples:\n obj_labels = np.squeeze(samples[common.GT_OBJ_LABEL][0])\n obj_labels = obj_labels[:crop_size[1], :crop_size[0]]\n obj_labels = vis.colorize_label_map(obj_labels)\n obj_labels = visualization.write_text_on_image(\n misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),\n [{'name': '', 'val': 'gt obj labels', 'fmt': ':s'}],\n size=font_size, color=font_color)\n tiles.append(obj_labels)\n\n # Predicted object labels.\n if FLAGS.vis_pred_obj_labels:\n obj_labels = np.squeeze(predictions[common.PRED_OBJ_LABEL][0])\n obj_labels = obj_labels[:crop_size[1], :crop_size[0]]\n obj_labels = vis.colorize_label_map(obj_labels)\n obj_labels = visualization.write_text_on_image(\n misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),\n [{'name': '', 'val': 'predicted obj labels', 'fmt': ':s'}],\n size=font_size, color=font_color)\n tiles.append(obj_labels)\n\n # Predicted object confidences.\n if FLAGS.vis_pred_obj_confs:\n num_obj_labels = predictions[common.PRED_OBJ_CONF].shape[-1]\n for obj_label in range(num_obj_labels):\n obj_confs = misc.resize_image_py(np.array(\n predictions[common.PRED_OBJ_CONF][0, :, :, obj_label]), tile_size)\n obj_confs = (255.0 * obj_confs).astype(np.uint8)\n obj_confs = np.dstack([obj_confs, obj_confs, obj_confs]) # To RGB.\n obj_confs = visualization.write_text_on_image(\n obj_confs, [{'name': 'cls', 'val': obj_label, 'fmt': ':d'}],\n size=font_size, color=font_color)\n tiles.append(obj_confs)\n\n # Visualization of ground-truth fragment fields.\n if FLAGS.vis_gt_frag_fields and common.GT_OBJ_IDS in samples:\n vis.visualize_gt_frag(\n gt_obj_ids=samples[common.GT_OBJ_IDS][0],\n gt_obj_masks=samples[common.GT_OBJ_MASKS][0],\n gt_frag_labels=samples[common.GT_FRAG_LABEL][0],\n gt_frag_weights=samples[common.GT_FRAG_WEIGHT][0],\n gt_frag_coords=samples[common.GT_FRAG_LOC][0],\n output_size=output_size,\n model_store=model_store,\n vis_prefix=vis_prefix,\n vis_dir=vis_dir)\n\n # Visualization of predicted fragment fields.\n if FLAGS.vis_pred_frag_fields:\n vis.visualize_pred_frag(\n frag_confs=predictions[common.PRED_FRAG_CONF][0],\n frag_coords=predictions[common.PRED_FRAG_LOC][0],\n output_size=output_size,\n model_store=model_store,\n vis_prefix=vis_prefix,\n vis_dir=vis_dir,\n vis_ext=vis_ext)\n\n # Build and save a visualization grid.\n grid = vis.build_grid(tiles, tile_size)\n grid_vis_path = os.path.join(\n vis_dir, '{}_grid.{}'.format(vis_prefix, vis_ext))\n inout.save_im(grid_vis_path, grid)", "def save(\n self,\n output_folder: str,\n box_thickness: int = 2,\n show_confidence: bool = True,\n color_mapping: Optional[List[Tuple[int, int, int]]] = None,\n target_bboxes: Optional[Union[np.ndarray, List[np.ndarray]]] = None,\n target_bboxes_format: Optional[str] = None,\n target_class_ids: Optional[Union[np.ndarray, List[np.ndarray]]] = None,\n ) -> None:\n if output_folder:\n os.makedirs(output_folder, exist_ok=True)\n\n target_bboxes, 
target_class_ids = self._check_target_args(target_bboxes, target_bboxes_format, target_class_ids)\n\n for i, (prediction, target_bbox, target_class_id) in enumerate(zip(self._images_prediction_lst, target_bboxes, target_class_ids)):\n image_output_path = os.path.join(output_folder, f\"pred_{i}.jpg\")\n prediction.save(output_path=image_output_path, box_thickness=box_thickness, show_confidence=show_confidence, color_mapping=color_mapping)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n #submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, \"submit\")\n #os.makedirs(submit_dir)\n\n # Read dataset\n img_ids = []\n dataset_dir = os.path.join(dataset_dir, subset)\n image_file = os.listdir(dataset_dir)\n #submission = []\n for img in image_file:\n if not img.startswith('.'):\n img_file = os.path.join(dataset_dir, img)\n image = skimage.io.imread(img_file)\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # Detect object\n\t\t\t\n r = model.detect([image])[0]\n # Encode image to RLE. Returns a string of multiple lines\n source_id = img.split(\".\")[0]\n #rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n #submission.append(rle)\n # Save image with masks\n visualize.display_instances(\n image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'],\n #show_bbox=False, show_mask=False,\n title=\"Predictions\")\n plt.savefig(\"{}/{}.png\".format(submit_dir, source_id))\n\n\n\t\t\n # Save to csv file", "def demo(sess, net, image_name):\n # Load the demo image\n global CLASS_NAME\n global CHECK\n CHECK = 0\n # 读取的截图所在的位置\n # im_file = Cnn_path + \"data/VOCdevkit2007/VOC2007/JPEGImages/\" + image_name\n curpath = os.path.dirname(os.path.realpath(__file__))\n im_file = curpath + \"\\\\data\\\\VOCdevkit2007\\\\VOC2007\\\\JPEGImages\\\\\" + image_name\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n # score 阈值,最后画出候选框时需要,>thresh才会被画出\n CONF_THRESH = 0.5\n # 非极大值抑制的阈值,剔除重复候选框\n NMS_THRESH = 0.3\n # 利用enumerate函数,获得CLASSES中 类别的下标cls_ind和类别名cls\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n # 取出bbox ,score\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n # 将bbox,score 一起存入dets\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n # 进行非极大值抑制,得到抑制后的 dets\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # 画框\n vis_detections(im, cls, dets, thresh=CONF_THRESH)\n if CHECK == 0:\n CLASS_NAME = \"None\"\n # im = im[:, :, (2, 1, 0)]\n # fig, ax = plt.subplots()\n # ax.imshow(im, aspect='equal')\n # ax.set_title(\"None\",fontsize=10)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n # RES[INDS.__getitem__(image_name.split(\"_\")[0])][INDS.__getitem__(CLASS_NAME)]+=1\n # plt.savefig(\"./output/\"+CLASS_NAME+\"_\" + image_name)\n # plt.savefig(\"./output/\" + image_name)\n MAX_SCORE[0] = 0.0", "def show_result(inputs, labels, outputs):\n num_classes = outputs.size(1)\n outputs = outputs.argmax(dim=1).detach().cpu().numpy()\n if num_classes == 2:\n 
outputs *= 255\n mask = outputs[0].reshape((360, 640))\n fig, ax = plt.subplots(1, 2, figsize=(20, 1 * 5))\n ax[0].imshow(inputs[0, :3, :, ].detach().cpu().numpy().transpose((1, 2, 0)))\n ax[0].set_title('Image')\n ax[1].imshow(labels[0].detach().cpu().numpy().reshape((360, 640)), cmap='gray')\n ax[1].set_title('gt')\n plt.show()\n plt.figure()\n plt.imshow(mask, cmap='gray')\n plt.title('Pred')\n plt.show()", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def plot_reconstruction_images(inputs, pred, name):\n\n plt.clf()\n nb_plots = min(inputs.shape[0], 4)\n #inputs\n for i in range(nb_plots):\n ax = plt.subplot2grid((2, nb_plots), (0, i), rowspan=1, colspan=1)\n ax.imshow(inputs[i])\n ax.axis('off')\n #pred\n for i in range(nb_plots):\n ax = plt.subplot2grid((2, nb_plots), (1, i), rowspan=1, colspan=1)\n ax.imshow(pred[i])\n ax.axis('off')\n\n if name != None:\n plt.savefig(name, format='svg', bbox_inches='tight')\n else:\n plt.show()", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def demo(sess, net, image_name):\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n #im_file = os.path.join('/home/corgi/Lab/label/pos_frame/ACCV/training/000001/',image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print (('Detection took {:.3f}s for '\n '{:d} object proposals').format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)", "def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n 
masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: List[SegmObject]) -> np.ndarray:\n draw_layer_tag = f'draw_layer_{self.id}_{self.draw_layer_index^1}'\n mix_factor = .3\n\n self.layer = dpg.add_draw_layer(\n parent=f'main_window_{self.id}',\n tag=draw_layer_tag,\n show=False\n )\n\n for out in output_data:\n if out.score < self.score_threshold:\n continue\n\n mask = cv2.resize(\n out.mask,\n (self.width, self.height),\n cv2.INTER_NEAREST\n ).astype(np.uint8)\n color_mask = np.tile(mask[..., None]/255., [1, 1, 4])\n color_image = np.tile(\n self.class_colors[out.clsname],\n [self.height, self.width, 1]\n )\n img += color_image * color_mask * mix_factor\n contour, _ = cv2.findContours(\n mask.astype(np.uint8),\n cv2.RETR_CCOMP,\n cv2.CHAIN_APPROX_NONE\n )\n color = np.array(self.class_colors[out.clsname])\n color = tuple((255*color).astype(np.uint8))\n for c in contour:\n dpg.draw_polyline(\n parent=draw_layer_tag,\n points=list(c[:, 0, :]),\n color=color,\n thickness=2\n )\n description = f'{out.clsname} [{out.score * 100:.2f}%]'\n dpg.draw_text(\n parent=draw_layer_tag,\n text=description,\n pos=(c[:, 0, 0].min() + _PADDING,\n c[:, 0, 1].min() + _PADDING),\n color=color,\n size=_FONT_SIZE\n )\n\n return img", "def create_img(X_train, X_test, y_train, y_test, labels, model, visualizer, upsampled, IMG_OUTPUT_FILEPATH):\n viz = Visualizer(X_train, X_test, y_train, y_test, labels, model, visualizer, upsampled=upsampled)\n viz.evaluate()\n if upsampled == True:\n outpath_ = IMG_OUTPUT_FILEPATH + str(model).split('(')[0] + '/' + visualizer + '_upsampled.png'\n else:\n outpath_ = IMG_OUTPUT_FILEPATH + str(model).split('(')[0] + '/' + visualizer + '.png'\n viz.visualizer.show(outpath=outpath_, clear_figure=True)", "def run_ML_onImg_and_display(self):\r\n self.Matdisplay_Figure.clear()\r\n ax1 = self.Matdisplay_Figure.add_subplot(111)\r\n \r\n # Depends on show_mask or not, the returned figure will be input raw image with mask or not.\r\n self.MLresults, self.Matdisplay_Figure_axis, self.unmasked_fig = self.ProcessML.DetectionOnImage(self.MLtargetedImg, axis = ax1, show_mask=False, show_bbox=False) \r\n self.Mask = self.MLresults['masks']\r\n self.Label = self.MLresults['class_ids']\r\n self.Score = self.MLresults['scores']\r\n self.Bbox = self.MLresults['rois']\r\n\r\n self.SelectedCellIndex = 0\r\n self.NumCells = int(len(self.Label))\r\n self.selected_ML_Index = []\r\n self.selected_cells_infor_dict = {}\r\n \r\n self.Matdisplay_Figure_axis.imshow(self.unmasked_fig.astype(np.uint8))\r\n \r\n self.Matdisplay_Figure.tight_layout()\r\n self.Matdisplay_Canvas.draw()", "def postprocess(out_heatmaps, org_im, org_im_shape, org_im_path, output_dir,\n visualization):\n preds, num_joints = save_predict_results(out_heatmaps)\n scale_horizon = org_im_shape[1] * 1.0 / 384\n scale_vertical = org_im_shape[0] * 1.0 / 384\n preds = np.multiply(preds, (scale_horizon, scale_vertical)).astype(int)\n if visualization:\n icolor = (255, 137, 0)\n ocolor = (138, 255, 0)\n rendered_im = 
org_im.copy()\n for j in range(num_joints):\n x, y = preds[j]\n cv2.circle(rendered_im, (x, y), 3, icolor, -1, 16)\n cv2.circle(rendered_im, (x, y), 6, ocolor, 1, 16)\n # check whether output_dir is existent or not\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n elif os.path.isfile(output_dir):\n os.remove(output_dir)\n os.makedirs(output_dir)\n # save image\n save_im_name = os.path.join(\n output_dir, 'rendered_{}.jpg'.format(\n os.path.splitext(os.path.basename(org_im_path))[0]))\n cv2.imwrite(save_im_name, rendered_im)\n print('image saved in {}'.format(save_im_name))\n\n # articulation\n articulation_points = OrderedDict()\n articulation_points['left_ankle'] = list(preds[0])\n articulation_points['left_knee'] = list(preds[1])\n articulation_points['left_hip'] = list(preds[2])\n articulation_points['right_hip'] = list(preds[3])\n articulation_points['right_knee'] = list(preds[4])\n articulation_points['right_ankle'] = list(preds[5])\n articulation_points['pelvis'] = list(preds[6])\n articulation_points['thorax'] = list(preds[7])\n articulation_points['upper neck'] = list(preds[8])\n articulation_points['head top'] = list(preds[9])\n articulation_points['right_wrist'] = list(preds[10])\n articulation_points['right_elbow'] = list(preds[11])\n articulation_points['right_shoulder'] = list(preds[12])\n articulation_points['left_shoulder'] = list(preds[13])\n articulation_points['left_elbow'] = list(preds[14])\n articulation_points['left_wrist'] = list(preds[15])\n return articulation_points", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def prediction():\r\n\r\n loaded_model = load_model('imageTrainedModel.h5')\r\n print(loaded_model.summary())\r\n\r\n # retrieve history also:\r\n f = open('history.pckl', 'rb')\r\n history = pickle.load(f)\r\n f.close()\r\n\r\n print(history.keys())\r\n print(history)\r\n\r\n epochs = len(history['loss']) # length of the list stored at 'loss'\r\n # Plot losses for train and validation\r\n plt.figure()\r\n plt.title('Loss as training progresses')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.plot(history['loss'], label='Train Error')\r\n plt.plot(history['val_loss'], label='Val Error')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Plot metrics\r\n plt.plot(history['acc']) # use same metric that was used for training. 
'history' is a dictionary.\r\n plt.title('Accuracy as training progresses')\r\n plt.ylabel('Accuracy (%)')\r\n plt.xlabel('Epoch')\r\n ymax = max(history['acc'])\r\n xpos = history['acc'].index(ymax)\r\n xmax = xpos\r\n plt.annotate('Maximum accuracy: %s' % round(ymax, 3),\r\n xy=(xmax, ymax), xycoords='data',\r\n xytext=(0.5, 0.5), textcoords='axes fraction',\r\n fontsize=12)\r\n plt.show()\r\n\r\n # make predictions using x_test\r\n test_y_predictions = loaded_model.predict(x_test, batch_size=None, verbose=1, steps=None)\r\n test_y_predictions = np.around(test_y_predictions, decimals=0) # round to whole integers\r\n true_false_array = np.equal(y_test, test_y_predictions) # test of equality.\r\n true_count = np.sum(true_false_array) # number of correctly categorised images\r\n false_count = true_false_array.shape[0] - true_count # number of images not correctly categorised\r\n\r\n # Plot predicted and actual image categories\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111)\r\n plt.title('Classification of Image Categories')\r\n plt.ylabel('Number of Images')\r\n plt.xlabel('Image Classification')\r\n label = ['Correct', 'Incorrect']\r\n index = np.arange(len(label))\r\n plt.xticks(index, label, fontsize=10, rotation=0)\r\n ax1.bar(index, [true_count, false_count])\r\n plt.show()", "def show_performance(model):\n val_image_ids_ = [i for i in val_image_ids]\n np.random.shuffle(val_image_ids_)\n\n df_val = area_filter(val_image_ids_, val_coco)\n image_id = df_val['image_id'].iloc[0]\n annotation_ids = df_val[df_val['image_id'] == image_id]['annotation_id'].tolist()\n\n image_json = val_coco.loadImgs([image_id])[0]\n raw_image = cv2.imread(os.path.join(\"{}/{}/{}\".format(data_dir, val_type, image_json['file_name'])))\n height, width, _ = raw_image.shape\n\n # decode the mask, using annotation id created at the group by above\n binary_mask = process_mask(val_coco, annotation_ids, width, height)\n\n # preprocess input and mask (resize to 128, scale to [0, 1))\n input_image, input_mask = preprocess(raw_image, binary_mask)\n\n input_mask = np.expand_dims(input_mask, axis=-1)\n predicted_mask = model.predict(np.array([input_image]))[0]\n\n plt.figure(figsize=(20, 20))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n display_list = [input_image[:, :, ::-1], input_mask, predicted_mask]\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(array_to_img(display_list[i]))\n plt.axis('off')\n plt.show()", "def display_predictions(self, x, pred, y):\r\n fig = plt.figure(figsize=(15, 15))\r\n\r\n for i in range(self.batch_size):\r\n vals = x[i, :, :, :]\r\n sub = fig.add_subplot(1, self.batch_size, i + 1)\r\n val = pred[i]\r\n val2 = y[i]\r\n res = self.classes[val]\r\n res2 = self.classes[val2]\r\n\r\n sub.set_title(\"predicted = \" + res + \"\\n\" + \"Actual = \" + res2)\r\n plt.axis('off')\r\n img = np.asarray(vals)\r\n img = np.transpose(img, (1, 2, 0))\r\n # Get Specific channels for rgb\r\n rgbimg = self.get_rgb(img, 61, 38, 19)\r\n\r\n # Normalize Inputs\r\n imgmin, imgmax = rgbimg.min(), rgbimg.max()\r\n rgbimg = (rgbimg - imgmin) / (imgmax - imgmin)\r\n plt.imshow(rgbimg)\r\n file_loc = [str(self.training_path) + '\\\\checkpoints' +\r\n '\\\\' + 'predictions.jpg']\r\n s = \"\"\r\n s = s.join(file_loc)\r\n pred_path = Path(s)\r\n plt.savefig(pred_path)\r\n plt.show()", "def image_model_predict(input_ms_image_filename, input_pan_image_filename, pan_img_height_size, pan_img_width_size, \r\n fitted_model, write, 
output_filename):\r\n \r\n with rasterio.open(input_ms_image_filename) as f:\r\n metadata = f.profile\r\n ms_img = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(input_pan_image_filename) as g:\r\n metadata_pan = g.profile\r\n pan_img = g.read(1)\r\n \r\n pan_img = np.expand_dims(pan_img, axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n class_layer = np.zeros((pan_img.shape[0], pan_img.shape[1]))\r\n \r\n img_pan_holder = []\r\n img_ms_holder = []\r\n \r\n for i in range(0, pan_img.shape[0] - pan_img_height_size, int(ms_to_pan_ratio)):\r\n for j in range(0, pan_img.shape[1] - pan_img_width_size, int(ms_to_pan_ratio)):\r\n img_pan_iter = pan_img[i : i + pan_img_height_size, j : j + pan_img_width_size, 0]\r\n img_pan_holder.append(img_pan_iter)\r\n \r\n for i in range(0, int(ms_img.shape[0] - (pan_img_height_size / ms_to_pan_ratio)), int(ms_to_pan_ratio)):\r\n for j in range(0, int(pan_img.shape[1] - (pan_img_width_size / ms_to_pan_ratio)), int(ms_to_pan_ratio)):\r\n img_ms_iter = ms_img[i : int(i + (pan_img_height_size / ms_to_pan_ratio)), \r\n j : int(j + (pan_img_width_size / ms_to_pan_ratio)), \r\n 0 : metadata['count']]\r\n img_ms_holder.append(img_ms_iter)\r\n \r\n img_pan_array = np.concatenate(img_pan_holder, axis = 0)\r\n img_ms_array = np.concatenate(img_ms_holder, axis = 0)\r\n \r\n pred_array = np.argmax(fitted_model.predict([img_ms_array, img_pan_array]), axis = 1)\r\n \r\n n = 0 \r\n for i in range(int(pan_img_height_size / 2), pan_img.shape[0] - int(pan_img_height_size / 2), int(ms_to_pan_ratio)):\r\n for j in range(int(pan_img_width_size / 2), pan_img.shape[1] - int(pan_img_width_size / 2), int(ms_to_pan_ratio)):\r\n class_layer[i, j] = pred_array[n]\r\n n += 1\r\n \r\n if write:\r\n with rasterio.open(output_filename, 'w', **metadata_pan) as dst:\r\n dst.write(class_layer)\r\n \r\n return class_layer", "def plot_predictions(images, filename):\n imagex = format_image(images, 4)\n mosaic = create_mosaic(imagex, 2, 2)\n plt.figure(figsize=(12, 12))\n plt.imshow(mosaic, cmap='gray')\n plt.axis('off')\n plt.savefig(filename + '.png', bbox_inches='tight')", "def test():\n\n # load image and adjust its format\n if MEMORY_CACHE:\n test_input = dataset[0]['file']\n oriImg = test_input.byte().permute((1, 2, 0)).numpy() # B,G,R order\n else:\n oriImg = cv2.imread(dataset[0]['file']) # B,G,R order\n test_input = torch.from_numpy(oriImg).permute((2, 0, 1)).float()\n \n # transfer data on GPU on demand\n if CUDA:\n test_input = test_input.cuda()\n\n # perform prediction\n net.eval()\n with torch.no_grad():\n result = net(test_input.unsqueeze(0))[0]\n\n print(result)\n\n # draw rectangles and its class\n img = cv2.cvtColor(oriImg, cv2.COLOR_BGR2RGB)\n for box, label, score in zip(result['boxes'], result['labels'], result['scores']):\n # if score > 0.5:\n if label < len(orig_labels):\n img = cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)\n img = cv2.putText(img, '{}: {:.0%}'.format(orig_labels[label], score), (box[0] + 5, box[3] - 5), cv2.FONT_HERSHEY_SIMPLEX, .7, (0, 255, 0), 2, cv2.LINE_AA)\n plt.imshow(img)\n plt.axis('off')\n plt.show()", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = 
map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)", "def main(output_dir, img_size, max_boxes, train_size, val_size):\n im_id = 0\n ann_id = 0\n\n def make_split(split, split_size):\n nonlocal im_id\n nonlocal ann_id\n\n image_dir = join(output_dir, 'train')\n make_dir(image_dir)\n\n images = []\n annotations = []\n for _ in range(split_size):\n img, boxes = make_scene(img_size, max_boxes)\n img = np.transpose(img, (1, 2, 0))\n file_name = '{}.png'.format(im_id)\n Image.fromarray(img).save(\n join(image_dir, file_name))\n images.append({\n 'id': im_id,\n 'height': img_size,\n 'width': img_size,\n 'file_name': file_name\n })\n for box in boxes:\n annotations.append({\n 'id': ann_id,\n 'image_id': im_id,\n 'category_id': 1,\n 'area': (box[2] - box[0]) * (box[3] - box[1]),\n 'bbox': [box[1], box[0], box[3]-box[1], box[2]-box[0]]\n })\n ann_id += 1\n im_id += 1\n\n categories = [{'id': 1, 'name': 'rectangle'}]\n labels = {\n 'images': images,\n 'annotations': annotations,\n 'categories': categories}\n json_to_file(labels, join(output_dir, '{}.json'.format(split)))\n\n make_split('train', train_size)\n make_split('valid', val_size)", "def demo(net, image_name, classes):\n\n # Load pre-computed Selected Search object proposals\n # box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')\n test_mats_path = '/home/tanshen/fast-rcnn/data/kaggle/test_bbox'\n box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')\n obj_proposals = sio.loadmat(box_file)['boxes']\n\n # Load the demo image\n test_images_path = '/home/tanshen/fast-rcnn/data/kaggle/ImagesTest'\n # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')\n im_file = os.path.join(test_images_path, image_name + '.jpg')\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im, obj_proposals)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0\n NMS_THRESH = 0.3\n max_inds = 0\n max_score = 0.0\n for cls in classes:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n keep = np.where(cls_scores >= CONF_THRESH)[0]\n cls_boxes = cls_boxes[keep, :]\n cls_scores = cls_scores[keep]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,\n # CONF_THRESH, image_name)\n #if get_max!=[]: \n\n [ind,tmp]=get_max(im, cls, dets, thresh=CONF_THRESH)\n #print image_name,cls,tmp\n\n #vis_detections(im, cls, dets, image_name, 
thresh=CONF_THRESH)\n #print dets[:,-1]\n #print image_name,max_score\n file.writelines([image_name,'\\t',cls,'\\t',str(tmp),'\\n'])\n if(max_score<tmp):\n max_score=tmp\n cls_max=cls\n print image_name,cls_max,max_score", "def demo(net, image_name,num_class,save_ff):\r\n\r\n # Load the demo image\r\n #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n im_file=image_name\r\n im = cv2.imread(im_file)\r\n\r\n # Detect all object classes and regress object bounds\r\n timer = Timer()\r\n timer.tic()\r\n #for zzz in range(100):\r\n scores, boxes = im_detect(net, im)\r\n timer.toc()\r\n print ('Detection took {:.3f}s for '\r\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n # Visualize detections for each class\r\n CONF_THRESH = 0.35\r\n NMS_THRESH = 0.3\r\n thresh=CONF_THRESH\r\n for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n cls_ind += 1 # because we skipped background\r\n # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n # cls_scores = scores[:, cls_ind]\r\n # dets = np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n #save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r", "def generate_images(self, model, test_input, step, dst_dir):\n prediction = model(test_input)\n\n plt.figure(figsize=(12, 12))\n display_list = [test_input[0], prediction[0]]\n title = ['Input Image', 'Predicted Image']\n\n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.title(title[i])\n # getting the pixel values between [0, 1] to plot it.\n plt.imshow(display_list[i] * 0.5 + 0.5)\n plt.axis('off')\n filename = os.path.join(dst_dir, 'generated_imgs_at_step_{:06d}.png'.format(step))\n plt.savefig(filename)", "def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")" ]
[ "0.6950687", "0.65381104", "0.64658016", "0.64365315", "0.63709706", "0.63340414", "0.6260973", "0.6257987", "0.62485135", "0.6241173", "0.62216526", "0.6170909", "0.6166584", "0.6165491", "0.61395955", "0.6133383", "0.6118839", "0.61138433", "0.6113759", "0.6082543", "0.60813713", "0.60707927", "0.60637987", "0.605877", "0.6054377", "0.60398793", "0.6028199", "0.60274744", "0.6025144", "0.6020547" ]
0.6905549
1
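Assuming negative_scores and document_score are comparable similarity scores between the query and each candidate (an interpretation, not stated in the data), one could keep only the hardest negatives with a margin filter such as this sketch:

def filter_hard_negatives(record, margin=0.05):
    # Keep negatives scoring within `margin` of the positive document's score.
    positive_score = float(record["document_score"])
    return [
        negative
        for negative, score in zip(record["negatives"], record["negative_scores"])
        if float(score) >= positive_score - margin
    ]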
Crops the predicted bounding box regions and exports them to the output folder.
import os

import cv2


def crop_inference_bbox(image, boxes, file_name="cropped_inference_result"):
    # create output folder if not present (create_dir is a project helper)
    create_dir("output/")
    # crop detections
    if len(boxes) > 0:
        for ind in range(len(boxes)):
            # boxes[ind] is [(x1, y1), (x2, y2)]; numpy slicing is [y1:y2, x1:x2]
            cropped_img = image[
                int(boxes[ind][0][1]) : int(boxes[ind][1][1]),
                int(boxes[ind][0][0]) : int(boxes[ind][1][0]),
                :,
            ]
            save_path = os.path.join("output/", file_name + "_" + str(ind) + ".png")
            cv2.imwrite(save_path, cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR))
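A hypothetical call with synthetic values, again assuming the project's create_dir helper exists:

import numpy as np

# Illustrative only: synthetic image and two predicted boxes as [(x1, y1), (x2, y2)].
image = np.zeros((480, 640, 3), dtype=np.uint8)
boxes = [[(100, 100), (200, 250)], [(300, 50), (400, 150)]]

crop_inference_bbox(image, boxes)  # writes output/cropped_inference_result_0.png and _1.png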
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)", "def save(\n self,\n output_folder: str,\n box_thickness: int = 2,\n show_confidence: bool = True,\n color_mapping: Optional[List[Tuple[int, int, int]]] = None,\n target_bboxes: Optional[Union[np.ndarray, List[np.ndarray]]] = None,\n target_bboxes_format: Optional[str] = None,\n target_class_ids: Optional[Union[np.ndarray, List[np.ndarray]]] = None,\n ) -> None:\n if output_folder:\n os.makedirs(output_folder, exist_ok=True)\n\n target_bboxes, target_class_ids = self._check_target_args(target_bboxes, target_bboxes_format, target_class_ids)\n\n for i, (prediction, target_bbox, target_class_id) in enumerate(zip(self._images_prediction_lst, target_bboxes, target_class_ids)):\n image_output_path = os.path.join(output_folder, f\"pred_{i}.jpg\")\n prediction.save(output_path=image_output_path, box_thickness=box_thickness, show_confidence=show_confidence, color_mapping=color_mapping)", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def save_regions_bmp(self, robot, output_directory):\n # Make ouput directory if it doesn't already exist\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n regions_path = os.path.join(output_directory, \"regions.bmp\")\n if os.path.exists(regions_path):\n return\n image = Image.new(\"L\", 
OUTPUT_BMP_DIMENSIONS)\n draw = ImageDraw.Draw(image)\n pixels = image.load()\n output_width, output_height = image.size\n output_bounds = (0, output_height, output_width, 0)\n # Set default colour\n for i in range(output_width):\n for j in range(output_height):\n pixels[i, j] = OUTPUT_DEFAULT_COLOUR\n # Add regions\n for region in self.regions:\n translated_bounds = get_translated_bounds(region.bounds, self.bounds, output_bounds)\n left, top, right, bottom = list(map(int, translated_bounds))\n if robot.can_hold(region.permeability):\n colour = OUTPUT_VALID_COLOUR\n else:\n colour = OUTPUT_INVALID_COLOUR\n draw.rectangle((left, bottom, right, top), fill=colour)\n image.save(regions_path)\n LOGGER.debug(\"Saved regions!\")", "def crop_all_bounding_boxes(boxes, image_path, crop_path):\n index = 0\n for box in boxes:\n object_class = box[0]\n cropped_image = crop_bounding_box_from_image(\n box, image_path, crop_path)\n filename = object_class + \"_\" + os.path.basename(image_path)\n while os.path.isfile(os.path.join(crop_path, filename)):\n print('File %s already exists!' % (filename))\n index += 1\n filename = str(index) + \"_\" + filename\n cropped_image.save(filename)", "def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)", "def snapshot(self):\n net = self.solver.net\n \n if cfg.TRAIN.BBOX_REG:\n orig_0 = []; orig_1 = [];\n # save original values\n for i, bbox_pred_param_name in enumerate(cfg.TRAIN.BBOX_PRED_PARAM_NAMES):\n print 'adjusting {} parameters'.format(bbox_pred_param_name)\n orig_0.append(net.params[bbox_pred_param_name][0].data.copy())\n orig_1.append(net.params[bbox_pred_param_name][1].data.copy())\n\n # scale and shift with bbox reg unnormalization; then save snapshot\n for i, bbox_pred_param_name in enumerate(cfg.TRAIN.BBOX_PRED_PARAM_NAMES):\n net.params[bbox_pred_param_name][0].data[...] = \\\n (net.params[bbox_pred_param_name][0].data *\n self.bbox_stds[:, np.newaxis])\n net.params[bbox_pred_param_name][1].data[...] = \\\n (net.params[bbox_pred_param_name][1].data *\n self.bbox_stds + self.bbox_means)\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX\n if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')\n filename = (self.solver_param.snapshot_prefix + infix +\n '_iter_{:d}'.format(self.solver.iter) + '.caffemodel')\n filename = os.path.join(self.output_dir, filename)\n\n net.save(str(filename))\n print 'Wrote snapshot to: {:s}'.format(filename)\n\n if cfg.TRAIN.BBOX_REG:\n # restore net to original state\n for i, bbox_pred_param_name in enumerate(cfg.TRAIN.BBOX_PRED_PARAM_NAMES):\n net.params[bbox_pred_param_name][0].data[...] = orig_0[i]\n net.params[bbox_pred_param_name][1].data[...] 
= orig_1[i]", "def crop_save( img_path_filename, lines_boxes, lines_texts, lines_probs, filename, basename, output_dir_name ):\n\t# Read the image\n\timage = Image.open( img_path_filename )\n\t# Get image's size\n\twidth, height = image.size\n\n\ti = 0\n\ttext_local = \"\"\n\ttext_global = \"\"\n\twhile i < len(lines_boxes):\n\t\t##################################################################################################\n\t\t# Left Upper Corner\n\t\tx1 = lines_boxes[i][0]\n\t\tx1 = x1 - 8\n\t\tif x1 < 0:\n\t\t\tx1 = 0\n\n\t\ty1 = lines_boxes[i][1]\n\t\ty1 = y1 - 1\n\t\tif y1 < 0:\n\t\t\ty1 = 0\n\n\t\t# Right Lower Corner\n\t\tx2 = lines_boxes[i][2]\n\t\tx2 = x2 + 10\n\t\tif x2 > (width - 1):\n\t\t\tx2 = width - 1\n\n\t\ty2 = lines_boxes[i][3]\n\t\ty2 = y2 + 1\n\t\tif y2 > (height - 1):\n\t\t\ty2 = height - 1\n\n\t\t# Crop the block and save it\n\t\tn_line = \"%03d\" % (i+1)\n\t\tline_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".jpg\"\t\t\n\n\t\timg_cropped = image.crop( (x1, y1, x2, y2) )\n\t\timg_cropped.save( line_filename, 'JPEG', quality = 100 )\n\n\t\t##################################################################################################\n\t\t# Create the information about the cropped line for the local and global text files\n\t\ttext_line = basename + \"_\" + n_line + \".jpg\\t\" + str(x1) + \"\\t\" + str(y1) + \"\\t\" + str(x2) + \"\\t\" + str(y2) + \"\\t\" + ''.join(lines_texts[i]) + \"\\n\"\n\t\ttext_local += text_line\n\t\ttext_global += filename + \"\\t\" + text_line\n\n\t\t##################################################################################################\n\t\t# Creation of the text and probability file for each line\n\t\tj = 0\n\t\tcontent_text_file = \"\"\n\t\tcontent_prob_file = \"\"\n\t\twhile j<len(lines_texts[i]):\n\t\t\tcontent_text_file += lines_texts[i][j]\n\t\t\tcontent_prob_file += lines_texts[i][j] + '\\t' + str(lines_probs[i][j]) + '\\n'\n\t\t\tj = j + 1\n\t\t# Write to disk the text file\n\t\ttext_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".txt\"\n\t\twith open( text_filename, \"w+\" ) as f_text:\n\t\t\tf_text.write( content_text_file )\n\t\t# Write to disk the probabilities file\n\t\tprob_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".prob\"\n\t\twith open( prob_filename, \"w+\" ) as f_prob:\n\t\t\tf_prob.write( content_prob_file )\n\n\t\ti = i + 1\n\n\treturn( text_local, text_global )", "def run_cropping(self, list_of_files, overwrite_existing=False, output_folder=None):\n if output_folder is not None:\n self.output_folder = output_folder\n\n output_folder_gt = os.path.join(self.output_folder, \"gt_segmentations\")\n maybe_mkdir_p(output_folder_gt)\n for j, case in enumerate(list_of_files):\n if case[-1] is not None:\n shutil.copy(case[-1], output_folder_gt)\n\n list_of_args = []\n for j, case in enumerate(list_of_files):\n case_identifier = get_case_identifier(case)\n list_of_args.append((case, case_identifier, overwrite_existing))\n\n p = Pool(self.num_threads)\n p.starmap(self.load_crop_save, list_of_args)\n p.close()\n p.join()", "def postprocess(frame, outs, save_image=False):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. 
Assign the box's class label as the class with the highest score.\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # non maximum suppression to eliminate redundant overlapping boxes with lower confidences\n indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\n for i in indices:\n i = i[0]\n # Skip classes that aren't people\n if classIds[i] != 0:\n continue\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n if save_image:\n # Save cropped image of detected object\n class_name = classes[classIds[i]]\n dimensions = (top, top + height, left, left + width)\n utils.write_image(frame, \"output/yolo\", class_name, dimensions)\n drawPred(classIds[i], confidences[i], left, top, left + width, top + height)", "def __call__(self, results):\n img = results['img']\n polys = results[self.instance_key]\n x_min, y_min, x_max, y_max = self._random_crop(img, polys)\n kept_idx = []\n for idx, poly in enumerate(polys):\n if np.all((poly[0::2] >= x_min) & (poly[1::2] >= y_min) & \\\n (poly[0::2] <= x_max) & (poly[1::2] <= y_max)):\n kept_idx.append(idx)\n kept_idx = np.array(kept_idx)\n # crop img\n results['img'] = img[y_min : y_max, x_min : x_max, :]\n results['img_shape'] = results['img'].shape\n # crop mask\n for key in results.get('mask_fields', []):\n results[key] = results[key].crop(np.array([x_min, y_min, x_max, y_max]))\n # crop box\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n box = np.array(box)\n if np.all((np.min(box[0::2]) >= x_min) & (np.min(box[1::2]) >= y_min) & \\\n (np.max(box[0::2]) <= x_max) & (np.max(box[1::2]) <= y_max)):\n box[0::2] = (box[0::2] - x_min)\n box[1::2] = (box[1::2] - y_min)\n bboxes.append(box)\n # no valid box in img\n if len(bboxes) == 0:\n if key == 'gt_bboxes':\n bboxes = np.zeros((0, 4), dtype=np.float32)\n else:\n bboxes = np.zeros((0, 8), dtype=np.float32)\n results[key] = bboxes\n # calculate the kept text and label\n for key in ['gt_labels', 'gt_texts']:\n if key in results:\n results[key] = [results[key][idx] for idx in kept_idx]\n # calculate the kept mask\n for key in ['gt_masks']:\n if key in results:\n ori_mask = results[key].masks\n kept_mask = [ori_mask[idx] for idx in kept_idx]\n if len(kept_mask) > 0:\n kept_mask = np.stack(kept_mask)\n else:\n kept_mask = np.empty((0, results[key].height, results[key].width), dtype=np.float32)\n results[key] = BitmapMasks(kept_mask, results[key].height, results[key].width)\n return results", "def crop_acc_mask(images_dir, images_output_dir, masks_dir, mask_suffix=None, masks_output_dir=None): \n image_suffix_list = [\"C0\", \"DE\", \"T2\"]\n if not os.path.exists(images_output_dir):\n os.makedirs(images_output_dir)\n if masks_output_dir is not None and (not os.path.exists(masks_output_dir)):\n os.makedirs(masks_output_dir)\n margin = [0, 30, 30]\n masks_list = os.listdir(masks_dir)\n masks_list.sort()\n json_dict = OrderedDict()\n for mask in masks_list:\n mask_path = os.path.join(masks_dir, mask)\n if 
mask.endswith(\".nii.gz\"):\n print(\"#\" * 11 *11)\n print(mask_path)\n mask_sitk = sitk.ReadImage(mask_path)\n mask_npy = sitk.GetArrayFromImage(mask_sitk)\n mask_shape = mask_npy.shape\n crop_bbox_min, crop_bbox_max = get_ND_bounding_box(mask_npy, margin=margin)\n # do not crop along depth dimension\n crop_bbox_min[0] = 0\n crop_bbox_max[0] = mask_shape[0]\n print(crop_bbox_min, crop_bbox_max)\n json_dict[mask_path] = {\"crop_bbox_min\": crop_bbox_min, \"crop_bbox_max\": crop_bbox_max}\n mask_output_npy = crop_ND_volume_with_bounding_box(mask_npy, crop_bbox_min, crop_bbox_max)\n if mask_suffix is not None:\n mask = mask.replace(\"_\" + mask_suffix + \".nii.gz\", \".nii.gz\")\n if masks_output_dir is not None:\n save_cropped_array_as_nifty_volume(mask_output_npy, os.path.join(masks_output_dir, mask), mask_sitk)\n save_cropped_array_as_nifty_volume(convert_label(mask_output_npy, [1, 2, 3, 4, 5], [1, 2, 3, 1, 1]), \\\n os.path.join(images_output_dir, mask.replace(\".nii.gz\", \"_{0:04d}.nii.gz\".format(len( \\\n image_suffix_list)))), mask_sitk)\n for i, image_suffix in enumerate(image_suffix_list):\n image = mask.replace(\".nii.gz\", \"_{}.nii.gz\".format(image_suffix))\n image_path = os.path.join(images_dir, image)\n print(image_path)\n image_sitk = sitk.ReadImage(image_path)\n image_npy = sitk.GetArrayFromImage(image_sitk)\n image_output_npy = crop_ND_volume_with_bounding_box(image_npy, crop_bbox_min, crop_bbox_max)\n save_cropped_array_as_nifty_volume(image_output_npy, os.path.join(images_output_dir, mask.replace( \\\n \".nii.gz\", \"_{0:04d}.nii.gz\".format(i))), image_sitk)\n save_json(json_dict, os.path.join(images_output_dir, \"crop_information.json\"))\n if masks_output_dir is not None:\n save_json(json_dict, os.path.join(masks_output_dir, \"crop_information.json\"))", "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_width\n \n expand_height = (40 - (top - bottom))/2 \n bottom = bottom - expand_height\n top = top + expand_height \n \n src = rasterio.open(rgb_path)\n pixelSizeX, pixelSizeY = src.res \n img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))\n src.close()\n \n #roll to bgr channel order, bgr\n img = np.rollaxis(img, 0,3)\n img = img[:,:,::-1]\n \n #reshape to 400x400m\n print(\"Original shape is {}\".format(img.shape))\n resized = resize(img, 400, 400)\n boxes = deepforest_model.predict_image(numpy_image = resized, return_plot=False)\n \n if boxes.empty:\n return boxes\n \n #tranform boxes to original size\n x_scale = 400/img.shape[0]\n y_scale = 400/img.shape[1]\n \n boxes[\"xmin\"] = boxes[\"xmin\"]/x_scale \n boxes[\"xmax\"] = boxes[\"xmax\"]/x_scale \n boxes[\"ymin\"] = boxes[\"ymin\"]/y_scale \n boxes[\"ymax\"] = boxes[\"ymax\"]/y_scale \n\n #subtract origin. Recall that numpy origin is top left! 
Not bottom left.\n boxes[\"xmin\"] = (boxes[\"xmin\"] *pixelSizeX) + left\n boxes[\"xmax\"] = (boxes[\"xmax\"] * pixelSizeX) + left\n boxes[\"ymin\"] = top - (boxes[\"ymin\"] * pixelSizeY) \n boxes[\"ymax\"] = top - (boxes[\"ymax\"] * pixelSizeY)\n\n # combine column to a shapely Box() object, save shapefile\n boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)\n boxes = gpd.GeoDataFrame(boxes, geometry='geometry') \n \n #Give an id field\n boxes[\"box_id\"] = np.arange(boxes.shape[0])\n \n return boxes", "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_width\n \n expand_height = (40 - (top - bottom))/2 \n bottom = bottom - expand_height\n top = top + expand_height \n \n src = rasterio.open(rgb_path)\n pixelSizeX, pixelSizeY = src.res \n img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))\n src.close()\n \n #roll to bgr channel order, bgr\n img = np.rollaxis(img, 0,3)\n img = img[:,:,::-1]\n \n #reshape to 400x400m\n print(\"Original shape is {}\".format(img.shape))\n resized = resize(img, 400, 400)\n boxes = deepforest_model.predict_image(numpy_image = resized, return_plot=False)\n \n if boxes.empty:\n return boxes\n \n #tranform boxes to original size\n x_scale = 400/img.shape[0]\n y_scale = 400/img.shape[1]\n \n boxes[\"xmin\"] = boxes[\"xmin\"]/x_scale \n boxes[\"xmax\"] = boxes[\"xmax\"]/x_scale \n boxes[\"ymin\"] = boxes[\"ymin\"]/y_scale \n boxes[\"ymax\"] = boxes[\"ymax\"]/y_scale \n\n #subtract origin. Recall that numpy origin is top left! 
Not bottom left.\n boxes[\"xmin\"] = (boxes[\"xmin\"] *pixelSizeX) + left\n boxes[\"xmax\"] = (boxes[\"xmax\"] * pixelSizeX) + left\n boxes[\"ymin\"] = top - (boxes[\"ymin\"] * pixelSizeY) \n boxes[\"ymax\"] = top - (boxes[\"ymax\"] * pixelSizeY)\n\n # combine column to a shapely Box() object, save shapefile\n boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)\n boxes = gpd.GeoDataFrame(boxes, geometry='geometry') \n \n #Give an id field\n boxes[\"box_id\"] = np.arange(boxes.shape[0])\n \n return boxes", "def crop(src_path, out_path, bbox_geometry, bbox_crs):\n\n # validate area of interest\n\n\n # load imagery\n satdata = rasterio.open(src_path)\n\n # grab crs\n crs = satdata.meta['crs']\n crs = str(crs).split(':')[-1]\n\n # check crs\n if(crs != bbox_crs):\n raise Exception(f'Imagery & bounding box crs mismatch ({crs}, {bbox_crs})')\n\n # apply mask with crop=True to crop the resulting raster to the AOI's bounding box\n clipped, transform = mask(satdata, aoi, crop=True)\n\n # Using a copy of the metadata from our original raster dataset, we can write a new geoTIFF\n # containing the new, clipped raster data:\n meta = satdata.meta.copy()\n\n # update metadata with new, clipped mosaic's boundaries\n meta.update(\n {\n \"transform\": transform,\n \"height\":clipped.shape[1],\n \"width\":clipped.shape[2]\n }\n )\n\n # write the clipped-and-cropped dataset to a new GeoTIFF\n with rasterio.open(out_path, 'w', **meta) as dst:\n dst.write(clipped)", "def postprocess_boxes(pred_bbox, original_image, train_input_size, score_threshold):\n \n # valid scle for box\n valid_scale=[0, np.inf]\n \n # turn bbox to array\n pred_bbox = np.array(pred_bbox)\n \n # obtain predicted x, y, w, h, objectiveness score, class probabilities\n pred_xywh = pred_bbox[:, 0:4]\n pred_objectiveness = pred_bbox[:, 4]\n pred_prob = pred_bbox[:, 5:]\n \n # 1. (x, y, w, h) --> (x_org, y_org, w_org, h_org)\n # obtain original image width and height\n org_h, org_w = original_image.shape[:2]\n \n # obtain resize ratio for height and width \n resize_ratio_h = train_input_size / org_h\n resize_ratio_w = train_input_size / org_w\n \n # scale x, y, w, h to original x, y, w, h\n pred_coor = np.concatenate([np.expand_dims(pred_xywh[:, 0] / resize_ratio_w, axis = -1), \n np.expand_dims(pred_xywh[:, 1] / resize_ratio_h, axis = -1),\n np.expand_dims(pred_xywh[:, 2] / resize_ratio_w, axis = -1),\n np.expand_dims(pred_xywh[:, 3] / resize_ratio_h, axis = -1),], axis = -1)\n \n # 2. (x_org, y_org, w_org, h_org) --> (xmin_org, ymin_org, xmax_org, ymax_org)\n # obtain diagonal image coordinates\n pred_coor = np.concatenate([pred_coor[:, :2] - pred_coor[:, 2:] * 0.5,\n pred_coor[:, :2] + pred_coor[:, 2:] * 0.5], axis = -1)\n\n # 3. clip some boxes those are out of range\n # clip bboxes where xmin_org, ymin_org < 0 and xmax_org, ymax_org out of bounds\n pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),\n np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis = -1)\n \n # mask that ensure that if xmin < xmax, ymin /> ymax and vice versa\n invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))\n pred_coor[invalid_mask] = 0\n\n # 4. discard some invalid boxes\n bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis = -1))\n scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))\n\n # 5. 
discard boxes with low scores\n # obtain index of class with max prob for each bbox\n classes = np.argmax(pred_prob, axis = -1)\n \n # multiply max prob with objectivness score for each bbox\n scores = pred_objectiveness * pred_prob[np.arange(len(pred_coor)), classes]\n \n # obtain score mask based on score threshold\n score_mask = scores > score_threshold\n \n # obtain combined mask\n mask = np.logical_and(scale_mask, score_mask)\n \n # obtain coordinates, scores and classes after mask\n coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]\n \n # return concatenated results \n return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis = -1)", "def Save_Image_Crop(img, x, y, width, height, filename = None, path = 'Predictions'):\n img = img[y:y+height, x:x+width,:]\n\n if filename is not None:\n try: \n os.mkdir(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(img)\n plt.tight_layout()\n plt.savefig(path + '/' + filename + '_Crop.png')", "def crop_img(img_path, lbl_path, output_path):\n img_file_list = os.listdir(img_path)\n lbl_file_list = os.listdir(lbl_path)\n\n img_endswith = img_file_list[0].split(\".\")[-1]\n cnt = 1\n for lbl_file in lbl_file_list:\n coords = get_coords_from_label(os.path.join(lbl_path, lbl_file))\n for coord in coords:\n xmin, ymin, xmax, ymax = coord\n\n img = cv2.imread(os.path.join(img_path, lbl_file.replace(\"xml\", img_endswith)))\n cropped = img[ymin:ymax, xmin:xmax]\n\n output_file = os.path.join(output_path, \"{}.{}\".format(cnt, img_endswith))\n cv2.imwrite(output_file, cropped)\n cnt += 1", "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_width\n \n expand_height = (40 - (top - bottom))/2 \n bottom = bottom - expand_height\n top = top + expand_height \n \n src = rasterio.open(rgb_path)\n pixelSizeX, pixelSizeY = src.res \n img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))\n \n #roll to bgr channel order, bgr\n img = np.rollaxis(img, 0,3)\n img = img[:,:,::-1]\n \n boxes = deepforest_model.predict_image(numpy_image = img, return_plot=False)\n\n #subtract origin. Recall that numpy origin is top left! 
Not bottom left.\n boxes[\"xmin\"] = (boxes[\"xmin\"] *pixelSizeX) + left\n boxes[\"xmax\"] = (boxes[\"xmax\"] * pixelSizeX) + left\n boxes[\"ymin\"] = top - (boxes[\"ymin\"] * pixelSizeY) \n boxes[\"ymax\"] = top - (boxes[\"ymax\"] * pixelSizeY)\n\n # combine column to a shapely Box() object, save shapefile\n boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)\n boxes = gpd.GeoDataFrame(boxes, geometry='geometry') \n \n #Buffer slightly \n boxes.geometry = boxes.geometry.buffer(1)\n return boxes", "def get_crops(x_train, y_train, offset=4):\n\ttopleft = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, offset, offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\ttopright = iaa.Sequential([\n\t\tiaa.Crop(px=(4 - offset, 4 - offset, offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotleft = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, offset, 4 - offset, 4 - offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tbotright = iaa.Sequential([\n\t\tiaa.Crop(px=(offset, 4 - offset, 4 - offset, offset)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\tcenter = iaa.Sequential([\n\t\tiaa.Crop(px=(2, 2, 2, 2)),\n\t\tiaa.Affine(scale=1.166666667)\n\t])\n\taugs = [topleft, topright, botleft, botright, center]\n\n\taug_imgs = []\n\tfor aug in tqdm(augs):\n\t\taug_imgs.append(aug.augment_images(x_train * 255))\n\n\taug_x_train = [item for sublist in aug_imgs for item in sublist]\n\taug_y_train = y_train * 5\n\n\treturn aug_x_train, aug_y_train", "def initiate_yolo_detect(images_path, save_to_path, detections_file='pickles/bounding_boxes.pickle'):\n for filename in os.listdir(images_path):\n bound_boxes = detect_objects_on_image(\n os.path.join(images_path, filename), detections_file)\n predictions_path = os.path.join(\n save_to_path, 'predictions_' + filename)\n print('predictions path', predictions_path)\n copy2('predictions_' + os.path.basename(image_directory) +\n '.png', predictions_path)", "def crop_to_regions(img: np.ndarray, check_areas: List[Dict[str, List[int]]]) -> List[np.ndarray]:\n ret = []\n for region in check_areas:\n from_x = region['start'][0]\n from_y = region['start'][1]\n to_x = from_x + region['size'][0]\n to_y = from_y + region['size'][1]\n # Don't overflow\n to_x = to_x if to_x < img.shape[1] else img.shape[1] - 1\n to_y = to_y if to_y < img.shape[0] else img.shape[0] - 1\n ret.append(img[from_y:to_y, from_x:to_x])\n return ret", "def generate_outlines(self):\n morphed_atlas = bio.load_nii(self.registered_atlas_img_path, as_array=False)\n atlas_scale = morphed_atlas.header.get_zooms()\n morphed_atlas = morphed_atlas.get_data()\n boundaries_mask = sk_segmentation.find_boundaries(morphed_atlas, mode='inner')\n boundaries = morphed_atlas * boundaries_mask\n bio.to_nii(boundaries, self.outlines_file_path, scale=atlas_scale)", "def remove_yboundaries(self, **kwargs):\n\n variables = []\n xcoord = self.data.metadata[\"bout_xdim\"]\n ycoord = self.data.metadata[\"bout_ydim\"]\n new_metadata = None\n for v in self.data:\n if xcoord in self.data[v].dims and ycoord in self.data[v].dims:\n variables.append(\n self.data[v].bout.remove_yboundaries(return_dataset=True, **kwargs)\n )\n new_metadata = variables[-1].metadata\n elif ycoord in self.data[v].dims:\n raise ValueError(\n f\"{v} only has a {ycoord}-dimension so cannot split \"\n f\"into regions.\"\n )\n else:\n variable = self.data[v]\n if \"keep_yboundaries\" in variable.metadata:\n variable.attrs[\"metadata\"] = copy(variable.metadata)\n 
variable.metadata[\"keep_yboundaries\"] = 0\n variables.append(variable.bout.to_dataset())\n if new_metadata is None:\n # were no 2d or 3d variables so do not have updated jyseps*, ny_inner but\n # does not matter because missing metadata is only useful for 2d or 3d\n # variables\n new_metadata = variables[0].metadata\n\n result = xr.merge(variables)\n\n result.attrs = copy(self.data.attrs)\n\n # Copy metadata to get possibly modified jyseps*, ny_inner, ny\n result.attrs[\"metadata\"] = new_metadata\n\n if \"regions\" in result.attrs:\n # regions are not correct for modified BoutDataset\n del result.attrs[\"regions\"]\n\n # call to re-create regions\n result = apply_geometry(result, self.data.geometry)\n\n return result", "def _recover_boundingboxes(features):\n ymin = features['image/object/bbox/ymin'].values\n xmin = features['image/object/bbox/xmin'].values\n ymax = features['image/object/bbox/ymax'].values\n xmax = features['image/object/bbox/xmax'].values\n return tf.transpose([ymin, xmin, ymax, xmax])", "def predict_all_images():\n #Read config\n config = read_config()\n\n #read model\n model = read_model(config[\"model_path\"], config)\n tifs = glob.glob(os.path.join(\"data\",\"**\",\"*.tif\"))\n for tif in tifs:\n print(tif)\n prediction = predict_image(model, tif, score_threshold = 0.1, max_detections= 200,return_plot=False)\n\n #reshape and save to csv\n df = pd.DataFrame(prediction)\n df.columns = [\"xmin\",\"ymin\",\"xmax\",\"ymax\"]\n\n #save boxes\n file_path = os.path.splitext(tif)[0] + \".csv\"\n df.to_csv(file_path)", "def crop_from_dets(\n img, \n bboxes, \n target_height, \n target_width,\n extra_zoom\n):\n\n imght = img.size(1)\n imgwidth = img.size(2)\n tmp_img = img\n # normalization (per-channel)\n tmp_img[0].add_(-0.406)\n tmp_img[1].add_(-0.457)\n tmp_img[2].add_(-0.480)\n \n crops = []\n bboxes_zoomed = []\n for box in bboxes:\n upLeft = torch.Tensor(\n (float(box[0]), float(box[1])))\n bottomRight = torch.Tensor(\n (float(box[2]), float(box[3])))\n\n ht = bottomRight[1] - upLeft[1]\n width = bottomRight[0] - upLeft[0]\n if width > 100:\n scaleRate = 0.2\n else:\n scaleRate = 0.3\n\n # zooming the predicted bounding box\n upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)\n upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)\n bottomRight[0] = max(\n min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)\n bottomRight[1] = max(\n min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)\n \n # ADD EXTRA EXPANSION BECAUSE OF ARMS OUT OF THE BOX !!!\n # i.e. 
shift x-coordinate of the box corner to right or to left\n if extra_zoom == 'right_cam':\n bottomRight[0] += min(bottomRight[0]-upLeft[0], imgwidth-bottomRight[0])\n elif extra_zoom == 'left_cam':\n upLeft[0] -= min(upLeft[0], bottomRight[0]-upLeft[0])\n \n crops.append(cropBox(tmp_img, upLeft, bottomRight, target_height, target_width)[None,...])\n bboxes_zoomed.append(torch.cat((upLeft, bottomRight))[None,...])\n \n crops = torch.cat(crops, dim=0)\n bboxes_zoomed = torch.cat(bboxes_zoomed)\n \n return crops, bboxes_zoomed", "def crop_object_from_image(saving_folder,root_folder_path,root_folder_name,row_info):\n class_name=row_info['class']\n file_id=row_info['file_id']\n img_type=row_info['type']\n xmin=row_info['x_min']\n xmax=row_info['x_max']\n ymin=row_info['y_min']\n ymax=row_info['y_max']\n\n\n origin_img_path=os.path.join(root_folder_path,root_folder_name,img_type,file_id+\".png\")\n crop_img_path=os.path.join(saving_folder,file_id+\"_\"+class_name+\".png\")\n\n origin_img=cv2.imread(origin_img_path)\n crop_img=origin_img[ymin:ymax-1,xmin:xmax-1]\n\n # If width or height only contain 1 pixel, do not crop.\n if xmax-xmin<=2 or ymax-ymin<=2:\n print(\"Only one pixel, pass!\")\n return 0\n # print(origin_img.shape)\n # print(xmin,xmax,ymin,ymax)\n # print(crop_img.shape)\n # print(crop_img_path)\n cv2.imwrite(crop_img_path,crop_img)", "def save_widerface_bboxes(image_path, bboxes_scores, output_dir):\n image_name = image_path.split('/')[-1]\n image_class = image_path.split('/')[-2]\n\n odir = os.path.join(output_dir, image_class)\n if not os.path.exists(odir):\n os.makedirs(odir)\n\n ofname = os.path.join(odir, '%s.txt' % (image_name[:-4]))\n f = open(ofname, 'w')\n f.write('{:s}\\n'.format(image_class + '/' + image_name))\n f.write('{:d}\\n'.format(bboxes_scores.shape[0]))\n for box_score in bboxes_scores:\n xmin, ymin, xmax, ymax, score = box_score\n f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\\n'.format(xmin, ymin, (\n xmax - xmin + 1), (ymax - ymin + 1), score))\n f.close()\n print(\"The predicted result is saved as {}\".format(ofname))", "def postprocess(self, frame, outs):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n classIds = []\n confidences = []\n boxes = []\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. 
Assign the box's class label as the class with the highest score.\n # your code here\n # loop over each of the layer output (I guess the outs is the number of anchor boxes)\n for output in outs:\n # loop over each of the detection\n for detection in output:\n # extract the class ID and confidence of the current object detection\n # the detection is an array of [bx, by, bw, bh, Pc, c1, c2, ..., c80]\n # Pc is the probability that there is an object\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n \n if confidence > self.confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n \n classIds.append(classID)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n \n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences.\n # your code here\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)\n \n # get the bounding bxoes after performing non maximum suppression\n # your code here\n output_boxes = []\n if len(idxs) > 0:\n for i in idxs.flatten(): # idxs = [[1],[2],[5],...], idxs.flatten() = [1,2,5,...]\n output_boxes.append(boxes[i])\n left = boxes[i][0]\n top = boxes[i][1]\n width = boxes[i][2]\n height = boxes[i][3]\n right = left + width\n bottom = top + height\n frame = self.drawPred(frame, classIds[i], confidences[i], left, top, right, bottom)\n \n output_image = frame\n return output_image, output_boxes" ]
[ "0.6000429", "0.59769034", "0.5903341", "0.58753145", "0.58705914", "0.58030313", "0.5755639", "0.5752636", "0.5709903", "0.5703744", "0.56888646", "0.5668789", "0.56635773", "0.56635773", "0.56446755", "0.562883", "0.56010115", "0.5571136", "0.5528559", "0.55171996", "0.5500763", "0.5483599", "0.5481124", "0.54785705", "0.5478356", "0.54720664", "0.5453353", "0.54493135", "0.54385746", "0.5434543" ]
0.64906526
0
Creates a category id-to-name mapping from a COCO annotation file.
def get_category_mapping_from_coco_file(coco_file_path: str) -> dict: # check if coco file is valid and read it (coco_dict, response) = read_and_validate_coco_annotation(coco_file_path) # raise error if coco file is not valid if not (response): raise TypeError coco_categories = coco_dict["categories"] category_mapping = { str(coco_category["id"]): coco_category["name"] for coco_category in coco_categories } return category_mapping
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_category_info_from_anno(anno_file, with_background=True):\n cats = []\n with open(anno_file) as f:\n for line in f.readlines():\n cats.append(line.strip())\n\n if cats[0] != 'background' and with_background:\n cats.insert(0, 'background')\n if cats[0] == 'background' and not with_background:\n cats = cats[1:]\n\n clsid2catid = {i: i for i in range(len(cats))}\n catid2name = {i: name for i, name in enumerate(cats)}\n\n return clsid2catid, catid2name", "def open_label_mapping_file(filename):\n cat_to_name = None\n \n with open(filename, 'r') as f:\n cat_to_name = json.load(f)\n \n return cat_to_name", "def create_labels_file(mapping_file, metadata_category, labels_file, simple_id=True, metadata_value=None):\n label_dict = ml_parse.parse_metadata_category_from_mapping_file(mapping_file, \\\n metadata_category)\n output = open(labels_file, 'w')\n output.write('label\\n')\n for key, value in label_dict.iteritems(): \n if simple_id: key = key.split('.')[0] \n if metadata_value is not None: value = str(value) in metadata_value\n output.write('%s\\t%s\\n' % (key, str(value)))\n output.close()", "def convert_labelme_to_coco(path_to_data):\r\n # convert labelme annotations to coco\r\n labelme2coco.convert(path_to_data, path_to_data + r'\\coco_annotation.json')\r\n\r\n # Open the coco format data\r\n with open(path_to_data + r'\\coco_annotation.json') as f:\r\n coco_d = json.load(f)\r\n\r\n # Get the category IDs for each category and create a new \"categories\" section.\r\n categories = []\r\n # for category in coco_d['categories']:\r\n # if category['name'] == 'Bad':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": category['id'],\r\n # \"supercategory\": category['id'],\r\n # \"isthing\": 1,\r\n # \"color\": [222, 23, 1]\r\n # })\r\n # elif category['name'] == 'Good':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": \"Good\",\r\n # \"supercategory\": \"Good\",\r\n # \"isthing\": 1,\r\n # \"color\": [133, 23, 1]\r\n # })\r\n\r\n # Update the \"catogories\" section of the coco format data with the correct category IDs.\r\n # coco_d['categories'] = categories\r\n\r\n categories = []\r\n for cat in coco_d['categories']:\r\n cat['isthing'] = 1\r\n categories.append(cat['name'])\r\n\r\n # Fix the segmentation and bbox.\r\n for annot in coco_d['annotations']:\r\n annot['bbox_mode'] = 0\r\n seg = annot['segmentation'][0]\r\n annot['bbox'] = seg\r\n annot['segmentation'] = [[seg[0], seg[1], seg[0], seg[3], seg[2], seg[3], seg[2], seg[1]]]\r\n\r\n # Save the modified coco format data.\r\n with open(path_to_data + r'\\coco_annotation.json', 'w') as j:\r\n json.dump(coco_d, j, sort_keys=True, indent=4)\r\n\r\n # Show the images to the user to validate the annotations.\r\n # Register the image information.\r\n register_coco_instances(\"coco_visualise\", {}, path_to_data + r\"/coco_annotation.json\",\r\n path_to_data)\r\n MetadataCatalog.get(\"meta_visualise\").set(thing_classes=categories)\r\n # MetadataCatalog.get(\"meta_train\").set(thing_classes=[\"Bad\", \"Good\"], thing_colors=[(172, 0, 0), (229, 0, 0)])\r\n train_metadata = MetadataCatalog.get(\"meta_visualise\")\r\n coco_train_dataset = DatasetCatalog.get(\"coco_visualise\")\r\n\r\n st.write('Showing the randomly picked 5 images. 
Check if the annotation is correctly embedded.')\r\n # Randomly pick 5 images to show to the user to validate the annotations.\r\n for d in random.sample(coco_train_dataset, 5):\r\n im = Image.open(d['file_name'])\r\n im_array = np.asarray(im)\r\n v = Visualizer(im_array, metadata=train_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.5)\r\n v = v.draw_dataset_dict(d)\r\n pil_image = Image.fromarray(v.get_image())\r\n st.image(pil_image)\r\n # window = tk.Toplevel()\r\n # window.tkimage = ImageTk.PhotoImage(pil_image)\r\n # window.attributes('-topmost', True)\r\n # label = tk.Label(window, image=window.tkimage)\r\n # label.pack()\r\n # button_close = tk.Button(window, text=\"Close\", command=window.destroy)\r\n # button_close.pack(fill='x')\r\n\r\n # Confirm the annotations with user. If the annotations are correct, it will proceed further.\r\n # If not, it terminates the program.\r\n # if messagebox.askyesno(title=\"Validate Annotations\", message=\"Were all annotations correct?\"):\r\n # pass\r\n DatasetCatalog.clear()\r\n MetadataCatalog.clear()", "def process_coco(coco_file_path: str) -> (list, dict):\n coco_dict = load_json(coco_file_path)\n\n # rearrange coco file for better annotation reach\n images = list()\n for image in coco_dict[\"images\"]:\n image_annotations = list()\n for annotation in coco_dict[\"annotations\"]:\n if image[\"id\"] == annotation[\"image_id\"]:\n image_annotations.append(annotation)\n image[\"annotations\"] = image_annotations\n images.append(image)\n\n return images, coco_dict[\"categories\"]", "def prep_coco_cats():\n for coco_cat_id, transformed_cat_id_p1 in get_label_map().items():\n transformed_cat_id = transformed_cat_id_p1 - 1\n coco_cats[transformed_cat_id] = coco_cat_id\n coco_cats_inv[coco_cat_id] = transformed_cat_id", "def read_labelmap_vidor(labelmap_file):\n\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n\n with open('idx_to_pred.pkl', 'rb') as f:\n idx_to_pred = pickle.load(f)\n\n # with PathManager.open(labelmap_file, \"r\") as f:\n # import pdb; pdb.set_trace()\n # for line in f:\n # if line.startswith(\" name:\"):\n # name = line.split('\"')[1]\n # elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n # class_id = int(line.strip().split(\" \")[-1])\n # labelmap.append({\"id\": class_id, \"name\": name})\n # class_ids.add(class_id)\n # return labelmap, class_ids\n\n \"\"\"\n (Pdb) categories\n [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}, {'id': 4, 'name': 'dance'}, {'id': 5, 'name': 'fall down'}, {'id': 6, 'name': 'get up'}, {'id': 7, 'name': 'jump/leap'}, {'id': 8, 'name': 'lie/sleep'}, {'id': 9, 'name': 'martial art'}, {'id': 10, 'name': 'run/jog'}, {'id': 11, 'name': 'sit'}, {'id': 12, 'name': 'stand'}, {'id': 13, 'name': 'swim'}, {'id': 14, 'name': 'walk'}, {'id': 15, 'name': 'answer phone'}, {'id': 17, 'name': 'carry/hold (an object)'}, {'id': 20, 'name': 'climb (e.g., a mountain)'}, {'id': 22, 'name': 'close (e.g., a door, a box)'}, {'id': 24, 'name': 'cut'}, {'id': 26, 'name': 'dress/put on clothing'}, {'id': 27, 'name': 'drink'}, {'id': 28, 'name': 'drive (e.g., a car, a truck)'}, {'id': 29, 'name': 'eat'}, {'id': 30, 'name': 'enter'}, {'id': 34, 'name': 'hit (an object)'}, {'id': 36, 'name': 'lift/pick up'}, {'id': 37, 'name': 'listen (e.g., to music)'}, {'id': 38, 'name': 'open (e.g., a window, a car door)'}, {'id': 41, 'name': 'play musical instrument'}, {'id': 43, 'name': 'point to (an object)'}, {'id': 45, 'name': 'pull (an object)'}, {'id': 46, 
'name': 'push (an object)'}, {'id': 47, 'name': 'put down'}, {'id': 48, 'name': 'read'}, {'id': 49, 'name': 'ride (e.g., a bike, a car, a horse)'}, {'id': 51, 'name': 'sail boat'}, {'id': 52, 'name': 'shoot'}, {'id': 54, 'name': 'smoke'}, {'id': 56, 'name': 'take a photo'}, {'id': 57, 'name': 'text on/look at a cellphone'}, {'id': 58, 'name': 'throw'}, {'id': 59, 'name': 'touch (an object)'}, {'id': 60, 'name': 'turn (e.g., a screwdriver)'}, {'id': 61, 'name': 'watch (e.g., TV)'}, {'id': 62, 'name': 'work on a computer'}, {'id': 63, 'name': 'write'}, {'id': 64, 'name': 'fight/hit (a person)'}, {'id': 65, 'name': 'give/serve (an object) to (a person)'}, {'id': 66, 'name': 'grab (a person)'}, {'id': 67, 'name': 'hand clap'}, {'id': 68, 'name': 'hand shake'}, {'id': 69, 'name': 'hand wave'}, {'id': 70, 'name': 'hug (a person)'}, {'id': 72, 'name': 'kiss (a person)'}, {'id': 73, 'name': 'lift (a person)'}, {'id': 74, 'name': 'listen to (a person)'}, {'id': 76, 'name': 'push (another person)'}, {'id': 77, 'name': 'sing to (e.g., self, a person, a group)'}, {'id': 78, 'name': 'take (an object) from (a person)'}, {'id': 79, 'name': 'talk to (e.g., self, a person, a group)'}, {'id': 80, 'name': 'watch (a person)'}]\n (Pdb) class_whitelist\n {1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20, 22, 24, 26, 27, 28, 29, 30, 34, 36, 37, 38, 41, 43, 45, 46, 47, 48, 49, 51, 52, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 79, 80}\n \"\"\"", "def coco_format(type_, id_list, annotation_url_list, file_list, result_list, label_list, coco_flag=0):\n annotations = []\n for i, result in enumerate(result_list):\n temp = {}\n annotation_url = annotation_url_list[i]\n file_path = file_list[i]\n temp['id'] = id_list[i]\n temp['annotation'] = []\n im = cv2.imread(file_path)\n height, width, _ = im.shape\n if result.shape[0] == 0:\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as w:\n w.write(temp['annotation'])\n continue\n else:\n for j in range(result.shape[0]):\n cls_id = int(result[j][0]) + 1 + coco_flag\n x1 = result[j][1]\n x2 = result[j][3]\n y1 = result[j][2]\n y2 = result[j][4]\n score = result[j][5]\n width = max(0, x2 - x1)\n height = max(0, y2 - y1)\n if cls_id in label_list:\n temp['annotation'].append({\n 'area': width * height,\n 'bbox': [x1, y1, width, height],\n 'category_id': cls_id,\n 'iscrowd': 0,\n 'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],\n 'score': score\n })\n if type_ == 2 and len(temp['annotation']) > 0:\n temp['annotation'] = [temp['annotation'][0]]\n temp['annotation'][0].pop('area')\n temp['annotation'][0].pop('bbox')\n temp['annotation'][0].pop('iscrowd')\n temp['annotation'][0].pop('segmentation')\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as wr:\n wr.write(temp['annotation'])\n return annotations", "def generate_categories(script_file):\n # inspired by https://gist.github.com/anonymous/2204527\n code_points_ranges = []\n iso_15924_aliases = []\n categories = []\n\n match = re.compile(r'([0-9A-F]+)(?:\\.\\.([0-9A-F]+))?\\W+(\\w+)\\s*#\\s*(\\w+)',\n re.UNICODE)\n\n with open(script_file,'rb') as f:\n for line in f:\n line = Encoder.str2uni(line)\n p = re.findall(match, line)\n if p:\n code_point_range_from, code_point_range_to, alias, category = p[0]\n alias = alias.upper()\n if alias not in iso_15924_aliases:\n iso_15924_aliases.append(alias)\n if category not in categories:\n 
categories.append(category)\n code_points_ranges.append((\n int(code_point_range_from, 16),\n int(code_point_range_to or code_point_range_from, 16),\n iso_15924_aliases.index(alias), categories.index(category))\n )\n code_points_ranges.sort()\n\n categories_data = {\n 'iso_15924_aliases': iso_15924_aliases,\n 'categories': categories,\n 'code_points_ranges': code_points_ranges,\n }\n\n Jfile.dump(_lexicons.format('categories.json'), categories_data)", "def load_annotations(self, ann_file, N, kind):\n\n self.coco = COCOPoint(ann_file, N=N, kind=kind)\n # The order of returned `cat_ids` will not\n # change with the order of the CLASSES\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info[\"filename\"] = info[\"file_name\"]\n data_infos.append(info)\n ann_ids = self.coco.get_ann_ids(img_ids=[i])\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos", "def convert_to_trainid(input_filename, output_filename):\n with open(input_filename, \"r\") as f:\n coco_json = json.load(f)\n\n coco_anns = coco_json.pop('annotations')\n coco_cats = coco_json.pop('categories')\n coco_trainid_json = copy.deepcopy(coco_json)\n\n coco_train_id_to_eval_id = [coco_cat['id'] for coco_cat in coco_cats]\n coco_eval_id_to_train_id = {v: k for k, v in enumerate(coco_train_id_to_eval_id)}\n\n new_cats = []\n for coco_cat in coco_cats:\n coco_cat['id'] = coco_eval_id_to_train_id[coco_cat['id']]\n new_cats.append(coco_cat)\n coco_trainid_json['categories'] = new_cats\n\n new_anns = []\n for coco_ann in coco_anns:\n segments_info = coco_ann.pop('segments_info')\n new_segments_info = []\n for segment_info in segments_info:\n segment_info['category_id'] = coco_eval_id_to_train_id[segment_info['category_id']]\n new_segments_info.append(segment_info)\n coco_ann['segments_info'] = new_segments_info\n new_anns.append(coco_ann)\n coco_trainid_json['annotations'] = new_anns\n\n with open(output_filename, \"w\") as f:\n json.dump(coco_trainid_json, f)\n print(\"{} is converted to trainid and stored in {}.\".format(input_filename, output_filename))", "def merge_coco_annotations(anno_file_list, save_dir, pre_fixs, keep_cls=13):\n annos = mmcv.load(anno_file_list[0])\n categories =[info for info in annos['categories'] if info['id'] == keep_cls]\n info = annos['info']\n licenses = annos['licenses']\n del annos\n\n new_coco_annotations = {}\n new_annotations = []\n new_images = []\n image_idx_offset = 0\n anno_idx_offset = 0\n for file_i, file_path in tqdm(enumerate(anno_file_list), total=len(anno_file_list)):\n annos = mmcv.load(file_path)\n cur_images = annos['images']\n cur_annotations = annos['annotations']\n for img_info in cur_images: # loop through image_infos\n img_info['id'] += image_idx_offset\n img_info['file_name'] = os.path.join(pre_fixs[file_i], img_info['file_name'])\n new_images.append(img_info)\n for anno_info in cur_annotations: # loop through boxes\n anno_info['id'] += anno_idx_offset\n anno_info['image_id'] += image_idx_offset\n cat_id = keep_cls if anno_info['category_id'] == keep_cls else 15\n anno_info['category_id'] = cat_id\n new_annotations.append(anno_info)\n\n image_idx_offset += len(cur_images)\n anno_idx_offset += len(cur_annotations)\n\n new_coco_annotations['info'] = 
info\n new_coco_annotations['licenses'] = licenses\n new_coco_annotations['categories'] = categories\n new_coco_annotations['annotations'] = new_annotations\n new_coco_annotations['images'] = new_images\n\n print('convert done\\nsaving coco annotations')\n mmcv.dump(new_coco_annotations, save_dir)\n print('all done!')", "def _convert(self, coco_cat_id):\n map = {\n # coco: voc\n 5 : 1,\n 2 : 2,\n 15: 3,\n 9 : 4,\n 40: 5,\n 6 : 6,\n 3 : 7,\n 16: 8,\n 57: 9,\n 20: 10,\n 61: 11,\n 17: 12,\n 18: 13,\n 4 : 14,\n 1 : 15,\n 59: 16,\n 19: 17,\n 58: 18,\n 7 : 19,\n 63: 20,\n }\n\n if not coco_cat_id in map:\n voc_cat_id = None\n else:\n voc_cat_id = map[coco_cat_id]\n\n return voc_cat_id", "def map_category_id(category_map):\n category_id = {}\n id_category = {}\n counter = 0\n for category in category_map:\n category_id[category['name']] = counter\n id_category[counter] = category['name']\n counter += 1\n return category_id, id_category", "def add_annotations(self, annotations, categories):\n annotations_id = COCOTools.get_annotations_id(self.coco[\"annotations\"])\n categories_id = COCOTools.get_categories_id(self.coco[\"categories\"])\n # cat_name = categories_id.keys()\n # cat_id = categories_id.values()\n max_id = 0\n if annotations_id:\n max_id = max(annotations_id)\n add_categories_id = COCOTools.get_categories_id(categories)\n add_id_categories = {v: k for k, v in add_categories_id.items()}\n\n for item in annotations:\n category_id = item[\"category_id\"]\n name = add_id_categories[category_id]\n item[\"category_id\"] = categories_id[name]\n max_id += 1\n item[\"id\"] = max_id\n self.coco['annotations'].append(item)\n # annotations_id__ = self.get_annotations_id(self.coco[\"annotations\"])\n # self.check_uniqueness(annotations_id, title=\"annotations_id\")", "def tag_mapping(data_path, data_type):\n with open(data_path+data_type+\"_labels.txt\", \"r\") as file1:\n tags = [line.split(\" \")[:-1] for line in file1.readlines()]\n dico = create_dico(tags)\n dico[model.START_TAG] = -1\n dico[model.STOP_TAG] = -2\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag", "def parse_category_annotations(self, annotations):\n categories = {}\n category_list, supercategory_list, category_id = [], [], []\n for i, annot in enumerate(annotations['categories']):\n categories[annot['id']] = {\n \"name\": annot['name'],\n \"supercategory\": annot['supercategory'],\n \"id\": annot['id']\n }\n category_id.append(annot['id'])\n category_list.append(annot['name'])\n supercategory_list.append(annot['supercategory'])\n supercategory_list = list(set(supercategory_list))\n\n return categories, category_list, supercategory_list, category_id", "def group_categories(categories_file):\n # map each category id to its name\n id_to_category = {}\n for category in categories_file['categories']:\n id_to_category[category['id']] = category['name']\n\n image_categories = {}\n for category in categories_file['annotations']:\n if category['image_id'] not in image_categories:\n image_categories[category['image_id']] = []\n if id_to_category[category['category_id']] not in image_categories[category['image_id']]:\n image_categories[category['image_id']].append(id_to_category[category['category_id']])\n return image_categories", "def category_cloth_attr(cloth_img_txt):\n categoryDict = {}\n linecount = 0\n with open(cloth_img_txt, 'r') as file:\n for linetext in file:\n # linetext = file.readline()\n line = linetext.rstrip(' \\n')\n if linecount 
> 1:\n line_attributes = line.split(\" \")\n categoryDict.update({line_attributes[0]: int(line_attributes[-1])})\n linecount += 1\n return categoryDict", "def create_coco_label(is_training):\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict", "def create_class_indices(self) -> None:\n\n categories = list(np.loadtxt(self.path_manager.categories_file(), delimiter=\",\", dtype=str))\n\n if self.include_noise_samples and not self.multi_label_classification:\n categories.append(\"noise\")\n\n self.class_to_idx = {}\n\n for idx, class_name in enumerate(sorted(categories)):\n self.class_to_idx[class_name] = idx", "def create_labels(filename, class_indices):\n \n _logger.debug(\"Mapping labels\")\n label={}\n label['category']=[]\n for key in class_indices:\n label['category'].append({\n 'name' : key,\n 'index' : class_indices[key]\n })\n label_path = os.path.join(config.TRAINED_MODELS_DATA, filename)\n with open(os.path.join(label_path, 'labels.txt'), 'w') as outfile:\n json.dump(label, outfile)\n return label_path", "def load_categories_to_names(cat_to_name_filepath):\n with open(cat_to_name_filepath, 'r') as f:\n cat_to_name = json.load(f)\n\n return cat_to_name", "def load_annos(self):\n data = None\n with open(self.anno_path, 'r') as file:\n if self.ext == '.json':\n data = json.load(file)\n\n # Label start at index 0\n if data is not None:\n for anno in data['annotations']:\n anno['category_id'] -= 1\n\n for anno in data['categories']:\n anno['id'] -= 1\n\n return data", "def get_categories(mapping):\n categories = []\n \n for idx, name in mapping.items(): \n temp = {'id':idx, 'name':name, 'supercategory':'NA'}\n categories.append(temp)\n \n return categories", "def generate_category_dict(category_file):\n L = json.load(open(category_file))\n d = {}\n for dict_item in L:\n item = item_to_dict(dict_item)\n if not item['Label'] is None:\n d[item['URN']] = item['Label']\n else:\n d[item['URN']] = ''\n return d", "def masks_to_coco(\n image_dir: Path,\n mask_dir: Path,\n color_to_category_map: Mapping[str, str],\n image_to_mask_pattern=None,\n):\n if not 
image_dir.is_dir():\n raise NotADirectoryError(f\"Not a directory: {image_dir}\")\n if not mask_dir.is_dir():\n raise NotADirectoryError(f\"Not a directory: {mask_dir}\")\n if image_to_mask_pattern is None:\n image_to_mask_pattern = r\"$.*\\.(?=[^.]+$)\"\n\n categories = []\n color_to_idx_map = {}\n for i, (color, name) in enumerate(color_to_category_map.items()):\n categories.append({\"id\": i, \"name\": name})\n color_to_idx_map[color.lower()] = i\n\n def _image_name_to_mask(name):\n try:\n return re.search(image_to_mask_pattern, name).group() + \".png\"\n except AttributeError:\n raise ValueError(\n f\"Could not extract mask filename from {name} \"\n f\"using pattern {repr(image_to_mask_pattern)}\"\n )\n\n images = []\n annotations = []\n colors_not_found = set()\n categories_found = set()\n for path in sorted(image_dir.glob(\"*.*\")):\n if path.name.startswith(\".\"):\n # real glob excludes these\n continue\n mask_path = mask_dir / (_image_name_to_mask(path.name))\n if not mask_path.exists():\n raise FileNotFoundError(\n f\"No mask found at {mask_path} for image named {path.name}.\"\n )\n dims = get_image_dimensions(mask_path)\n if get_image_dimensions(path) != dims:\n raise ValueError(\n f\"Got inconsistent dimensions for image \"\n f\"({get_image_dimensions(path)}) and mask ({dims})\"\n )\n\n segmentations = generate_segmentations(\n mask_path, color_to_idx_map, colors_not_found\n )\n for rle, cat_idx in segmentations:\n categories_found.add(cat_idx)\n bbox = list(map(int, pycocotools.mask.toBbox(rle)))\n annotation = {\n \"id\": len(annotations),\n \"image_id\": len(images),\n \"category_id\": cat_idx,\n \"segmentation\": rle,\n # \"is_crowd\": 0, # TODO: how should we define this?\n \"bbox\": bbox,\n }\n annotations.append(annotation)\n\n images.append({\"id\": len(images), \"file_name\": path.name, **dims})\n\n if not images:\n raise ValueError(f\"No images found in {image_dir}\")\n\n if len(colors_not_found) > 1:\n raise ValueError(\n f\"Expected at most one color to not be mapped to a category. \"\n f\"Got {len(colors_not_found)}: {', '.join(f'#{x}' for x in sorted(colors_not_found))}.\"\n )\n if len(categories_found) != len(categories):\n missing_category_names = {\n cat[\"name\"] for cat in categories if cat[\"id\"] not in categories_found\n }\n missing_category_colors = {\n color: name\n for color, name in color_to_category_map.items()\n if name in missing_category_names\n }\n\n warnings.warn(\n f\"{len(categories)} categories defined, but only \"\n f\"{len(categories_found)} of these are present in masks. 
\"\n f\"These categories were not found: {missing_category_colors}\"\n )\n\n out = {\n \"images\": images,\n \"annotations\": annotations,\n \"info\": {},\n \"categories\": categories,\n }\n\n return out", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CLAS').get('abstractTypes')\n exolinks = globalMap.get('CLAS').get('exolinks')\n\n # Class AbstractCategory\n currentMap = {}\n abstractTypes['AbstractCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'\n currentMap['eType'] = 'cplx'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.AbstractCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractCategory.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'] = currentMap\n loadMaps['CLAS.AbstractCategory.details'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute AbstractCategory.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'] = currentMap\n loadMaps['CLAS.AbstractCategory.name'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AbstractCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AbstractCategory\n\n currentMap = abstractTypes.get('AbstractCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Classification\n currentMap = {}\n abstractTypes['Classification'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'] = currentMap\n loadMaps['CLAS.Classification'] = currentMap\n currentMap['tag'] = 'CLAS.Classification'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'classifications'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'namingSystem'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Classification.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Classification.createdBy\n contentMap['createdBy'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute Classification.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.namingSystem\n currentMap = {}\n contentMap['namingSystem'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'] = currentMap\n loadMaps['CLAS.Classification.namingSystem'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.namingSystem'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'\n currentMap['name'] = 'namingSystem'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role Classification.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Classification.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'] = currentMap\n loadMaps['CLAS.Classification.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.experimentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'] = currentMap\n loadMaps['CLAS.Classification.hazardPhrases'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.hazardPhrases'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.holderCategorys\n currentMap = {}\n contentMap['holderCategorys'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'] = currentMap\n loadMaps['CLAS.Classification.holderCategorys'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.holderCategorys'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'\n currentMap['name'] = 'holderCategorys'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'] = currentMap\n 
loadMaps['CLAS.Classification.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.instrumentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'] = currentMap\n loadMaps['CLAS.Classification.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleCategories'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleComponentCategory\n currentMap = {}\n contentMap['sampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'] = currentMap\n loadMaps['CLAS.Classification.sampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleComponentCategory'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'\n currentMap['name'] = 'sampleComponentCategory'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'] = currentMap\n loadMaps['CLAS.Classification.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetScoreboards'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'] = currentMap\n loadMaps['CLAS.Classification.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetStatus'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n # End of Classification\n\n currentMap = abstractTypes.get('Classification')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['namingSystem']\n currentMap['simpleAttrs'] = aList\n aList = ['targetStatus', 'targetScoreboards', 'sampleComponentCategory', 'sampleCategories', 'instrumentTypes', 'holderCategorys', 'hazardPhrases', 'experimentTypes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['experimentTypes', 'hazardPhrases', 
'holderCategorys', 'instrumentTypes', 'sampleCategories', 'sampleComponentCategory', 'targetScoreboards', 'targetStatus']\n currentMap['children'] = aList\n\n # Class SampleComponentCategory\n currentMap = {}\n abstractTypes['SampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'] = currentMap\n loadMaps['CLAS.SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleComponentCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponentCategory'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponentCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponentCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleComponentCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleComponentCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleComponentCategory\n\n currentMap = abstractTypes.get('SampleComponentCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ExperimentType\n currentMap = {}\n abstractTypes['ExperimentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'] = currentMap\n loadMaps['CLAS.ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'experimentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ExperimentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ExperimentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute ExperimentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role ExperimentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ExperimentType.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'] = currentMap\n loadMaps['CLAS.ExperimentType.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.instrumentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role ExperimentType.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'] = currentMap\n loadMaps['CLAS.ExperimentType.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.sampleCategories'\n 
currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of ExperimentType\n\n currentMap = abstractTypes.get('ExperimentType')\n aList = ['details', 'name', 'instrumentTypes', 'sampleCategories']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetScoreboard\n currentMap = {}\n abstractTypes['TargetScoreboard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'] = currentMap\n loadMaps['CLAS.TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetScoreboards'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetScoreboard.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetScoreboard.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetScoreboard.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetScoreboard.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetScoreboard.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'] = currentMap\n loadMaps['CLAS.TargetScoreboard.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard.targetStatus'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetScoreboard\n\n currentMap = abstractTypes.get('TargetScoreboard')\n aList = ['details', 'name', 'targetStatus']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class HolderCategory\n currentMap = {}\n abstractTypes['HolderCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'] = currentMap\n loadMaps['CLAS.HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.HolderCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'holderCategorys'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HolderCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HolderCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HolderCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role HolderCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HolderCategory\n\n currentMap = 
abstractTypes.get('HolderCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class HazardPhrase\n currentMap = {}\n abstractTypes['HazardPhrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'] = currentMap\n loadMaps['CLAS.HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'hazardPhrases'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HazardPhrase.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HazardPhrase.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HazardPhrase.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Attribute HazardPhrase.phrase\n currentMap = {}\n contentMap['phrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'] = currentMap\n loadMaps['CLAS.HazardPhrase.phrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase.phrase'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'\n currentMap['name'] = 'phrase'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role HazardPhrase.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HazardPhrase\n\n currentMap = abstractTypes.get('HazardPhrase')\n aList = ['details', 'name', 'phrase']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class InstrumentType\n currentMap = {}\n abstractTypes['InstrumentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'] = currentMap\n loadMaps['CLAS.InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'instrumentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute InstrumentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute InstrumentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute InstrumentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role InstrumentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role InstrumentType.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'] = currentMap\n loadMaps['CLAS.InstrumentType.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of InstrumentType\n\n currentMap = abstractTypes.get('InstrumentType')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleCategory\n currentMap = {}\n abstractTypes['SampleCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'] = currentMap\n loadMaps['CLAS.SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleCategories'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleCategory.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'] = currentMap\n loadMaps['CLAS.SampleCategory.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of SampleCategory\n\n currentMap = abstractTypes.get('SampleCategory')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetStatus\n currentMap = {}\n abstractTypes['TargetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'] = currentMap\n loadMaps['CLAS.TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetStatus'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetStatus.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetStatus.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetStatus.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetStatus.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetStatus.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'] = currentMap\n loadMaps['CLAS.TargetStatus.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus.targetScoreboards'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetStatus\n\n currentMap = abstractTypes.get('TargetStatus')\n aList = ['details', 'name', 'targetScoreboards']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to Classification\n currentMap = {}\n exolinks['Classification'] = currentMap\n loadMaps['CLAS.exo-Classification'] = currentMap\n currentMap['tag'] = 'CLAS.exo-Classification'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['name'] = 'Classification'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to SampleComponentCategory\n currentMap = {}\n exolinks['SampleComponentCategory'] = currentMap\n loadMaps['CLAS.exo-SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleComponentCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['name'] = 'SampleComponentCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to ExperimentType\n currentMap = {}\n exolinks['ExperimentType'] = currentMap\n loadMaps['CLAS.exo-ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-ExperimentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['name'] = 'ExperimentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetScoreboard\n currentMap = {}\n exolinks['TargetScoreboard'] = currentMap\n loadMaps['CLAS.exo-TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetScoreboard'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['name'] = 'TargetScoreboard'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HolderCategory\n currentMap = {}\n exolinks['HolderCategory'] = currentMap\n loadMaps['CLAS.exo-HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HolderCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['name'] = 'HolderCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HazardPhrase\n currentMap = {}\n exolinks['HazardPhrase'] = currentMap\n loadMaps['CLAS.exo-HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HazardPhrase'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['name'] = 'HazardPhrase'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to InstrumentType\n currentMap = {}\n exolinks['InstrumentType'] = currentMap\n loadMaps['CLAS.exo-InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-InstrumentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['name'] = 'InstrumentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleCategory\n currentMap = {}\n exolinks['SampleCategory'] = currentMap\n loadMaps['CLAS.exo-SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['name'] = 'SampleCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetStatus\n currentMap = {}\n exolinks['TargetStatus'] = currentMap\n loadMaps['CLAS.exo-TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetStatus'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['name'] = 'TargetStatus'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def convert_to_annotation(file, output):\n resource = parse_bel_resource(file)\n\n write_annotation(\n keyword=resource['Namespace']['Keyword'],\n values={k: '' for k in resource['Values']},\n citation_name=resource['Citation']['NameString'],\n description=resource['Namespace']['DescriptionString'],\n file=output,\n )", "def create_idx_to_cat(cat_to_idx):\n\n return {val: key for key, val in cat_to_idx.items()}" ]
[ "0.64456666", "0.6217465", "0.6213983", "0.6204705", "0.61832976", "0.6114472", "0.60462743", "0.5998794", "0.596586", "0.5962729", "0.5960021", "0.58677703", "0.58615774", "0.5853491", "0.5826413", "0.5825861", "0.5769368", "0.5730286", "0.5600615", "0.55968726", "0.55927026", "0.5578842", "0.55760485", "0.55548096", "0.5528777", "0.55238295", "0.549699", "0.5486354", "0.54862946", "0.54757315" ]
0.7166214
0
Takes a single coco dataset file path, splits images into train/val and saves them as separate coco dataset files.
def split_coco_as_train_val( coco_file_path: str, target_dir: str, train_split_rate: float ): # check if coco file is valid and read it (coco_dict, response) = read_and_validate_coco_annotation(coco_file_path) # raise error if coco file is not valid if not (response): raise TypeError # divide coco dict into train val coco dicts num_images = len(coco_dict["images"]) random_indices = np.random.permutation(num_images).tolist() num_train = int(num_images * train_split_rate) # divide images train_indices = random_indices[:num_train] val_indices = random_indices[num_train:] train_images = np.array(coco_dict["images"])[ (np.array(train_indices) - 1).tolist() ].tolist() val_images = np.array(coco_dict["images"])[ (np.array(val_indices) - 1).tolist() ].tolist() # divide annotations train_annotations = list() val_annotations = list() for annotation in coco_dict["annotations"]: if annotation["image_id"] in train_indices: train_annotations.append(annotation) elif annotation["image_id"] in val_indices: val_annotations.append(annotation) # form train val coco dicts train_coco_dict = { "images": train_images, "annotations": train_annotations, "categories": coco_dict["categories"], } val_coco_dict = { "images": val_images, "annotations": val_annotations, "categories": coco_dict["categories"], } # get filename of the base coco file base_coco_filename = os.path.basename(coco_file_path).replace(".json", "") # save train val coco files save_json( train_coco_dict, os.path.join(target_dir, base_coco_filename + "_train.json") ) save_json(val_coco_dict, os.path.join(target_dir, base_coco_filename + "_val.json"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(path_to_data, path_to_save_train,\n path_to_save_val, split_size = 0.1):\n \n folders = os.listdir(path_to_data)\n\n # get the data and split it\n for folder in folders:\n full_path = os.path.join(path_to_data, folder)\n images_paths = glob.glob(os.path.join(full_path, '*.png')) # image paths\n\n # split the data\n x_train, x_val = train_test_split(images_paths, test_size = split_size)\n\n for x in x_train:\n path_to_folder = os.path.join(path_to_save_train, folder)\n\n if not os.path.isdir(path_to_folder): # if the dir. not exist\n os.makedirs(path_to_folder) # create the directory\n \n print(\"Copying \", x, \" to \", path_to_folder)\n shutil.copy(x, path_to_folder)\n\n for x in x_val:\n path_to_folder = os.path.join(path_to_save_val, folder)\n\n if not os.path.isdir(path_to_folder): # if the dir. not exist\n os.makedirs(path_to_folder) # create the directory\n \n print(\"Copying \", x, \" to \", path_to_folder)\n shutil.copy(x, path_to_folder)", "def generate_coco_dataset_sub(args, idx1, idx2, cat):\n\tdata_path = args.data_root / '{}2017'.format(idx1)\n\tanno_path = args.data_root / 'annotations/instances_{}2017.json'.format(idx1)\t# eg. anno_path is \"datasets/COCO/annotations/instances_train2017.json\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# or \"datasets/COCO/annotations/instances_val2017.json\"\n\tcoco = COCO(anno_path) # COCO API\n\n\n\timg_path = args.save_root / '{}{}'.format(idx1, idx2)\t\t# eg. img_path is \"datasets/shp2gir_coco/trainA\" or \"datasets/shp2gir_coco/trainB\"\n\tseg_path = args.save_root / '{}{}_seg'.format(idx1, idx2)\t# eg. img_path is \"datasets/shp2gir_coco/trainA_seg\" or \"datasets/shp2gir_coco/trainB_seg\"\n\timg_path.mkdir()\t\t\t\t\t\t\t\t\t\t\t# they are empty, therefore mkdir()s\n\tseg_path.mkdir()\n\n\tcat_id = coco.getCatIds(catNms=cat)\t\t# cat is \"sheep\" or \"giraffe\",get the category's id\n\timg_id = coco.getImgIds(catIds=cat_id)\t# get the ids of sheep/giraffe images,获得所有绵羊的图片id,或者所有长颈鹿的图片id\n\timgs = coco.loadImgs(img_id)\t\t\t# 获得所有绵羊的图片(很多张),或者所有长颈鹿的图片\n\n\t# tqdm表示进度条,progress\n\t# refer:https://tqdm.github.io/\n\tpb = tqdm(total=len(imgs))\n\tpb.set_description('{}{}'.format(idx1, idx2))\n\tfor img in imgs:\n\t\tann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_id)\t# get annotation'id\n\t\tanns = coco.loadAnns(ann_ids)\t\t\t\t\t\t\t\t# get the annotation(many)\n\n\t\tcount = 0\n\t\tfor i in range(len(anns)):\t\t\t\t# 真正从标签生成mask的地方。\n\t\t\tseg = coco.annToMask(anns[i])\t\t# annotation to mask, the type is array now\n\t\t\tseg = Image.fromarray(seg * 255)\t# turn the seg array to seg image,each pix multi 255. 
why?\n\t\t\tseg = resize(seg, args.image_size)\t# resize the seg image\n\t\t\t# np.sum\n\t\t\tif np.sum(np.asarray(seg)) > 0:\t\t\t\t\t\t\t\t# 保存seg\n\t\t\t\tseg.save(seg_path / '{}_{}.png'.format(pb.n, count))\t# pb.n 表示?\n\t\t\t\tcount += 1\n\n\t\tif count > 0: # at least one instance exists\n\t\t\timg = Image.open(data_path / img['file_name'])\n\t\t\timg = resize(img, args.image_size)\n\t\t\timg.save(img_path / '{}.png'.format(pb.n))\n\n\t\tpb.update(1)\n\tpb.close()", "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def run(dataset_dir, dataset_split_name='train',num_images=20,image_h=128, image_w=128):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n # for url in _DATA_URLS:\n # download_and_uncompress_zip(url, dataset_dir)\n\n record_dir = os.path.join(dataset_dir, 'records')\n # annotation_dir = os.path.join(dataset_dir, 'annotations')\n\n if not tf.gfile.Exists(record_dir):\n tf.gfile.MakeDirs(record_dir)\n\n # process the training, validation data:\n _add_to_tfrecord(record_dir,\n num_images,\n image_h,\n image_w,\n dataset_split_name)\n\n\n print('\\nFinished converting the coco dataset!')", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, 
coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def generate_coco_dataset(args):\n\targs.data_root = Path(args.data_root)\n\targs.save_root = Path(args.save_root)\n\targs.save_root.mkdir()\n\n\tgenerate_coco_dataset_sub(args, 'train', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'train', 'B', args.cat2)\n\tgenerate_coco_dataset_sub(args, 'val', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'val', 'B', args.cat2)", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def prepare_val_coco_data(args):\n image_dir, annotation_file = args.val_coco_image_dir, args.val_coco_annotation_file\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name']))\n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n\n print(\"Building the validation dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return coco, dataset", "def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = 
os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def create_train_sets(self, proportion_val):\n l_path = os.listdir(self.image_folder_path)\n lr_path = random.sample(l_path, len(l_path))\n val_files = lr_path[: round(proportion_val * len(lr_path))]\n train_files = lr_path[round(proportion_val * len(lr_path)) :]\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"train.txt\", self.txt_path, train_files)\n write_txt(\"val.txt\", self.txt_path, val_files)", "def data_split(config_path: Text) -> None:\n\n config = yaml.safe_load(open(config_path))\n\n dataset = pd.read_csv(config['featurize']['features_data'])\n train_dataset, test_dataset = train_test_split(\n dataset, \n test_size = config['data_split']['test_size'],\n random_state = config['data_split']['random_state']\n )\n\n train_csv_path = config['data_split']['train_path']\n test_csv_path = config['data_split']['test_path']\n train_dataset.to_csv(train_csv_path, index=False)\n test_dataset.to_csv(test_csv_path, index=False)", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, 
scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def split_image(origindatadir,traindir,overload = False):\n \"\"\"origindatadir: from where to import train_data\"\"\"\n \"\"\"traindir: where to save the split data \"\"\"\n \"\"\"overload: if True and traindir and data already exist, delete traindir and split origin data again\"\"\"\n if not os.path.exists(origindatadir):\n return\n cats_dir = traindir+'/cats'\n dogs_dir = traindir+'/dogs'\n if not os.path.exists(traindir):\n os.mkdir(traindir)\n os.mkdir(cats_dir)\n os.mkdir(dogs_dir)\n else:\n #print(traindir)\n if get_subdir_filenum(traindir) > 0:\n if overload:\n shutil.rmtree(traindir)\n os.mkdir(traindir) \n os.mkdir(cats_dir)\n os.mkdir(dogs_dir)\n else:\n print(\"Destination directory already exist:\",traindir)\n return\n #开始复制\n filenames = os.listdir('train')\n for file in filenames:\n if str(file).startswith('cat'):\n shutil.copyfile(origindatadir+'/'+file, cats_dir+'/'+file) \n elif str(file).startswith('dog'):\n shutil.copyfile(origindatadir+'/'+file, dogs_dir+'/'+file)", "def split_test_train(train_folder_path, train_labels, test_folder, n_test_images):\n\n os.makedirs(test_folder, exist_ok=True)\n\n data = read_csv_to_list(train_labels)\n # Prepare test labels and move images to new folder\n labels = []\n for img in data[1:n_test_images]:\n # Input and new image paths\n # print(type(train_folder_path),type(img[0]))\n img_path = train_folder_path / (img[0] + \".dcm\")\n new_img_path = test_folder / (img[0] + \".dcm\")\n if Path(img_path).exists(): # there can be several annotations per image\n shutil.move(img_path, new_img_path)\n labels.append(img)\n\n # Prepare train labels. 
Removes duplicate as we dont need them.\n train_labels = []\n img_list_names = []\n for idx, label in enumerate(data[n_test_images + 1 :]):\n if (label[0] in img_list_names) and (idx != 0):\n continue\n img_list_names.append(label[0])\n train_labels.append(label)\n\n # labels.insert(0, data[0])\n # train_labels.insert(0, data[0])\n return train_labels, labels", "def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())", "def prepare_train_dataset(name, reso, batch_size=32):\r\n transform = transforms.Compose([\r\n transforms.RandomResizedCrop(size=reso, interpolation=3),\r\n transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),\r\n transforms.RandomVerticalFlip(),\r\n transforms.ToTensor()\r\n ])\r\n\r\n path = config.datasets[name]\r\n\r\n if name == 'coco':\r\n img_datasets = CocoDataset(root=path['train_imgs'], annFile=path['train_anno'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=CocoDataset.collate_fn)\r\n elif name == 'voc':\r\n img_datasets = VocDataset(train_list=path['train_imgs'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=VocDataset.collate_fn)\r\n\r\n return img_datasets, dataloder", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def test_train_split(folder_name):\n\n class_folders = glob.glob(os.path.join(folder_name, '*'))\n\n class_names = [i.split('/')[-1] for i in class_folders]\n\n print(class_folders)\n\n train_folder_path = os.path.join(folder_name, 'train_dir')\n validation_folder_path = os.path.join(folder_name, 'val_dir')\n\n if not os.path.exists(train_folder_path):\n os.makedirs(train_folder_path)\n if not os.path.exists(validation_folder_path):\n os.makedirs(validation_folder_path)\n\n # Create the folder structure\n 
class_folders_train = []\n class_folders_val = []\n for class_name in class_names:\n # Create calss folder in the training directory\n class_folders_train.append(os.path.join(train_folder_path, class_name))\n if not os.path.exists(class_folders_train[-1]):\n os.makedirs(class_folders_train[-1])\n # Create class folder in the validation_directory\n class_folders_val.append(os.path.join(\n validation_folder_path, class_name))\n if not os.path.exists(class_folders_val[-1]):\n os.makedirs(class_folders_val[-1])\n\n class_files = []\n\n for idx, class_folder in enumerate(class_folders):\n class_files = glob.glob(os.path.join(class_folder, '*.jpg'))\n for file in class_files[:int(len(class_files) * 0.7)]:\n copyfile(file, os.path.join(\n class_folders_train[idx], file.split('/')[-1]))\n for file in class_files[int(len(class_files) * 0.7):]:\n print(file)\n print(os.path.join(class_folders_val[idx], file.split('/')[-1]))\n copyfile(file, os.path.join(\n class_folders_val[idx], file.split('/')[-1]))", "def create_coco_label(is_training):\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict", "def get_data(input_path):\n all_imgs = []\n classes_count = {}\n class_mapping = {}\n\n # parsing Flag\n visualise = False\n\n # MSCOCO directory\n data_path = input_path\n\n print('Parsing annotation files')\n annot_path = os.path.join(data_path, 'annotations_bbox')\n imgs_path = os.path.join(data_path, 'images')\n\n # images directory (train, val, trainval, test)\n imgsets_path_trainval = os.path.join(data_path, 'images', 'trainval.txt')\n imgsets_path_train = os.path.join(data_path, 'images', 'train.txt')\n imgsets_path_val = os.path.join(data_path, 'images', 'val.txt')\n imgsets_path_test = os.path.join(data_path, 'images', 'test.txt')\n\n trainval_files = []\n train_files = []\n val_files = []\n test_files = []\n\n with open(imgsets_path_trainval) as f:\n for line in f:\n trainval_files.append(line.strip())\n\n with open(imgsets_path_train) as f:\n for line in f:\n train_files.append(line.strip())\n\n with open(imgsets_path_val) as f:\n for line in f:\n 
val_files.append(line.strip())\n\n # test-set (default) not included in MSCOCO\n if os.path.isfile(imgsets_path_test):\n with open(imgsets_path_test) as f:\n for line in f:\n test_files.append(line.strip())\n\n # annotation read\n annots_train = json.load(open(os.path.join(annot_path, 'bbox_train2017.json'), 'r'))\n annots_val = json.load(open(os.path.join(annot_path, 'bbox_val2017.json'), 'r'))\n annots = dict()\n annots['train'] = annots_train\n annots['val'] = annots_val\n\n for part in ['train', 'val']:\n annots_keys = tqdm(annots[part].keys())\n for img_name in annots_keys:\n annots_keys.set_description(\"Processing %s\" % img_name)\n for bbox in annots[part][img_name]:\n class_name = bbox['label'].replace(' ', '')\n all_imgs.append({\n \"filepath\": os.path.join(data_path, 'images', '%s2017' % part, \"%s.jpg\" % img_name),\n \"width\": None,\n \"height\": None,\n \"bboxes\": [{\n \"class\": class_name,\n \"x1\": bbox['bbox']['x1'],\n \"y1\": bbox['bbox']['x2'],\n \"x2\": bbox['bbox']['y1'],\n \"y2\": bbox['bbox']['y2'],\n \"difficult\": False\n }],\n \"image_id\": img_name,\n \"imageset\": part\n })\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n if class_name not in class_mapping:\n class_mapping[class_name] = len(class_mapping)\n\n # visualise bounding boxes\n if visualise:\n img = cv2.imread(annotation_data['filepath'])\n for bbox in annotation_data['bboxes']:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 0, 255))\n cv2.imshow('img', img)\n print(annotation_data['imageset'])\n cv2.waitKey(0)\n\n return all_imgs, classes_count, class_mapping", "def save_to_disk(x_data, y_data, usage, output_dir='cifar10_images'):\n assert usage in ['train', 'val', 'test']\n\n # Set paths\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n for label in np.unique(y_data):\n label_path = os.path.join(output_dir, usage, str(label))\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n\n for idx, img in enumerate(x_data):\n bgr_img = img[..., ::-1] # RGB -> BGR\n # label = y_data[idx][0]\n label = y_data[idx]\n img_path = os.path.join(\n output_dir, usage, str(label), 'img_{}.jpg'.format(idx)\n )\n retval = cv2.imwrite(img_path, bgr_img)\n\n assert retval, 'Problem saving image at index: {}'.format(idx)", "def convert_labels() -> None:\n data_folder = 'images'\n validation_split = 0.10\n\n # Convert annotations and split into validation and train set\n number_images = int(len(os.listdir(data_folder)) / 2)\n train_size = int(number_images * (1 - validation_split))\n val_size = number_images - train_size\n\n print(f'Training dataset size: {train_size}')\n print(f'Validation dataset size: {val_size}')\n\n with open('train.txt', 'w') as train_file, open('val.txt', 'w') as val_file:\n files = os.listdir(data_folder)\n print(len(files))\n # shuffle otherwise validation is from the same session\n random.shuffle(files)\n processed = 0\n for file_name in files:\n if file_name.split('.')[1] == 'jpg':\n # if image has no labels\n write = False\n if processed < train_size:\n file_to_write = train_file\n else:\n file_to_write = val_file\n\n with open(f'{data_folder}/{file_name}'.split('.')[0] + '.txt') as label_file:\n labels = []\n for line in label_file:\n line = line.split(' ')\n line[-1] = line[-1].rstrip()\n\n img = cv2.imread(f'{data_folder}/{file_name}')\n img_height = img.shape[0]\n img_width = img.shape[1]\n \n x = float(line[1]) * img_width\n y = float(line[2]) * img_height\n w = 
float(line[3]) * img_width\n h = float(line[4]) * img_height\n\n xmin = int(x - w/2)\n ymin = int(y - h/2)\n xmax = int(x + w/2)\n ymax = int(y + h/2)\n\n labels.append(f' {xmin},{ymin},{xmax},{ymax},{line[0]}')\n if len(labels) > 0:\n write = True\n file_to_write.write(f'{data_folder}/{file_name}')\n for label in labels:\n file_to_write.write(label)\n if write:\n file_to_write.write('\\n') \n processed += 1\n print(f'[{processed}/{number_images}] Processed {file_name}')", "def write_data(data_to_write_train, data_to_write_test, train_path, eval_path ,test_path):\n aligned_lists_train = data_to_write_train[0]\n raw_lists_train = data_to_write_train[2]\n\n aligned_lists_eval = data_to_write_test[0]\n raw_lists_eval = data_to_write_test[2]\n\n aligned_lists_test = data_to_write_test[1]\n raw_lists_test = data_to_write_test[3]\n\n filelist = list([train_path, eval_path, test_path])\n\n for file in filelist:\n aligned_path = os.path.join(file, 'aligned_image')\n raw_path = os.path.join(file, 'raw_image')\n os.mkdir(aligned_path)\n os.mkdir(raw_path)\n\n # raw image data\n for Idx, train_raw in enumerate(raw_lists_train):\n img = Image.open(train_raw)\n img.save(train_path+'/raw_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTrain raw images saved! ')\n\n for Idx, eval_raw in enumerate(raw_lists_eval):\n img = Image.open(eval_raw)\n img.save(eval_path+'/raw_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tEval raw images saved! ')\n\n for Idx, test_raw in enumerate(raw_lists_test):\n img = Image.open(test_raw)\n img.save(test_path+'/raw_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTest raw images saved! ')\n\n # aligned image data\n for Idx, train_aligned in enumerate(aligned_lists_train):\n img = Image.open(train_aligned)\n img.save(train_path+'/aligned_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTrain aligned images saved! ')\n\n for Idx, eval_aligned in enumerate(aligned_lists_eval):\n img = Image.open(eval_aligned)\n img.save(eval_path+'/aligned_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tEval aligned images saved! ')\n\n for Idx, test_aligned in enumerate(aligned_lists_test):\n img = Image.open(test_aligned)\n img.save(test_path+'/aligned_image/img_'+f'{Idx:04d}.jpg')\n if Idx%100 == 0:\n print('\\t%d images are saved'% Idx); \n print('\\tTest aligned images saved! 
')", "def coco_split_dataset(\n coco_json_path, image_dir, output_dir, dataset_list_name, ratio_list\n):\n param_info = [\n (coco_json_path, 'coco_json_path', (str, Path)),\n (image_dir, 'image_dir', (str, Path)),\n (output_dir, 'output_dir', (str, Path)),\n (dataset_list_name, 'dataset_list_name', list),\n (ratio_list, 'ratio_list', list)\n ]\n for param in param_info:\n type_sanity(param[0], param[1], param[2])\n\n for dataset_name in dataset_list_name:\n if not isinstance(dataset_name, (str, Path)):\n raise SABaseException(\n 0,\n \"'dataset_list_name' member should be 'str' or 'Path' type, not '%s'\"\n % (type(dataset_name))\n )\n\n for ratio in ratio_list:\n if not isinstance(ratio, (int, float)):\n raise SABaseException(\n 0,\n \"'ratio_list' member should be 'int' or 'float' type, not '%s'\"\n % (type(ratio))\n )\n\n if sum(ratio_list) != 100:\n raise SABaseException(0, \"Sum of 'ratio_list' members must be '100'\")\n\n if len(dataset_list_name) != len(ratio_list):\n raise SABaseException(\n 0, \"'dataset_list_name' and 'ratio_list' should have same lenght\"\n )\n\n if isinstance(image_dir, str):\n image_dir = Path(image_dir)\n if isinstance(output_dir, str):\n output_dir = Path(output_dir)\n\n split_coco(\n coco_json_path, image_dir, output_dir, dataset_list_name, ratio_list\n )", "def main(root_dir):\n # load annotations\n print('Loading instances and annotations...')\n captions_file = json.load(open('{}/annotations/captions_train2017.json'.format(root_dir), 'r'))\n categories_file = json.load(open('{}/annotations/instances_train2017.json'.format(root_dir), 'r'))\n print('Done.')\n\n # group categories by image\n image_categories = group_categories(categories_file)\n\n # group captions by image\n image_captions = group_captions(captions_file['annotations'])\n\n # get filename of each image\n image_file = get_filename(captions_file['images'])\n\n # assign each category an id.\n # we are not using the default ids given in the dataset because\n # the id ranges are not continuous.\n category_id, id_category = map_category_id(categories_file['categories'])\n \n # save parsed coco dataset\n save_dataset(image_categories, image_captions, image_file, category_id, id_category, root_dir)", "def _process_dataset(all_train_img, all_train_label, all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n 
print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def _make_dataset(input_dir, output_dir, image_size, margin, split='train'):\n input_dir = os.path.join(input_dir, split)\n\n output_root = os.path.join(output_dir, split)\n if not os.path.exists(output_root):\n os.makedirs(output_root)\n\n class_folders = glob.glob(os.path.join(input_dir, '*'))\n detector = MTCNN()\n\n for class_folder in class_folders:\n target_output_dir = os.path.join(output_root, class_folder.split('/')[-1])\n if not os.path.exists(target_output_dir):\n os.makedirs(target_output_dir)\n\n target_files = glob.glob(os.path.join(class_folder, '*'))\n logger.debug('processing %s...', class_folder)\n for file in target_files:\n img = cv2.imread(file)\n detect_result = detector.detect_faces(img)\n\n if not detect_result:\n logger.warning('WARNING: failed to detect face in file %s, skip', file)\n continue\n\n x0, y0, width, height = detect_result[0]['box']\n x1, y1 = x0 + width, y0 + height\n\n x0 = max(x0 - margin // 2, 0)\n y0 = max(y0 - margin // 2, 0)\n x1 = min(x1 + margin // 2, img.shape[1])\n y1 = min(y1 + margin // 2, img.shape[0])\n\n face_img = img[y0:y1, x0:x1, :]\n face_img = cv2.resize(face_img, dsize=(image_size, image_size),\n interpolation=cv2.INTER_LINEAR)\n\n filename = file.split('/')[-1]\n img_name = filename.split('.')[0]\n cv2.imwrite(os.path.join(target_output_dir, filename),\n face_img)\n with open(os.path.join(target_output_dir, img_name + '.txt'), 'w') as f:\n f.write('%d %d %d %d\\n' % (x0, y0, x1, y1))\n logger.debug('processing %s finished!', class_folder)", "def run(dataset_dir,pic_path):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n training_filename = _get_output_filename(dataset_dir, 'train')\n testing_filename = _get_output_filename(dataset_dir, 'test')\n\n if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):\n print('Dataset files already exist. 
Exiting without re-creating them.')\n return\n\n class_names = os.listdir(pic_path)\n labels_to_class_names = dict(zip(class_names,range(len(class_names))))\n \n picnames=[]\n for label in class_names:\n alabel_path=os.path.join(pic_path,label)\n names=os.listdir(alabel_path)\n picnames.extend([os.path.join(alabel_path,name) for name in names])\n random.shuffle(picnames) \n \n train_picnames = picnames[:int(0.7*len(picnames))]\n test_picnames = picnames[int(0.7*len(picnames)):]\n # First, process the training data:\n with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:\n offset = 0\n for name in train_picnames:\n offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)\n\n # Next, process the testing data:\n with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:\n offset = 0\n for name in test_picnames:\n offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)\n\n # Finally, write the labels file:\n labels_to_class_names = dict(zip(labels_to_class_names.values(),labels_to_class_names.keys())) \n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n with open(os.path.join(dataset_dir,'info.json'),'w') as f:\n info=json.dumps({'num_class':len(class_names),'num_sample_train':len(train_picnames),'num_sample_test':len(test_picnames)})\n f.write(info)\n\n print('\\nFinished converting the dataset in the {}!'.format(pic_path))\n print('\\nThe tfrecord files,info.json and labels file is located in the {}'.format(dataset_dir))" ]
[ "0.6767223", "0.6766674", "0.6721887", "0.66965336", "0.6577553", "0.6528394", "0.6492255", "0.6409701", "0.6390866", "0.6370787", "0.63393295", "0.6304204", "0.63032174", "0.62837404", "0.62496305", "0.6246795", "0.62207115", "0.6187223", "0.6179907", "0.6172584", "0.61718875", "0.61701876", "0.6143775", "0.61342674", "0.61020577", "0.60916394", "0.6078804", "0.60736847", "0.6066579", "0.6063539" ]
0.7158573
0
Saves a dict as a YAML file.
def save_yaml(dict_file, yaml_path):
    with open(yaml_path, "w") as file:
        documents = yaml.dump(dict_file, file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(dikt):\n with open(SAVE_FILE_NAME, 'w') as save_file:\n yaml.safe_dump(dikt, save_file)", "def save_yaml_to_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n d = i['dict']\n\n try:\n # If using just dump and keys are in unicode,\n # pyyaml adds warning and makes produced yaml unparsable\n s = yaml.safe_dump(d)\n except Exception as e:\n return {'return': 1, 'error': 'problem converting dict to YAML ('+format(e)+')'}\n\n return save_text_file({'text_file': fn, 'string': s})", "def save_file(filename,d):\n f = open(filename, 'w')\n yaml.dump(d, f)\n f.close()", "def save(self, filename):\n with open(filename, 'w') as f:\n yaml.dump(self.to_dict(), f, sort_keys=False)", "def save_solution_dict(solution_dict: Dict[int, Dict[str, List[str or Tuple[str, str]]]],\n save_file_path: str = \"solution_dict\"):\n\n if not solution_dict:\n raise ValueError(\"There is no dict containing the solution given.\")\n\n if save_file_path.endswith(\".yaml\"):\n save_file_path = save_file_path.replace(\".yaml\", \"\")\n\n with open(\"{}.yaml\".format(save_file_path), \"w\") as file:\n yaml.safe_dump(solution_dict, file)", "def save_yaml(data, write_path: PathLike) -> None:\n with open(write_path, \"w\") as write_file:\n yaml.dump(data, write_file, default_flow_style=False)", "def write_yaml(yaml_config: Dict[str, Any], filename: str) -> None:\n\n with open(filename, 'w') as outfile:\n yaml.dump(yaml_config, outfile, default_flow_style=False,\n sort_keys=False)", "def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)", "def save_config(config: Dict[str, Any], path: str) -> None:\n\n with open(path, 'w+', encoding='utf-8') as stream:\n yaml.dump(config, stream, default_flow_style=False)", "def store_as_yaml(dataset, dataset_file):\n\n with open(dataset_file, 'w') as outfile:\n yaml.safe_dump(dataset, outfile, default_flow_style=False)", "def save(self, filename=None):\n name = filename or self.filename\n with open(name, \"w\") as stream:\n yaml.dump(self.data, stream, default_flow_style=False)", "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def _save_data_yaml(self, data, pathname): \n pathname = self._yaml_extension(pathname)\n with open(pathname, \"w\") as outfile:\n yaml.dump(data, outfile, default_flow_style=False)", "def _save_data_yaml(self, data, pathname): \n pathname = self._yaml_extension(pathname)\n with open(pathname, \"w\") as outfile:\n yaml.dump(data, outfile, default_flow_style=False)", "def save(config: dict, out_dir: str, filename: str = \"config.yaml\"):\n assert filename.endswith(\".yaml\")\n with open(os.path.join(out_dir, filename), \"w+\") as f:\n f.write(yaml.dump(config))", "def _writeToFile(out_model_dict, model_directory, parent):\n\n fname = compat.getsavefilename(parent=parent,\n caption='Save to file',\n basedir=model_directory)[0]\n\n if len(fname) > 0:\n # enforce correct suffix.\n if not fname.endswith(\".yaml\"):\n fname += \".yaml\"\n\n f = open(fname, \"w\")\n yaml.dump(out_model_dict, f,default_flow_style=False)\n f.close()", "def save_to_yaml(self, path=None):\n\n if not path:\n path = \".\".join([self.name.value, \"yaml\"])\n\n planet_dict = {}\n for a in sorted(self.attributes):\n exo_param = getattr(self, a)\n param_dict = exo_param.__dict__\n param_dict = {k: str(v)\n for k, v in param_dict.items()\n if v and len(str(v)) > 0}\n planet_dict[a] = param_dict\n\n with open(path, 'w') as yamlfile:\n 
yaml.dump(planet_dict, yamlfile, default_flow_style=False)", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def saveData(data, file, path='./data/'):\n\twith open(\"{}{}.yml\".format(path, file), 'w') as out:\n\t\tyaml.dump(data, out)", "def write_yaml(fname: str, data: dict) -> None:\n try:\n with open(fname, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n except IOError as e:\n print(f\"Cannot write YAML file {fname}\")\n print(f\"IOError: {e}\")", "def saveToFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n stream = open(path,\"w\")\n yaml.dump(self.parameters(),stream)", "def save_dict_as_yaml_integration_file(self, output_file: str):\n logger.debug(f\"Writing collected metadata to {output_file}.\")\n\n write_yml(output_file, self.metadata_dict)\n logger.info(\"[green]Finished successfully.[/green]\")", "def dict_to_yaml(dict_data):\n\n return yaml.dump(dict_data, default_flow_style=False)", "def save(self, path):\n logger.debug(path)\n if not os.path.isdir(path):\n os.makedirs(path)\n\n data = yaml.safe_dump(self.to_dict(), default_flow_style=False)\n\n file_path = os.sep.join([path, PROJECT_YAML_FILE])\n\n with open(file_path, 'w') as f:\n f.write(data)", "def save(data, path=\".travis.yml\"):\n if not path:\n path = \".travis.yml\"\n with open(path, 'w') as outfile:\n yaml.dump(data, outfile, Dumper=yaml.RoundTripDumper)", "def dump_yaml(file_path, data):\n\n with open(os.path.abspath(os.path.expanduser(file_path)), \"w\") as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n\n return file_path", "def load():\n with open(SAVE_FILE_NAME, 'r') as save_file:\n dikt = yaml.safe_load(save_file)\n if dikt is None:\n dikt = {}\n return dikt", "def save(self) -> None:\n with open(dict_path, 'w', encoding='utf-8') as dictionary_file:\n json.dump(self.data, dictionary_file, indent=2, separators=(',', ':'), ensure_ascii=False)", "def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)", "def save_dict_to_file(dictionary: dict, dst_path: str) -> None:\n with io.open(file=dst_path, mode=\"w\", encoding=\"utf-8\") as dst:\n for k, v in dictionary.items():\n dst.write(f\"{k} {v}\\n\")\n dst.close()" ]
[ "0.82348216", "0.79898936", "0.79564303", "0.7664049", "0.74451005", "0.7425517", "0.7238184", "0.7232736", "0.72081083", "0.7199662", "0.7193838", "0.70849293", "0.7078301", "0.7078301", "0.70539814", "0.7035597", "0.69795805", "0.6960351", "0.69413203", "0.6937528", "0.69032586", "0.68215823", "0.6798315", "0.6797097", "0.67786926", "0.6750029", "0.67151767", "0.67146957", "0.666865", "0.6659106" ]
0.83138406
0
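A minimal, self-contained usage sketch of the save_yaml helper in the record above (the function is restated so the snippet runs on its own); it assumes PyYAML is installed, and the settings dict and file name are made-up placeholders:

import yaml

def save_yaml(dict_file, yaml_path):
    with open(yaml_path, "w") as file:
        yaml.dump(dict_file, file)

# Hypothetical round trip: write a small config dict, then read it back.
settings = {"learning_rate": 0.001, "epochs": 10}
save_yaml(settings, "settings.yaml")
with open("settings.yaml") as file:
    assert yaml.safe_load(file) == settings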
Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tqdm(*args, **kwargs):\n kwargs_ = dict(file=sys.stdout, disable=C.DISPLAY.PROGRESS.DISABLE, leave=False)\n kwargs_.update(kwargs)\n clear_tqdm()\n return tq.tqdm(*args, **kwargs_)", "def tqdm_notebook(*args, **kwargs): # pragma: no cover\n from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook\n return _tqdm_notebook(*args, **kwargs)", "def tqdm_joblib(tqdm_object):\n\n def tqdm_print_progress(self):\n if self.n_completed_tasks > tqdm_object.n:\n n_completed = self.n_completed_tasks - tqdm_object.n\n tqdm_object.update(n=n_completed)\n\n original_print_progress = joblib.parallel.Parallel.print_progress\n joblib.parallel.Parallel.print_progress = tqdm_print_progress\n\n try:\n yield tqdm_object\n finally:\n joblib.parallel.Parallel.print_progress = original_print_progress\n tqdm_object.close()", "def _get_progress_bar(dataloader: DataLoader, verbose: bool=False) ->Union[DataLoader, 'tqdm.auto.tqdm']:\n return tqdm.auto.tqdm(dataloader) if verbose else dataloader", "def provide_progress_bar(\r\n function, estimated_time, tstep=0.2, tqdm_kwargs={}, args=[], kwargs={}\r\n):\r\n ret = [None] # Mutable var so the function can store its return value\r\n\r\n def myrunner(function, ret, *args, **kwargs):\r\n ret[0] = function(*args, **kwargs)\r\n\r\n thread = threading.Thread(\r\n target=myrunner, args=(function, ret) + tuple(args), kwargs=kwargs\r\n )\r\n pbar = tqdm.tqdm(total=estimated_time, **tqdm_kwargs)\r\n\r\n thread.start()\r\n while thread.is_alive():\r\n thread.join(timeout=tstep)\r\n pbar.update(tstep)\r\n pbar.close()\r\n return ret[0]", "def trange(*args, **kwargs):\n kwargs_ = dict(file=sys.stdout, disable=C.DISPLAY.PROGRESS.DISABLE, leave=False)\n kwargs_.update(kwargs)\n clear_tqdm()\n return tq.trange(*args, **kwargs_)", "def disable_tqdm():\n return (True if (logger.getEffectiveLevel()>=30) else False)", "def disable_tqdm():\n return (True if (logger.getEffectiveLevel()>=30) else False)", "def clear_tqdm():\n inst = getattr(tq.tqdm, '_instances', None)\n if not inst:\n return\n try:\n for _ in range(len(inst)):\n inst.pop().close()\n except Exception: # pylint:disable=broad-except\n pass", "def get_tqdm(unlab_size, findings_cnt, tot_cnt):\n tqdm_predicting = tqdm(total=unlab_size, desc='Predicting', position=0, initial=tot_cnt)\n tqdm_findings = tqdm(total=unlab_size, desc='Findings', \n position=1, bar_format='{desc}:{bar}{n_fmt}', initial=findings_cnt)\n \n return tqdm_predicting, tqdm_findings", "def progress_wrapped(estimated_time, tstep=0.2, tqdm_kwargs={}):\r\n\r\n def real_decorator(function):\r\n @functools.wraps(function)\r\n def wrapper(*args, **kwargs):\r\n return provide_progress_bar(\r\n function,\r\n estimated_time=estimated_time,\r\n tstep=tstep,\r\n tqdm_kwargs=tqdm_kwargs,\r\n args=args,\r\n kwargs=kwargs,\r\n )\r\n\r\n return wrapper\r\n\r\n return real_decorator", "def make_show_progress():\n \n start_time = time.time()\n lines_read = 0\n\n def show_progress(chunk_length):\n \"\"\"Displays a progress line. Created by make_show_progress.\"\"\"\n \n nonlocal lines_read\n\n lines_read += chunk_length\n elapsed_time = int(time.time() - start_time)\n print('{:,} lines read | time {:,}s'.format(lines_read, elapsed_time))\n\n return show_progress", "def show_progress(show, current, max, text, *args):\n if show:\n progress = round((float(current) / max) * 100.0, 0)\n output = \"\\r\" + text.format(*args) + \" {0}% done. 
\".format(progress) \n sys.stdout.write(output)\n sys.stdout.flush()", "def tqdm_joblib(tqdm_object):\n # https://stackoverflow.com/questions/24983493/tracking-progress-of-joblib-parallel-execution\n class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n tqdm_object.update(n=self.batch_size)\n return super().__call__(*args, **kwargs)\n\n old_batch_callback = joblib.parallel.BatchCompletionCallBack\n joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback\n try:\n yield tqdm_object\n finally:\n joblib.parallel.BatchCompletionCallBack = old_batch_callback\n tqdm_object.close()", "def tqdm_joblib(tqdm_object):\n class TqdmBatchCompletionCallback:\n def __init__(self, time, index, parallel):\n self.index = index\n self.parallel = parallel\n\n def __call__(self, index):\n tqdm_object.update()\n if self.parallel._original_iterator is not None:\n self.parallel.dispatch_next()\n\n old_batch_callback = joblib.parallel.BatchCompletionCallBack\n joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback\n try:\n yield tqdm_object\n finally:\n joblib.parallel.BatchCompletionCallBack = old_batch_callback\n tqdm_object.close()", "def call_progress_bar(result_parts, line_no):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n try:\n from tqdm.autonotebook import tqdm as tqdm_notebook\n except ImportError:\n raise ImportError(\"Please pip install tqdm to use the progress bar\")\n from IPython import get_ipython\n\n try:\n cell_no = get_ipython().execution_count\n # This happens if we are not in ipython or jupyter.\n # No progress bar is supported in that case.\n except AttributeError:\n return\n pbar_id = f\"{cell_no}-{line_no}\"\n futures = [\n block\n for row in result_parts\n for partition in row\n for block in partition.list_of_blocks\n ]\n bar_format = (\n \"{l_bar}{bar}{r_bar}\"\n if \"DEBUG_PROGRESS_BAR\" in os.environ\n and os.environ[\"DEBUG_PROGRESS_BAR\"] == \"True\"\n else \"{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}\"\n )\n bar_lock.acquire()\n if pbar_id in progress_bars:\n if hasattr(progress_bars[pbar_id], \"container\"):\n if hasattr(progress_bars[pbar_id].container.children[0], \"max\"):\n index = 0\n else:\n index = 1\n progress_bars[pbar_id].container.children[index].max = progress_bars[\n pbar_id\n ].container.children[index].max + len(futures)\n progress_bars[pbar_id].total = progress_bars[pbar_id].total + len(futures)\n progress_bars[pbar_id].refresh()\n else:\n progress_bars[pbar_id] = tqdm_notebook(\n total=len(futures),\n desc=\"Estimated completion of line \" + str(line_no),\n bar_format=bar_format,\n )\n bar_lock.release()\n\n threading.Thread(target=_show_time_updates, args=(progress_bars[pbar_id],)).start()\n\n modin_engine = Engine.get()\n engine_wrapper = None\n if modin_engine == \"Ray\":\n from modin.core.execution.ray.common.engine_wrapper import RayWrapper\n\n engine_wrapper = RayWrapper\n elif modin_engine == \"Unidist\":\n from modin.core.execution.unidist.common.engine_wrapper import UnidistWrapper\n\n engine_wrapper = UnidistWrapper\n else:\n raise NotImplementedError(\n f\"ProgressBar feature is not supported for {modin_engine} engine.\"\n )\n\n for i in range(1, len(futures) + 1):\n engine_wrapper.wait(futures, num_returns=i)\n progress_bars[pbar_id].update(1)\n progress_bars[pbar_id].refresh()\n if progress_bars[pbar_id].n == 
progress_bars[pbar_id].total:\n progress_bars[pbar_id].close()", "def tqdm_joblib(tqdm_object):\n class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n tqdm_object.update(n=self.batch_size)\n return super().__call__(*args, **kwargs)\n\n old_batch_callback = joblib.parallel.BatchCompletionCallBack\n joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback\n try:\n yield tqdm_object\n finally:\n joblib.parallel.BatchCompletionCallBack = old_batch_callback\n tqdm_object.close()", "def optional_progressbar(iter: Generator[T, None, None],\n title: Optional[str] = None,\n n: Optional[int] = None,\n progress: Optional[bool] = None,\n time_threshold: float = 5.0) -> Generator[T, None, None]:\n # tqdm is unavailable, use original generator\n if tqdm is None:\n yield from iter\n return\n # Config override\n if progress is None and not config.Config.get_bool('progress'):\n yield from iter\n return\n\n # If length was not given, try to determine from generator (if, e.g., list)\n if n is None:\n try:\n n = len(iter)\n except (TypeError, AttributeError):\n n = None\n\n # Collect starting data\n if progress is True:\n pbar = tqdm(total=n, desc=title)\n else:\n pbar = None\n\n start = time.time()\n for counter, elem in enumerate(iter):\n if pbar is None and (time.time() - start) > time_threshold:\n pbar = tqdm(total=n, desc=title, initial=counter)\n\n yield elem\n\n if pbar is not None:\n pbar.update(1)\n\n if pbar is not None:\n pbar.close()", "def tqdm(iterable, desc='', total=None, leave=True, file=sys.stderr,\n mininterval=0.05, miniters=1, extra=\"\"):\n if total is None:\n try:\n total = len(iterable)\n except TypeError:\n total = None\n \n prefix = desc+': ' if desc else ''\n\n do_rgb = not os.getenv(\"STY\")\n do_ascii = not not os.getenv(\"STY\")\n \n sp = StatusPrinter(file)\n sp.print_status(prefix + format_meter(0, total, 0, do_rgb, do_ascii, extra=extra))\n \n start_t = last_print_t = time.time()\n last_print_n = 0\n n = 0\n for obj in iterable:\n yield obj\n # Now the object was created and processed, so we can print the meter.\n n += 1\n if n - last_print_n >= miniters:\n # We check the counter first, to reduce the overhead of time.time()\n cur_t = time.time()\n if cur_t - last_print_t >= mininterval:\n sp.print_status(prefix + format_meter(n, total, cur_t-start_t, do_rgb, do_ascii, extra=extra))\n last_print_n = n\n last_print_t = cur_t\n \n if not leave:\n sp.print_status('')\n sys.stdout.write('\\r')\n else:\n if last_print_n < n:\n cur_t = time.time()\n sp.print_status(prefix + format_meter(n, total, cur_t-start_t, do_rgb, do_ascii, extra=extra))\n file.write('\\n')", "def maybe_verbose_iterable(data: Iterable, **kwargs) -> Iterable:\n if bool(int(os.getenv(\"det_verbose\", 1))):\n return tqdm(data, **kwargs)\n else:\n return data", "def progress_bar_wrapper(f):\n from functools import wraps\n\n @wraps(f)\n def magic(*args, **kwargs):\n result_parts = f(*args, **kwargs)\n if ProgressBar.get():\n current_frame = inspect.currentframe()\n function_name = None\n while function_name != \"<module>\":\n (\n filename,\n line_number,\n function_name,\n lines,\n index,\n ) = inspect.getframeinfo(current_frame)\n current_frame = current_frame.f_back\n t = threading.Thread(\n target=call_progress_bar,\n args=(result_parts, line_number),\n )\n t.start()\n # We need to know whether or not we are in a jupyter notebook\n from IPython import 
get_ipython\n\n try:\n ipy_str = str(type(get_ipython()))\n if \"zmqshell\" not in ipy_str:\n t.join()\n except Exception:\n pass\n return result_parts\n\n return magic", "def tqdm_joblib(iterable=None, **kw):\n pbar = tqdm(iterable, **kw)\n\n class TqdmBatchCompletionCallback:\n def __init__(self, time, index, parallel):\n self.index = index\n self.parallel = parallel\n\n def __call__(self, index):\n pbar.update()\n if self.parallel._original_iterator is not None:\n self.parallel.dispatch_next()\n\n old_batch_callback = joblib.parallel.BatchCompletionCallBack\n joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback\n try:\n yield pbar\n finally:\n joblib.parallel.BatchCompletionCallBack = old_batch_callback\n pbar.close()", "def run_with_progress_bar(num_items: int, fn: Callable, item_type: str = \"doc\") \\\n -> List[pd.DataFrame]:\n # Imports inline to avoid creating a hard dependency on ipywidgets/IPython\n # for programs that don't call this funciton.\n # noinspection PyPackageRequirements\n import ipywidgets\n # noinspection PyPackageRequirements\n from IPython.display import display\n\n _UPDATE_SEC = 0.1\n result = [] # Type: List[pd.DataFrame]\n last_update = time.time()\n progress_bar = ipywidgets.IntProgress(0, 0, num_items,\n description=\"Starting...\",\n layout=ipywidgets.Layout(width=\"100%\"),\n style={\"description_width\": \"12%\"})\n display(progress_bar)\n for i in range(num_items):\n result.append(fn(i))\n now = time.time()\n if i == num_items - 1 or now - last_update >= _UPDATE_SEC:\n progress_bar.value = i + 1\n progress_bar.description = f\"{i + 1}/{num_items} {item_type}s\"\n last_update = now\n progress_bar.bar_style = \"success\"\n return result", "def challenge_one():\n\n import time\n from tqdm import tqdm\n for number in tqdm(range(10)):\n time.sleep(1) #Waits one second before continuing.", "def _prepare(self, progress: BaseProgressMonitor):\n self._started = True\n self._total_bytes = None\n self._downloaded_bytes = 0\n self._progress = progress\n if self.show_progress_bar:\n self._tqdm = tqdm(total=None, unit=\"bytes\", dynamic_ncols=True, file=sys.stdout)\n else:\n self._tqdm = None", "def printProgressBar(iteration, total, pbar=False, prefix = '', suffix = '', decimals = 1, length = 50, fill = 'X', verbose=False):\n\n from .module_exists import module_exists\n from .in_ipynb import in_ipynb\n\n if module_exists('tqdm'):\n if type(pbar) == bool:\n if in_ipynb():\n if verbose: print('- NOTEBOOK MODE -')\n from tqdm import tqdm_notebook as tqdm\n else:\n if verbose: print('- PYTHON/BASH MODE -')\n from tqdm import tqdm\n pbar = tqdm(total=total)\n pbar.update(iteration)\n else:\n pbar.update(iteration-pbar.last_print_n)\n if iteration == total: pbar.close()\n return pbar\n\n else:\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n #print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n print('\\r{0} |{1}| %{2} %{3}'.format(prefix, bar, percent, suffix))\n # Print New Line on Complete\n if iteration == total:\n print()", "def pyout(*args, ex=None):\n trace = traceback.format_stack()[-2].split('\\n')\n _tqdm_write(\"\\033[1;33m\" + trace[0].split(', ')[0].replace(' ', '') + \"\\033[0m\")\n _tqdm_write(\"\\033[1;33m\" + trace[0].split(', ')[1].split(' ')[1] + ':', trace[1].replace(' ', ''), \"\\033[0m\")\n _tqdm_write(*args)\n _tqdm_write(\"\")\n if ex is not None:\n 
sys.exit(ex)", "def makeProgressbar(target, message=\"\", updateRate=1):\n\n if stderrConnectedToTerm():\n return TerminalProgressbar(target, message)\n\n return FileProgressbar(target, message, updateRate)", "def _should_display_progress(display_progress: bool):\n return (display_progress and is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage())", "def Progress(func):\n def wrapper(*args, **kwargs):\n bar = ttk.Progressbar(master = root, length = 100, mode = 'indeterminate')\n bar.grid(column = 1, row = 11)\n bar.start()\n time.sleep(2)\n result = func(*args, **kwargs)\n try:\n time.sleep(2)\n bar.stop()\n bar.destroy()\n except:\n pass\n return result\n return wrapper" ]
[ "0.7338987", "0.65732175", "0.6251407", "0.60839", "0.6057334", "0.5817028", "0.56528646", "0.56528646", "0.5522953", "0.54382324", "0.54152805", "0.54006755", "0.5382825", "0.5379074", "0.53534347", "0.5342401", "0.527672", "0.5274278", "0.52671427", "0.5219475", "0.51885796", "0.5183253", "0.5137013", "0.50963193", "0.50676537", "0.50654453", "0.5006063", "0.49614537", "0.4955015", "0.49503484" ]
0.8139409
0
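A hedged usage sketch for the wrapper in the record above: it assumes `accelerate` and `tqdm` are installed, that the process state is initialised (e.g. the script was started with `accelerate launch`), and that the wrapper is importable from the path shown (an assumption; in recent Accelerate releases it lives under accelerate.utils). With the signature above, `main_process_only` is the first positional argument and everything else is forwarded to `tqdm.tqdm`.

from accelerate.utils import tqdm  # assumed import path for the wrapper above

for step in tqdm(True, range(1000), desc="training"):
    pass  # only the local main process renders the progress bar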
This function converts an IPython notebook to a .py file, removes the convert command, and copies the .py to the Anaconda directory where it can be imported by other notebooks. Don't forget to add a '# end of .py file' comment at the end of the notebook.
def convert_to_py(fname):
    exec_command("ipython nbconvert --to=python " + fname + ".ipynb")
    f = open(fname + '.py', 'r')
    all_lines = f.readlines()
    f.close()
    end_line_num = all_lines.index('# end of .py file\n')
    f = open(fname + '.py', 'w')
    f.writelines(all_lines[:end_line_num])
    f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_convert(args):\n try:\n file_path = args.file_name # os.path.join(static_path, args.file_name)\n if args.slides:\n config_path = os.path.join(static_path, \"config\", \"slides_config.py\")\n output = subprocess.check_output(\n [\n \"jupyter\",\n \"nbconvert\",\n file_path,\n \"--to\",\n \"slides\",\n \"--CSSHTMLHeaderPreprocessor.style=colorful\",\n \"--reveal-prefix\",\n args.reveal_prefix,\n \"--config\",\n config_path,\n ],\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n else:\n config_path = os.path.join(static_path, \"config\", \"nb_config.py\")\n output = subprocess.check_output(\n [\n \"jupyter\",\n \"nbconvert\",\n file_path,\n \"--to\",\n \"html\",\n \"--config\",\n config_path,\n ],\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n print(output.rstrip())\n _name = get_out_name(args)\n # _name = output.split(\" \")[-1].rstrip()\n if args.c:\n with open(_name, \"r\") as f:\n clean_file = clean_html(f.read())\n with open(_name, \"w\") as f:\n f.write(clean_file)\n if args.bib_name is not None:\n add_ref(_name, args.bib_name, keep_label=args.l, slides=args.slides)\n else:\n with open(_name, \"r\") as f:\n clean_file = clean_html_refs(clean_file)\n with open(_name, \"w\") as f:\n f.write(clean_file)\n except IndexError:\n print(\"Provide the name of the notebook.\")", "def convert_notebook(all_flag, overwrite_flag, filepath):\n context = load_context(Path.cwd())\n\n if not filepath and not all_flag:\n secho(\n \"Please specify a notebook filepath \"\n \"or add '--all' to convert all notebooks.\"\n )\n sys.exit(1)\n\n kedro_project_path = context.project_path\n kedro_package_name = \"za_covid_map\"\n\n if all_flag:\n # pathlib glob does not ignore hidden directories,\n # whereas Python glob does, which is more useful in\n # ensuring checkpoints will not be included\n pattern = kedro_project_path / \"**\" / \"*.ipynb\"\n notebooks = sorted(Path(p) for p in iglob(str(pattern), recursive=True))\n else:\n notebooks = [Path(f) for f in filepath]\n\n counter = Counter(n.stem for n in notebooks)\n non_unique_names = [name for name, counts in counter.items() if counts > 1]\n if non_unique_names:\n raise KedroCliError(\n \"Found non-unique notebook names! \"\n \"Please rename the following: {}\".format(\", \".join(non_unique_names))\n )\n\n for notebook in notebooks:\n secho(\"Converting notebook '{}'...\".format(str(notebook)))\n output_path = (\n kedro_project_path\n / \"src\"\n / kedro_package_name\n / \"nodes\"\n / \"{}.py\".format(notebook.stem)\n )\n\n if output_path.is_file():\n overwrite = overwrite_flag or click.confirm(\n \"Output file {} already exists. 
Overwrite?\".format(str(output_path)),\n default=False,\n )\n if overwrite:\n export_nodes(notebook, output_path)\n else:\n export_nodes(notebook, output_path)\n\n secho(\"Done!\")", "def post_save(model, os_path, contents_manager):\n if model['type'] != 'notebook':\n return # only do this for notebooks\n d, fname = os.path.split(os_path)\n check_call(['jupyter', 'nbconvert', '--to', 'script', fname], cwd=d)", "def export_notebook(nbbasename, output_directory, output_format):\n\n if output_format == \"html\":\n ipython_command = IPYTHON_NBCONVERT_HTML\n elif output_format == \"pdf\":\n ipython_command = IPYTHON_NBCONVERT_PDF\n else:\n raise ValueError(\"Unknown output format: {}\".format(output_format))\n\n prev_dir = os.getcwd()\n os.chdir(output_directory)\n\n with open(os.devnull, 'w') as devnull:\n subprocess.check_call(ipython_command + [nbbasename], stderr=devnull)\n\n os.chdir(prev_dir)", "def export_notebook():\n #system(\"jupyter nbconvert --to HTML \\\"Look At Enron data set.ipynb\\\"\")\n system(\"jupyter nbconvert --to HTML --output=Look+At+Enron+data+set.html \\\"Look At Enron data set.ipynb\\\"\")\n return", "def post_save(model, os_path, contents_manager):\n if model['type'] != 'notebook':\n return # only do this for notebooks\n # split in directory and file name\n nb_path, nb_filename = os.path.split(os_path)\n # split out filename\n nb_name = os.path.splitext(nb_filename)[0]\n # add .py extension for target python module\n py_name = nb_name + \".py\"\n # defined modules path in /srv (hardcoded to prevent notebooks subfolder relative problems)\n py_path = \"/srv/app/model/\"\n # notebook config path in /srv (hardcoded to prevent notebooks subfolder relative problems)\n nb_template = \"/dltk/.jupyter/jupyter_notebook_conversion.tpl\"\n #print(\"Config path: \" + nb_template)\n #print(\"Source path: \" + os_path)\n #print(\"Destination: \" + py_path)\n # convert notebook to python module using the provided template\n # jupyter nbconvert --to python /srv/notebooks/Splunk_MLTK_notebook.ipynb --output-dir /src/models --template=/srv/config/jupyter_notebook_conversion.tpl\n # /opt/conda/lib/python3.7/site-packages/nbconvert/templates/python.tpl\n # /opt/conda/lib/python3.7/site-packages/nbconvert/templates/skeleton/null.tpl\n check_call(['jupyter', 'nbconvert', '--to', 'python', nb_filename,\n '--output-dir', py_path, '--template=' + nb_template], cwd=nb_path)", "def replace_ipynb(root):\n for (dirpath, dirname, fnames) in os.walk(root):\n for fname in fnames:\n name, ext = os.path.splitext(fname)\n if ext == \".ipynb\":\n in_fpath = \"{}/{}\".format(dirpath, fname)\n out_fpath = \"{}/{}\".format(dirpath, name + \".py\")\n notebook = load_json(in_fpath)\n code = pull_code(notebook)\n write_code(code, out_fpath)", "def write_ipynb(self):\n for nb in self.notebooks:\n nbformat.write(nb.content, os.path.join(self.dst_dir, nb.filename))", "def transform_nb(dirpath, src_fname, tg_fname):\n\n srcfile = os.path.join(dirpath, src_fname)\n tgfile = os.path.join(dirpath, tg_fname)\n\n with open(srcfile, 'r') as fin:\n with open(tgfile, 'w') as fout:\n\n state = True\n skip_next = False\n\n for line in fin:\n\n if state:\n\n if '<FILL IN>' in line:\n skip_next = True\n fout.write(line)\n else:\n if skip_next:\n # This line is ignored, because the above line\n # contains a <FILL IN>\n skip_next = False\n if not line.endswith(',\\n'):\n # This is to avoid problems when the line to\n # remove is the last line in its cell\n fout.write('\" \"\\n')\n else:\n fout.write(line)\n\n if '<SOL>' in 
line:\n state = False\n else:\n if '</SOL>' in line:\n fout.write('\\n' + line)\n state = True\n\n # Insert backslash in spaces. This is to avoid error in the interpretation\n # of spaces (when they are part of the file name) in os commands.\n f_src = srcfile.replace(' ', '\\\\ ')\n os.system('jupyter nbconvert --to html ' + f_src + ' --output '\n + src_fname.replace('.ipynb', '.html'))\n\n # Clean student version\n f_tg = tgfile.replace(' ', '\\\\ ')\n os.system('jupyter nbconvert --ClearOutputPreprocessor.enabled=True '\n + '--inplace ' + f_tg)\n\n os.system(f'jupyter nbconvert --to slides {f_src}')\n os.system(f'jupyter nbconvert --to pdf {f_src} --output '\n + src_fname.replace('.ipynb', '.pdf'))\n\n return", "def publish(ipynb_path,\n conversion='latex_ipypublish_main',\n outpath=None,\n dump_files=False,\n ignore_prefix='_',\n clear_existing=False,\n create_pdf=False,\n pdf_in_temp=False,\n pdf_debug=False,\n plugin_folder_paths=(),\n dry_run=False):\n # setup the input and output paths\n if isinstance(ipynb_path, string_types):\n ipynb_path = pathlib.Path(ipynb_path)\n ipynb_name = os.path.splitext(ipynb_path.name)[0]\n files_folder = get_valid_filename(ipynb_name) + '_files'\n outdir = os.path.join(\n os.getcwd(), 'converted') if outpath is None else outpath\n\n # log start of conversion\n logger.info('started ipypublish v{0} at {1}'.format(\n ipypublish.__version__, time.strftime(\"%c\")))\n logger.info('logging to: {}'.format(\n os.path.join(outdir, ipynb_name + '.nbpub.log')))\n logger.info('running for ipynb(s) at: {0}'.format(ipynb_path))\n logger.info('with conversion configuration: {0}'.format(conversion))\n\n # merge all notebooks (this handles checking ipynb_path exists)\n final_nb, meta_path = merge_notebooks(ipynb_path,\n ignore_prefix=ignore_prefix)\n logger.debug('notebooks meta path: {}'.format(meta_path))\n\n # find conversion configuration\n logger.info('finding conversion configuration: {}'.format(conversion))\n export_config_path = None\n if isinstance(conversion, string_types):\n outformat_path = pathlib.Path(conversion)\n else:\n outformat_path = conversion\n if outformat_path.exists(): # TODO use pathlib approach\n # if is outformat is a path that exists, use that\n export_config_path = outformat_path\n else:\n # else search internally\n export_config_path = get_export_config_path(\n conversion, plugin_folder_paths)\n\n if export_config_path is None:\n handle_error(\n \"could not find conversion configuration: {}\".format(conversion),\n IOError, logger)\n\n # read conversion configuration and create\n logger.info('loading conversion configuration')\n data = load_export_config(export_config_path)\n logger.info('creating exporter')\n exporter_cls = create_exporter_cls(data[\"exporter\"][\"class\"])\n logger.info('creating template')\n template_name = \"template_file\"\n jinja_template = load_template(template_name, data[\"template\"])\n logger.info('creating nbconvert configuration')\n config = create_config(data[\"exporter\"], template_name,\n {\"${meta_path}\": str(meta_path),\n \"${files_path}\": str(files_folder)})\n\n # run nbconvert\n logger.info('running nbconvert')\n exporter, body, resources = export_notebook(final_nb, exporter_cls,\n config, jinja_template)\n\n # postprocess results\n body, resources, internal_files = postprocess_nb(body, resources)\n\n if dry_run:\n return outpath, exporter\n\n # write results\n logger.info(\"writing results\")\n main_file_name = ipynb_name + exporter.file_extension\n outpath, outfilespath = write_output(body, 
resources, outdir,\n main_file_name,\n dump_files or create_pdf,\n files_folder, internal_files,\n clear_existing)\n\n # create pdf\n if create_pdf and exporter.output_mimetype == 'text/latex':\n logger.info('running pdf conversion')\n\n if not export_pdf(outpath, outdir=outdir,\n files_path=outfilespath,\n convert_in_temp=pdf_in_temp,\n html_viewer=True,\n debug_mode=pdf_debug):\n handle_error('pdf export failed, try running with pdf_debug=True',\n RuntimeError, logger)\n\n logger.info('process finished successfully')\n\n return outpath, exporter", "def convert_realia():\n local('cd import_scripts;../bin/python import_realia.py')", "def main(path):\n with open(path, 'r') as f:\n notebook = json.load(f)\n notebook[\"cells\"] = [\n cell for cell in notebook[\"cells\"] if cell[\"cell_type\"] == \"markdown\"\n ]\n with open(path.replace(\".ipynb\", \".tmp.ipynb\"), 'w') as f:\n f.write(json.dumps(notebook))", "def convert_appendices():\n local('cd import_scripts;../bin/python import_appendices.py import_appendices')", "def main(args):\n replace_ipynb(args.root)", "def convert_dehaan():\n\n local('cd import_scripts;../bin/python import_dehaan.py import')", "def my_autopep8_folder(folder):\n folder = r'F:\\黎超\\dynamicFC\\Code\\lc_rsfmri_tools_python\\Utils'\n file_name = os.listdir(folder)\n py_name = [filename for filename in file_name if '.py' in filename]\n\n all_cmd = [\"autopep8 --in-place --aggressive --aggressive\" +\n \" \" + pyname for pyname in py_name]\n\n num_py = np.arange(1, len(py_name) + 1)\n len_py = len(py_name)\n for i, cmd, pyname in zip(num_py, all_cmd, py_name):\n print('converting {} ({}/{})...'.format(pyname, i, len_py))\n state = subprocess.call(cmd, shell=True)\n\n if not state:\n print(\"Succeed!\\n\")\n else:\n print(\"Failed!\\n\")\n else:\n print(\"Done!\")", "def publish(ipynb_path,\n outformat='latex_ipypublish_main',\n outpath=None, dump_files=False,\n ignore_prefix='_', clear_files=False,\n create_pdf=False, pdf_in_temp=False, pdf_debug=False):\n if isinstance(ipynb_path,basestring):\n ipynb_path = pathlib.Path(ipynb_path)\n ipynb_name = os.path.splitext(ipynb_path.name)[0]\n files_folder = ipynb_name+'_files'\n\n outdir = os.path.join(os.getcwd(),'converted') if outpath is None else outpath \n if not os.path.exists(outdir):\n os.mkdir(outdir)\n \n logging.info('started ipypublish at {0}'.format(time.strftime(\"%c\")))\n logging.info('logging to: {}'.format(os.path.join(outdir,ipynb_name+'.nbpub.log'))) \n logging.info('running for ipynb(s) at: {0}'.format(ipynb_path))\n logging.info('with conversion: {0}'.format(outformat))\n \n final_nb, meta_path = merge_notebooks(ipynb_path,\n ignore_prefix=ignore_prefix)\n logging.debug('notebooks meta path: {}'.format(meta_path))\n\n logging.info('getting output format from exporter plugin')\n plugins = export_plugins.get()\n if not outformat in plugins:\n logging.error(\"the exporter plugin '{}' does not exist\".format(outformat)\n +\", acceptable names: {}\".format(list(plugins.keys())))\n raise ValueError(\"the exporter plugin '{}' does not exist\".format(outformat)\n +\", acceptable names: {}\".format(list(plugins.keys())))\n oplugin = plugins[outformat]\n \n # ensure file paths point towards the right folder\n oplugin['config']['ExtractOutputPreprocessor.output_filename_template'] = files_folder+'/{unique_key}_{cell_index}_{index}{extension}'\n oplugin['config']['LatexDocLinks.metapath'] = str(meta_path)\n oplugin['config']['LatexDocLinks.filesfolder'] = str(files_folder)\n oplugin['config']['LatexDocHTML.metapath'] 
= str(meta_path)\n oplugin['config']['LatexDocHTML.filesfolder'] = str(files_folder)\n\n logging.debug('{}'.format(oplugin['config']))\n \n ##for debugging\n # tpath = os.path.join(outdir, ipynb_name+'.template.tpl')\n # with open(tpath, \"w\") as fh:\n # fh.write(str(oplugin['template']))\n \n (body, resources), exe = export_notebook(final_nb, \n oplugin['oformat'],oplugin['config'],oplugin['template'])\n\n # reduce multiple blank lines to single\n body = re.sub(r'\\n\\s*\\n', '\\n\\n', body) \n # make sure references refer to correct slides\n if 'refslide' in resources:\n for k,(col,row) in resources['refslide'].items():\n body = body.replace('{{id_home_prefix}}{0}'.format(k),'#/{0}/{1}{2}'.format(col,row,k))\n\n # filter internal files by those that are referenced in the document body\n if resources['outputs']:\n for path in list(resources['outputs'].keys()):\n if not path in body:\n resources['outputs'].pop(path) \n internal_files = resources['outputs']\n else:\n internal_files = {}\n \n # output main file \n outpath = os.path.join(outdir,ipynb_name+exe)\n logging.info('outputting converted file to: {}'.format(outpath)) \n with open(outpath, \"w\") as fh:\n fh.write(body)\n\n # output external files\n if dump_files or create_pdf:\n outfilespath = os.path.join(outdir,files_folder)\n logging.info('dumping external files to: {}'.format(outfilespath)) \n \n if os.path.exists(outfilespath):\n if clear_files:\n shutil.rmtree(outfilespath)\n else:\n os.mkdir(outfilespath)\n \n for internal_path, fcontents in internal_files.items():\n with open(os.path.join(outdir, internal_path), \"wb\") as fh:\n fh.write(fcontents)\n for external_path in resources['external_file_paths']:\n shutil.copyfile(external_path,\n os.path.join(outfilespath,os.path.basename(external_path)))\n \n if create_pdf and oplugin['oformat'].lower()=='latex':\n logging.info('running pdf conversion') \n \n if not export_pdf(outpath, outdir=outdir, \n files_path=outfilespath,\n convert_in_temp=pdf_in_temp,\n html_viewer=True,\n debug_mode=pdf_debug):\n logging.error('pdf export returned false, try running with pdf_debug=True')\n raise RuntimeError('the pdf export failed, try running with pdf_debug=True')\n \n logging.info('process finished successfully')\n return outpath", "def patch_notebooks(notebooks_dir):\n\n nb_convert_config = Config()\n nb_convert_config.NotebookExporter.preprocessors = [\"nbconvert.preprocessors.ClearOutputPreprocessor\"]\n output_remover = nbconvert.NotebookExporter(nb_convert_config)\n for notebookfile in Path(notebooks_dir).glob(\"**/*.ipynb\"):\n if (\n not str(notebookfile.name).startswith(\"test_\")\n and notebookfile.name not in EXCLUDED_NOTEBOOKS\n ):\n nb = nbformat.read(notebookfile, as_version=nbformat.NO_CONVERT)\n found = False\n for cell in nb[\"cells\"]:\n replace_dict = cell.get(\"metadata\", {}).get(\"test_replace\")\n if replace_dict is not None:\n found = True\n for source_value, target_value in replace_dict.items():\n if source_value not in cell[\"source\"]:\n raise ValueError(\n f\"Processing {notebookfile} failed: {source_value} does not exist in cell\"\n )\n cell[\"source\"] = cell[\"source\"].replace(\n source_value, target_value\n )\n cell[\"source\"] = \"# Modified for testing\\n\" + cell[\"source\"]\n print(\n f\"Processed {notebookfile}: {source_value} -> {target_value}\"\n )\n if not found:\n print(f\"No replacements found for {notebookfile}\")\n nb_without_out, _ = output_remover.from_notebook_node(nb)\n with notebookfile.with_name(f\"test_{notebookfile.name}\").open(\"w\", 
encoding=\"utf-8\") as out_file:\n out_file.write(nb_without_out)", "def save_current_nb_as_html(info=False):\n assert in_ipynb()\n\n full_path = get_notebook_name()\n path, filename = os.path.split(full_path)\n\n wd_save = os.getcwd()\n os.chdir(path)\n cmd = 'jupyter nbconvert --to html \"{}\"'.format(filename)\n os.system(cmd)\n os.chdir(wd_save)\n\n if info:\n print(\"target dir: \", path)\n print(\"cmd: \", cmd)\n print(\"working dir: \", wd_save)", "def convert_to_latex(self, builder, filename, latex_metadata):\n relative_path = ''\n tex_data = ''\n tex_build_path = self.texdir + relative_path\n pdf_build_path = self.pdfdir + relative_path\n template_folder = builder.config['jupyter_template_path']\n\n\n ensuredir(tex_build_path)\n ensuredir(pdf_build_path)\n\n ## setting the working directory\n os.chdir(self.texdir)\n\n ## copies all theme folder images to static folder\n if os.path.exists(builder.confdir + \"/theme/static/img\"):\n copy_tree(builder.confdir + \"/theme/static/img\", self.texdir + \"/_static/img/\", preserve_symlinks=1)\n else:\n self.logger.warning(\"Image folder not present inside the theme folder\")\n\n fl_ipynb = self.texdir + \"/\" + \"{}.ipynb\".format(filename)\n fl_tex = self.texdir + \"/\" + \"{}.tex\".format(filename)\n fl_tex_template = builder.confdir + \"/\" + template_folder + \"/\" + builder.config['jupyter_latex_template']\n\n ## do not convert excluded patterns to latex\n excluded_files = [x in filename for x in builder.config['jupyter_pdf_excludepatterns']]\n\n if not True in excluded_files: \n ## --output-dir - forms a directory in the same path as fl_ipynb - need a way to specify properly?\n ### converting to pdf using xelatex subprocess\n if sys.version_info[0] < 3:\n subprocess.call([\"jupyter\", \"nbconvert\",\"--to\",\"latex\",\"--template\",fl_tex_template,\"from\", fl_ipynb])\n else:\n subprocess.run([\"jupyter\", \"nbconvert\",\"--to\",\"latex\",\"--template\",fl_tex_template,\"from\", fl_ipynb])\n\n ### check if subdirectory\n subdirectory = \"\"\n index = filename.rfind('/')\n if index > 0:\n subdirectory = filename[0:index]\n filename = filename[index + 1:]\n\n ### set working directory for xelatex processing\n os.chdir(self.texdir + \"/\" + subdirectory)\n\n try:\n self.subprocess_xelatex(fl_tex, filename)\n if 'bib_include' in latex_metadata:\n self.subprocess_bibtex(filename)\n self.subprocess_xelatex(fl_tex, filename)\n self.subprocess_xelatex(fl_tex, filename)\n except OSError as e:\n print(e)\n except AssertionError as e:\n pass\n # exit() - to be used when we want the execution to stop on error", "def html2ipynb(path):\n # I don't understand why click isn't handling this?\n path = Path(path)\n if path.is_file() and path.suffix == '.html':\n print(f\"Checking {path}\")\n # Read notebook\n with path.open('r') as f:\n nb = nbformat.v4.new_notebook()\n\n html = f.read()\n soup = BeautifulSoup(html, 'lxml')\n \n for d in soup.findAll(\"div\"):\n if 'class' in d.attrs.keys():\n for clas in d.attrs[\"class\"]:\n if clas in [\"text_cell_render\", \"input_area\"]:\n # code cell\n if clas == \"input_area\":\n cell = nbformat.v4.new_code_cell(d.get_text())\n nb.cells.append(cell)\n\n else:\n cell = nbformat.v4.new_code_cell(d.decode_contents())\n nb.cells.append(cell)\n\n \n outpath = path.with_suffix('.ipynb')\n nbformat.write(nb, outpath.open('w'))", "def _notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n\n # Create a temporary file to write the notebook to.\n # 'with' method is used so the file is closed by 
tempfile\n # and free to be overwritten.\n # with tempfile.NamedTemporaryFile('w', suffix=\".ipynb\") as fout:\n with tempfile.NamedTemporaryFile(\n \"w\", suffix=\".nbconvert.ipynb\", delete=False\n ) as fout:\n nbpath = fout.name\n\n jupyter_exec = shutil.which(\"jupyter\")\n\n # recent version (~7.3.1) requires output without extension\n out_path = os.path.join(\n os.path.dirname(nbpath), os.path.basename(nbpath).split(\".\", 1)[0]\n )\n args = [\n jupyter_exec,\n \"nbconvert\",\n path,\n \"--output\",\n out_path,\n \"--to\",\n \"notebook\",\n \"--execute\",\n \"--ExecutePreprocessor.timeout=60\",\n ]\n subprocess.check_call(args)\n\n assert os.path.exists(nbpath), \"nbconvert used different output filename\"\n\n nb = nbformat.read(nbpath, nbformat.current_nbformat)\n\n errors = [\n output\n for cell in nb.cells\n if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"\n ]\n\n # Remove the temp file once the test is done\n if os.path.exists(nbpath):\n os.remove(nbpath)\n\n return nb, errors", "def _from_ipynb(path_to_nb, exporter, nbconvert_export_kwargs):\n\n path = Path(path_to_nb)\n\n nb = nbformat.reads(path.read_text(), as_version=nbformat.NO_CONVERT)\n content, _ = nbconvert.export(exporter, nb, **nbconvert_export_kwargs)\n\n if isinstance(content, str):\n path.write_text(content)\n elif isinstance(content, bytes):\n path.write_bytes(content)\n else:\n raise TypeError('nbconvert returned a converted notebook with'\n 'unknown format, only text and binary objects '\n 'are supported')\n\n return content", "def finish_notebook():\n\n current_branch_name = local('git rev-parse --abbrev-ref HEAD',\n capture=True)\n if not current_branch_name.startswith('notebook-'):\n raise Exception(\"You are not in a notebook branch.\")\n the_date = current_branch_name.split('notebook-')[1]\n path_to_notebook = 'content/notebook/{}.html'.format(current_branch_name)\n local('git add {}'.format(path_to_notebook))\n local('git commit {} -m \"Added the notebook for {}.\"'.format(\n path_to_notebook, the_date))\n local('git rebase master')\n local('git checkout master')\n local('git merge {}'.format(current_branch_name))\n local('git push origin master')\n local('git branch -d {}'.format(current_branch_name))\n push()", "def convert_marginalia():\n\n local('cd import_scripts;../bin/python import_marginalia.py import_marginalia')", "def convert_corpusdiplomaticum():\n local('cd import_scripts;../bin/python import_corpusdiplomaticum.py import')", "def strip(notebook):\n for cell in notebook.cells:\n if cell.cell_type == 'code':\n cell.outputs = []\n cell.execution_count = None", "def main(\n):\n music_home = \"/home/banana/music\"\n for inode in list_dir(music_home):\n if basename(inode) in [\n \"annotate\",\n \"metadata\",\n \"sped-up\",\n \"tracklists\",\n ] or isfile(inode):\n continue\n convert(inode)", "def ui_to_py(filename: str,\n filepath: str = os.path.dirname(__file__),\n outputpath: str = os.path.dirname(__file__)) -> None:\n if isinstance(filename,str) and isinstance(filepath,str):\n if not ' ' in filename:\n if os.path.isfile(\"{}\\{}.ui\".format(filepath,filename)):\n filepath = filepath\n filename = filename\n chk_py = os.path.isfile(\"{}\\{}.py\".format(filepath,filename))\n os.system(\"cd {0} & pyuic5 -x {1}.ui -o {1}.py\".format(filepath,filename))\n shutil.move(\"{}\\{}.py\".format(filepath,filename),\"{}\\{}.py\".format(outputpath,filename))\n\n if chk_py:\n print(\"File Converter Info: {}.py file updated.\".format(filename))\n else:\n print(\"File 
Converter Info: {}.py file created.\".format(filename))\n else:\n print(\"File Converter Alert: The {}.ui file doesn't exist.\".format(filename))\n else:\n print(\"File Converter Error: The filename contains spaces.\")\n else:\n print(\"File Converter Error: Arguments are not string.\")", "def execute_notebook(nb, resources):\n\n if is_ipython_3():\n from IPython.nbconvert.preprocessors import ExecutePreprocessor\n nb, resources = ExecutePreprocessor().preprocess(nb, resources)\n elif runipy_available:\n from runipy.notebook_runner import NotebookRunner\n r = NotebookRunner(nb)\n r.run_notebook(skip_exceptions=True)\n nb = r.nb\n else:\n raise ImportError(\"Can't execute notebooks. Please install IPython >= 3 or runipy.\")\n\n return nb" ]
[ "0.6690791", "0.6655869", "0.59970456", "0.59338504", "0.59105814", "0.5771607", "0.5758488", "0.5513806", "0.5423735", "0.5398756", "0.5371834", "0.53497887", "0.533573", "0.5253323", "0.52508056", "0.5236259", "0.52301824", "0.5228149", "0.52050316", "0.51480156", "0.5031695", "0.5021937", "0.5020281", "0.49905702", "0.4979761", "0.49616408", "0.49310303", "0.49205464", "0.49116755", "0.48605886" ]
0.7036844
0
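A hedged driver sketch for convert_to_py in the record above. The record does not show `exec_command`, so a stand-in based on `subprocess` is assumed here, and the notebook name "analysis" is a made-up placeholder; the notebook must exist and contain a cell whose last line is the literal comment '# end of .py file'.

import subprocess

def exec_command(cmd):
    # assumed stand-in for the exec_command helper used in the record above
    subprocess.run(cmd, shell=True, check=True)

def convert_to_py(fname):
    # newer installs expose this as `jupyter nbconvert` rather than `ipython nbconvert`
    exec_command("ipython nbconvert --to=python " + fname + ".ipynb")
    with open(fname + '.py', 'r') as f:
        all_lines = f.readlines()
    end_line_num = all_lines.index('# end of .py file\n')
    with open(fname + '.py', 'w') as f:
        f.writelines(all_lines[:end_line_num])

# Hypothetical call: writes analysis.py, truncated at the marker line.
convert_to_py("analysis")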
Return an 8x3 array giving the rows, columns, main diagonal, and antidiagonal of a 3x3 matrix.
def row_col_diag(arr):
    three_sets = np.zeros((8, 3), dtype=int)
    for i in range(arr.shape[0]):
        three_sets[i] = arr[i]
    for i in range(arr.shape[1]):
        three_sets[i + 3] = arr[:, i]
    three_sets[6] = np.diag(arr)
    three_sets[7] = np.diag(np.flipud(arr))
    return three_sets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getdiag(self):\n out = []\n for x in xrange(0, self.lendiag()):\n out.append(self.retrieve(x))\n return out", "def test_scanmatrixlines_3x3_returns16lists():\n expected = [\n [1, 2, 3], [4, 5, 6], [7, 8, 9],\n [1, 4, 7], [2, 5, 8], [3, 6, 9],\n [7], [4, 8], [1, 5, 9], [2, 6], [3],\n [1], [2, 4], [3, 5, 7], [6, 8], [9]\n ]\n m = problem11.read_matrix('data/test_matrix4.txt')\n assert problem11.scan_matrix_lines(m) == expected", "def get_diags(rows, cols, I, J):\n\n pairs = []\n # Iterate over i, j s.t. i-j == I-J\n # Rearranging, i must satisfy\n # 0 <= i < rows\n # I - J <= i < cols + I - J\n for i in range(\n max(0, I - J),\n min(rows, cols + I - J)\n ):\n j = i - I + J\n if i != I:\n pairs.append((i, j))\n\n # Iterate over i, j s.t. i+j == I+J\n # Rearranging, i must satisfy\n # 0 <= i < rows\n # I + J - cols + 1 <= i < I + J + 1\n for i in range(\n max(0, I + J - cols + 1),\n min(rows, I + J + 1)\n ):\n j = I + J - i\n if i != I:\n pairs.append((i, j))\n return pairs", "def make8UC3(mat):\n mat_8UC = make8UC(mat)\n mat_8UC3 = np.stack((mat_8UC,)*3, axis = -1)\n \n return mat_8UC3", "def wc_matrix(matrix):\n return [{\"A\": position[\"T\"], \"T\": position[\"A\"], \"C\": position[\"G\"], \"G\": position[\"C\"]} for position in matrix[::-1]]", "def matrix4_to_3x4_array(mat):\r\n return tuple(f for v in mat[0:3] for f in v)", "def get_A3(n):\n # Create a matrix B\n Bdiag = -60 * np.eye(n)\n Bupper1 = np.diag([16] * (n - 1), 1)\n Bupper2 = np.diag([-1] * (n - 2), 2)\n Blower1 = np.diag([16] * (n - 1), -1)\n Blower2 = np.diag([-1] * (n - 2), -2)\n B = Bdiag + Bupper1 + Blower1 + Bupper2 + Blower2\n\n # Creat a list [B,B,B,...,B] with n Bs\n blst = [B] * n\n\n # Unpack and rearrange list of Bs into diagonal of matrix A\n A = sp.linalg.block_diag(*blst)\n\n # Upper diagonal array offset by n: we've got (n-1) I blocks\n # each containing n ones\n Dupper1 = np.diag(16*np.ones(n * (n - 1)), n)\n Dupper2 = np.diag(-1*np.ones(n * (n - 2)), 2*n)\n\n # Lower diagonal array offset by -n\n Dlower1 = np.diag(16*np.ones(n * (n - 1)), -n)\n Dlower2 = np.diag(-1*np.ones(n * (n - 2)), -2*n)\n A += Dupper1 + Dlower1 + Dupper2 + Dlower2\n\n # Print the A matrix\n # print A.astype(int) \n return A", "def __diagonal(self,rows,cols):\n diag1 = [row + col for row,col in zip(rows,cols)]\n\n #reverse column elements\n diag2 = [row + col for row,col in zip(rows,cols[::-1])]\n\n return [diag1,diag2]", "def matrix_3d_to_4x4(matrix: np.matrix) -> np.matrix:\n return np.matrix([\n [matrix.item(0, 0), matrix.item(0, 1), matrix.item(0, 2), 0],\n [matrix.item(1, 0), matrix.item(1, 1), matrix.item(1, 2), 0],\n [matrix.item(2, 0), matrix.item(2, 1), matrix.item(2, 2), 0],\n [0, 0, 0, 1]])", "def generate_diagonals(self):\n x = self.square[0]\n y = self.square[1]\n diagonals = [[]]\n \n diagonals.append( ( (x+a, y+a) for a in range(1,8) ) )\n diagonals.append( ( (x+a, y-a) for a in range(1,8) ) )\n diagonals.append( ( (x-a, y+a) for a in range(1,8) ) )\n diagonals.append( ( (x-a, y-a) for a in range(1,8) ) )\n \n return diagonals", "def form_triu_matrix(arr):\n n = int(np.ceil((np.sqrt(1 + 8 * len(arr)) - 1) * 0.5))\n M = np.zeros((n, n))\n c = 0\n for i in range(n):\n for j in range(n):\n if j >= i:\n if c < len(arr):\n M[i, j] = arr[c]\n c += 1\n else:\n break\n return M", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", 
"def matrix_2d_to_3d(matrix: np.matrix) -> np.matrix:\n return np.matrix([\n [matrix.item(0, 0), matrix.item(0, 1), 0, matrix.item(0, 2)],\n [matrix.item(1, 0), matrix.item(1, 1), 0, matrix.item(1, 2)],\n [0, 0, 1, 0],\n [matrix.item(2, 0), matrix.item(2, 1), 0, matrix.item(2, 2)]])", "def makeMatrix():\n listOfChars = []\n for ascii in range(32, 128):\n listOfChars.append(chr(ascii))\n random.shuffle(listOfChars)\n matrix = Grid(8, 12)\n i = 0\n for row in range(matrix.getHeight()):\n for column in range(matrix.getWidth()):\n matrix[row][column] = listOfChars[i]\n i += 1\n return matrix", "def generate_3d_board(n):\n layer = nxn_generate.generate_shuffled_2d_board(n)\n cube = []\n for i in range(len(layer)):\n new_layer = []\n for column in layer:\n new_column = []\n # this nested mess is to ensure that none of the sub 3x3 squares violates sudoku rules from any x y or z\n # perspective (also the Latin Square rules but the subsquares are trickier and the cause of more mess)\n for j in range(int(math.sqrt(len(layer)))):\n for k in range(int(math.sqrt(len(layer)))):\n # lot of 3 = (i+j) % 3\n # index within lot = (i + k + (i//3)) % 3\n new_column.append(column[int(math.sqrt(len(layer))) * ((i + j) % int(math.sqrt(len(layer)))) + (\n i + k + (i // int(math.sqrt(len(layer))))) % int(math.sqrt(len(layer)))])\n new_layer.append(new_column)\n cube.append(new_layer)\n\n return shuffle_cube(cube)", "def get_matrix(self):\n return self._matrix[:3, :]", "def diagonal_pairings(mat):\n\tw, h = mat.shape\n\tx = mat[:-1,:-1]\n\ty = mat[1:, 1:]\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(w-1):\n\t\tfor j in range(h-1):\n\t\t\tx_cor_list.append(x[i, j])\n\t\t\ty_cor_list.append(y[i, j])\n\n\treturn x_cor_list, y_cor_list", "def build(xaxis, yaxis, zaxis):\n matrix = []\n for floor in range(zaxis):\n roomnum = 1\n matrix.append([])\n for row in range(yaxis):\n matrix[floor].append([])\n for column in range(xaxis):\n matrix[floor][row].append(str(roomnum))\n roomnum += 1\n return matrix", "def matrix_shape(matrix):\n return [*get_length(matrix)]", "def matrix_shape(matrix):\n return [*get_length(matrix)]", "def make_matrix(rows, columns):\n\tmatrix = []\n\tfor row in range(rows):\n\t\tmatrix += [[0] * columns]\n\t\t\n\treturn matrix", "def make_matrix():\n row, col = [int(x) for x in input().split()]\n island = [[int(x) for x in input().split()] for _ in range(row)]\n return row, col, island", "def triangleAdjacency( gen ):\n \n numTri = int( numberOfNodes( gen ) / 3. 
)\n \n return [ ( 3*i+j, 3*i+((j+1)%3) ) for j in range(3) for i in range(numTri) ]", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def create_fabric_matrix(rows, columns):\n return [['.'] * columns for i in range(rows)]", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def diagonal(self):\n M = self.rep\n m, n = self.shape\n return [M[i, i] for i in range(min(m, n))]", "def get_static_board_layout(things, width, height):\n obj_map = convert_to_dict(things)\n matrix = []\n for yloc in xrange(1, height-1):\n row = []\n for xloc in xrange(1, width-1):\n if obj_map.has_key((xloc, yloc)):\n row.append(obj_map[(xloc, yloc)])\n else:\n row.append('.')\n matrix.insert(0, row)\n return matrix", "def transpose(matrix):\n return list(zip(*matrix))", "def transpose(matrix):\n\n nb_rows = len(matrix)\n nb_cols = len(matrix[0])\n result = [ [None]*nb_rows for k in range(nb_cols)]\n\n for row in range(nb_rows):\n for col in range(nb_cols):\n result[col][row] = matrix[row][col]\n \n return result" ]
[ "0.6542143", "0.64369875", "0.6270617", "0.6098526", "0.6096621", "0.60538954", "0.60484993", "0.6033331", "0.6016216", "0.5939279", "0.5934395", "0.5915065", "0.5816082", "0.5778242", "0.57632416", "0.575525", "0.5744145", "0.5711642", "0.57073534", "0.57073534", "0.57008743", "0.56931394", "0.56904024", "0.56729865", "0.56587917", "0.56306475", "0.56257874", "0.56061953", "0.5598647", "0.5592692" ]
0.7105803
0
Turn 1, -1, and 0 into, respectively, 'X', 'O', and ' '.
def xo_convert(n):
    if n == 1:
        return "X"
    elif n == -1:
        return "O"
    else:
        return " "
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def opp(c):\n return 'x' if c == 'o' else 'o'", "def x_pow_one(nom):\n\tif 'X' in nom and '^' not in nom:\n\t\treturn nom.replace('X', 'X^1')\n\treturn nom", "def bin_to_char(exp):\n new_exp = \"\"\n for i in range(0,len(exp)):\n if exp[i] == \"1\":\n new_exp += \"#\"\n else:\n new_exp += \" \"\n return new_exp", "def _bin_backport(x):\n chars = []\n for n in range(7, -1, -1):\n y = x - 2**n\n if y >= 0:\n chars.append('1')\n x = y\n else:\n chars.append('0')\n return ''.join(chars)", "def ones_conversion(positive_int):\n # I use an index of [-1] to select the one's place as it's on the rightmost place numerically\n positive_int = str(positive_int)\n if int(positive_int[-1]) < 4:\n return 'I' * int(positive_int[-1]) # Can multiply I by the corresponding value as long as it's under 4\n if int(positive_int[-1]) == 4:\n return 'IV'\n if int(positive_int[-1]) == 5:\n return 'V'\n if int(positive_int[-1]) == 6:\n return 'VI'\n if int(positive_int[-1]) == 7:\n return 'VII'\n if int(positive_int[-1]) == 8:\n return 'VIII'\n if int(positive_int[-1]) == 9:\n return 'IX'", "def __str__(self):\n result = \"\"\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 5:\n result += \" x\"\n elif self.board[i][j] == 7:\n result += \" о\"\n else:\n result += \" #\"\n result += \"\\n\"\n return result", "def __str__(self):\n\t\t\n\t\tdef mapping(x):\n\t\t\tif x == 1:\n\t\t\t\t# WHITE\n\t\t\t\treturn 'O'\n\t\t\telif x == -1:\n\t\t\t\t# BLACK\n\t\t\t\treturn 'X'\n\t\t\telse:\n\t\t\t\t# Empty\n\t\t\t\treturn '-'\n\t\t\n\t\ts = 'BLACK - X\\n'\n\t\ts += 'WHITE - O\\n\\n'\n\t\tfor j in self.rows:\n\t\t\ts += j\n\t\t\ts += ' '\n\t\t\ts += ''.join(mapping(self[i+j]) for i in self.columns)\n\t\t\ts += '\\n'\n\t\treturn s + '\\n ' + self.columns + '\\n'", "def x_pow_zero(nom):\n\tif 'X' not in nom:\n\t\treturn nom + '*X^0'\n\treturn nom", "def boolean_to_xo(boolean):\n return {\n False: 'X',\n True: 'O'\n }.get(boolean)", "def __str__(self) -> str:\n return f\"0x{self:X}\"", "def number2x(num, tik='-'):\n return ''.join(['x' if x=='0' else tik for x in list(\"{:08b}\".format(num)) ][::-1]) #<== [::-1] for reverse printing => zero on left etc.", "def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n \r\n\r\n\r\n if pos_1 <10:\r\n pos_1=\"00\"+str(pos_1)\r\n elif pos_1<100:\r\n pos_1=\"0\"+str(pos_1)\r\n\r\n\r\n\r\n\r\n #pos\r\n c=\"\"\r\n\r\n c=str(pos_0)+str(pos_1)+str(n)\r\n #print(\"c\",c)\r\n return c", "def x_add_one(nom):\n\tif 'X' in nom and '*' not in nom:\n\t\treturn nom.replace('X', '1*X')\n\treturn nom", "def flip_icon(icon):\n\n return (\"X\" if icon == \"O\" else \"O\")", "def get_eplus_action_encoding(action):\n\n if action > 0:\n action = np.ceil(action*10)/10\n eplus_commands = [action, 1, 1, 1, 0]\n elif action < 0:\n eplus_commands = [0, 1, 0, 0, 1]\n else:\n eplus_commands = [0, 1, 1, 0, 0]\n\n return eplus_commands", "def flip10(haps):\n\touthaps = \"\"\n\tfor a in haps:\n\t\tif (a == \"1\"):\n\t\t\touthaps += \"0\"\n\t\telif (a == \"0\"):\n\t\t\touthaps += \"1\"\n\treturn(outhaps)", "def test_phred_to_ascii(self):\r\n self.assertEqual(phred_to_ascii(0, 120), 'x')\r\n self.assertEqual(phred_to_ascii(1, 119), 'x')", "def __str__(self):\n # special cases\n if self.is_nan() :\n return \"nan\"\n elif self.coeff == 1 :\n if self.expt == 1 :\n return \"x\"\n else :\n return \"x^\" + str(self.expt)\n elif self.coeff == -1 :\n if self.expt == 1 :\n return \"-x\"\n 
else :\n return \"-x^\" + str(self.expt)\n \n # str_builder\n if self.expt == 0 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator))\n elif self.expt == 1 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x\"\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x\"\n else :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x^\" + str(self.expt)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x^\" + str(self.expt)", "def convert_action(action):\n new = np.zeros(18)\n for act in ['camera', 'forward', 'jump', 'left', 'right']:\n tmp = \"\"\n if act == 'camera':\n new[0] = action[act][0]\n new[1] = action[act][1]\n else:\n tmp += str(action[act])\n new[int(tmp, 2)] = 1\n return new", "def shape_str(shape):\n return '\\n'.join(''.join(map({'X': 'X', None: 'O'}.get, line))\n for line in shape)", "def uX_to_bin(v, x):\n if(v < 0):\n v += (1 << x)\n return bin(v)[2:].rjust(x, '0')", "def state_to_char(observation):\n if observation == self.TileState.CLEAN.value:\n return \"-\"\n if observation == self.TileState.DIRTY.value:\n return \"d\"\n if observation == self.TileState.BOT.value:\n return \"b\"", "def vec2str(vec):\n _str = \"\"\n for i in range(4):\n v = vec[i*43: (i+1)*43]\n _str += chr(np.argwhere(v == 1)[0][0] + ord('0'))\n return _str", "def str2(self):\n signs = [ ('+' if f >= 0 else '-') for f in self.mVector ]\n vals = [ abs(f) for f in self.mVector ]\n\n return '%s %s %si %s %sj %s %sk' % (self.mScalar, \n signs[0],\n vals[0],\n signs[1],\n vals[1],\n signs[2],\n vals[2])", "def onehot_to_string(self, one_hot_seq):\n gen_ints = [np.where(r==1)[0][0] for r in one_hot_seq]\n gen_char_list = self.int_to_char(gen_ints)\n generated_text = ''.join(gen_char_list)\n return generated_text", "def plusMinus(x):\n if x < 0.:\n return 'm'\n else:\n return 'p'", "def hex2oct(x):\n # moreZero = random.choice(range(10))\n moreZero = 0\n return oct(int(x, 16)).zfill(moreZero + len(oct(int(x, 16)))).strip('L')", "def __str__(self):\n if self.alive:\n return \"X\"\n return \".\"", "def get_annotations(x):\n y = np.chararray(x.shape, unicode=True)\n y[:] = 'x'\n y[x == 0] = '<'\n y[x == 1] = 'V'\n y[x == 2] = '>'\n y[x == 3] = '^'\n\n return y", "def onehot_to_chars(self, one_hot_seq):\n gen_ints = [np.where(r==1)[0][0] for r in one_hot_seq]\n gen_chars = self.int_to_char(gen_ints)\n return gen_chars" ]
[ "0.6210847", "0.6182411", "0.6099019", "0.5970165", "0.5955223", "0.5952791", "0.59230816", "0.5864503", "0.584585", "0.581593", "0.5792662", "0.5771425", "0.5751693", "0.57507646", "0.57293314", "0.57267", "0.57137376", "0.5680069", "0.56273484", "0.562184", "0.5616875", "0.5593776", "0.5566329", "0.55561286", "0.55527914", "0.5547263", "0.5538272", "0.5534329", "0.552879", "0.5523028" ]
0.7854444
0
Creates the image in OpenStack if it does not already exist
def create(self):
    if self.image:
        return self.image

    import nova_utils
    nova = nova_utils.nova_client(self.os_creds)
    image_dict = None
    try:
        # TODO/FIXME - Certain scenarios, such as when the name has whitespace,
        # the image with a given name is not found....
        image_dict = nova.images.find(name=self.image_name)
    except Exception as e:
        logger.info('No existing image found with name - ' + self.image_name)
        pass

    if image_dict:
        self.image = self.glance.images.get(image_dict.id)
        if self.image:
            logger.info('Found image with name - ' + self.image_name)
            return self.image

    self.image_file = self.__get_image_file()
    self.image = self.glance.images.create(name=self.image_name,
                                           disk_format=self.image_format,
                                           container_format="bare")
    logger.info('Uploading image file')
    self.glance.images.upload(self.image.id, open(self.image_file.name, 'rb'))
    logger.info('Image file upload complete')
    return self.image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def glance_create_new_image(glance, images_location, image_info, image_name_prefix=None):\n # image raw file path\n image_raw_source = image_info['image_raw_source']\n image_file = os.path.join(images_location, image_raw_source)\n\n if not os.path.isfile(image_file):\n logger.warning(\"image raw file:'%s' not found!\", image_file)\n return None\n\n fimg = None\n try:\n fimg = open(image_file, 'rb')\n except Exception:\n logger.error(\"Opening raw image file:'%s' failed\", image_file)\n return None\n\n try:\n # image name\n image_name = image_info['image_name']\n if image_name_prefix:\n image_name = \"{}{}\".format(image_name_prefix, image_name)\n logger.debug(\"image_name: %s\", image_name)\n\n # image min_disk\n if image_info['image_min_disk'] == 'auto':\n # compute the size of the file -> min disk size in GB\n imagesize = os.fstat(fimg.fileno()).st_size\n image_min_disk = (imagesize/1024/1024/1024)+1\n else:\n image_min_disk = image_info['image_min_disk']\n logger.debug(\"image_min_disk: %s\", image_min_disk)\n\n # image min_ram\n image_min_ram = image_info['image_min_ram']\n logger.debug(\"image_min_ram: %s\", image_min_ram)\n\n # image properties (dictionary)\n image_properties = image_info['image_properties']\n logger.debug(\"image_properies: %s\", image_properties)\n\n logger.debug(\"glance image create (private): '%s'\", image_name)\n image = glance.images.create(name=image_name,\n visibility='private',\n disk_format='raw',\n container_format='bare',\n min_disk=int(image_min_disk),\n min_ram=int(image_min_ram))\n logger.debug(\"glance image upload: '%s' -> '%s'\", fimg.name, image_name)\n glance.images.upload(image.id, fimg)\n\n except Exception:\n logger.exception(\"Creating and uploading Glance image '%s' failed\", image_name)\n return None\n\n return image", "def create(self, req, body):\n try:\n image = body[\"image\"]\n except (KeyError, TypeError):\n msg = _(\"Invalid image entity\")\n raise webob.exc.HTTPBadRequest(explanation=msg)\n\n try:\n image_name = image[\"name\"]\n instance_id = image[\"serverId\"]\n except KeyError as missing_key:\n msg = _(\"Image entity requires %s\") % missing_key\n raise webob.exc.HTTPBadRequest(explanation=msg)\n\n context = req.environ[\"nova.context\"]\n props = {'instance_id': instance_id}\n\n try:\n image = self._compute_service.snapshot(context,\n instance_id,\n image_name,\n extra_properties=props)\n except exception.InstanceBusy:\n msg = _(\"Server is currently creating an image. 
Please wait.\")\n raise webob.exc.HTTPConflict(explanation=msg)\n\n return dict(image=self.get_builder(req).build(image, detail=True))", "def _create(self, imagespec):\n if not self.dockerioapi.is_repo_name(imagespec):\n Msg().err(\"Error: must specify image:tag or repository/image:tag\")\n return False\n (imagerepo, tag) = self._check_imagespec(imagespec)\n if imagerepo:\n return ContainerStructure(self.localrepo).create_fromimage(\n imagerepo, tag)\n return False", "def create(self, name, image, command, **kwargs):\n return", "def create_image(self, image=None):\n if image is None:\n image = self.image\n\n current_image_id = self.check_for_updated_image(image)\n if current_image_id is not None:\n return current_image_id\n\n def wait_for_image_state(account, image_id, state, timeout=300):\n state = state.lower()\n current_state = 'unknown'\n\n while current_state != state:\n rc, image_info = self.cal.get_image(account, image_id)\n current_state = image_info.state.lower()\n\n if current_state in ['failed']:\n raise ValidationError('Image [{}] entered failed state while waiting for state [{}]'.format(image_id, state))\n\n if current_state != state:\n time.sleep(1)\n\n if current_state != state:\n logger.error('Image still in state [{}] after [{}] seconds'.format(current_state, timeout))\n raise TimeoutError('Image [{}] failed to reach state [{}] within timeout [{}]'.format(image_id, state, timeout))\n\n return image_info\n\n logger.debug(\"Uploading VM Image: %s\", image.name)\n rc, image_id = self.cal.create_image(self.account, image)\n assert rc == RwTypes.RwStatus.SUCCESS\n image_info = wait_for_image_state(self.account, image_id, 'active')\n\n return image_id", "def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):", "def create_image(user_id, image_name, tag1, tag2, tag3):\n\n image = Image(user_id=user_id, image_name=image_name, tag1=tag1, tag2=tag2, tag3=tag3)\n\n db.session.add(image)\n db.session.commit()\n\n return image", "def create(self, req):\n image_data = json.loads(req.body)['image']\n\n # Ensure the image has a status set\n image_data.setdefault('status', 'active')\n\n context = None\n try:\n image_data = db_api.image_create(context, image_data)\n return dict(image=make_image_dict(image_data))\n except exception.Duplicate:\n msg = (\"Image with identifier %s already exists!\" % id)\n logger.error(msg)\n return exc.HTTPConflict(msg)\n except exception.Invalid, e:\n msg = (\"Failed to add image metadata. 
Got error: %(e)s\" % locals())\n logger.error(msg)\n return exc.HTTPBadRequest(msg)", "def test_create_image(self):\n pass", "def create_infrastructure():\n\n create_bucket_if_not_exists(BUCKET)", "def create_image(self, instance_id, name,\r\n description=None, no_reboot=False):\r\n params = {'InstanceId' : instance_id,\r\n 'Name' : name}\r\n if description:\r\n params['Description'] = description\r\n if no_reboot:\r\n params['NoReboot'] = 'true'\r\n img = self.get_object('CreateImage', params, Image, verb='POST')\r\n return img.id", "def make_image(self, path):\n\t\treturn None", "def image_create_and_upload(self, upload=True, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(self.__name__ + \"-image\")\n kwargs['name'] = name\n\n params = dict(kwargs)\n image = self.create_image(**params)\n self.assertEqual('queued', image['status'])\n if not upload:\n return image\n\n file_content = data_utils.random_bytes()\n image_file = io.BytesIO(file_content)\n self.client.store_image_file(image['id'], image_file)\n\n image = self.client.show_image(image['id'])\n return image", "def create_image(self):\n # FIXME: this needs to happen, like now.\n logging.debug(\"Machine.create_image entered\")\n logging.debug(\"Old image: %s\" % self.image_id)\n logging.info(\"Creating image of machine %s (%s)\" % (self.machine_name, self.id))\n \n # create the image of the storage\n self.time_image_start = time.time()\n # logging.debug(\"Starting image...\")\n # # m = self.machines[machine]\n # # img_id = m.create_image()\n # # image_map[machine] = img_id\n # self.cloudserver.create_image(\"%s_%s\" % (self.machine_name, self.id), callback=create_image_callback)\n # # time_image_finish = time.time()\n # logging.debug(\"%s imaged to %s\" % (self.machine_name, res.id))\n # elapsed = time_image_finish - time_image_start\n # logging.info(\"Image finished after %d seconds\" % elapsed)\n # # update the configuration for this image\n if self.cloudserver:\n m = self.cloudserver\n else:\n logging.warn(\"Trying to create image for non-existent server %s\" % str(self))\n return False\n try:\n logging.debug(\"starting image creation %s\" % str(self))\n image_id = m.create_image(\"%s_%s\" % (m.name, m.id))\n image = cs.images.get(image_id)\n logging.debug(\"starting waiter thread for %s\" % str(image))\n image = pyrax.utils.wait_until(image, \"status\", [\"ACTIVE\", \"ERROR\"], callback=self.create_image_callback, interval=60)\n\n logging.info(\"%s imaged to %s\" % (m, image))\n # elapsed = time_image_finish - time_image_start\n return image_id\n except novaclient.exceptions.Conflict, e:\n logging.debug(str(e))\n logging.warn(\"Image already in progress for %s\" % str(self))\n return False", "def new_image(path, attendance, data):\n try:\n return Images.objects.get_or_create(path=path.replace(IMG_FOLDER, '', 1),\n attendance=attendance, data=json.dumps(data))\n except:\n return None", "def create(self, vm, snap):\n img, data, request = self.img, self.data, self.request\n\n assert request.dc == vm.dc\n\n data.pop('dc_bound', None) # Default DC binding cannot be changed when creating Image for the first time\n data['dc'] = vm.dc_name # Dc parameter has to be set by system as we are forcing the task to be dc_bound\n\n img.dc_bound = vm.dc # Default DC binding set to VM DC (cannot be changed, ^^^)\n img.ostype = vm.ostype # Default ostype inherited from VM (cannot be changed)\n img.size = snap.disk_size # Default disk size inherited from VM (cannot be changed)\n img.owner = request.user # Default user (can be 
changed)\n img.alias = img.name # Default alias (can be changed)\n img.status = Image.OK # Set status for preliminary checks\n # Validate data (manifest info)\n ser = ImageSerializer(request, img, data)\n\n if not ser.is_valid():\n return FailureTaskResponse(request, ser.errors, dc_bound=self.dc_bound)\n\n # Preliminary checks\n self._run_checks(img_server_must_exist=True) # This sets self.img_server to ImageVm()\n\n if vm.status not in (vm.RUNNING, vm.STOPPED, vm.STOPPING, vm.FROZEN):\n raise VmIsNotOperational\n\n if snap.status != snap.OK:\n raise ExpectationFailed('VM snapshot status is not OK')\n\n # Build manifest and set PENDING status\n # noinspection PyUnusedLocal\n data = ser.data\n img.manifest = img.build_manifest()\n img.status = Image.PENDING\n img.src_vm = vm\n img.src_snap = snap\n img.save()\n # Set snapshot status to PENDING\n snap.save_status(snap.PENDING)\n # Build command\n cmd_add = ' ; e=$?; cat %s/%s/manifest 2>&1; exit $e' % (self.img_server.datasets_dir, img.uuid)\n cmd = 'esimg create -s %s@%s' % (snap.zfs_filesystem, snap.zfs_name)\n\n if self.img_server.node != vm.node:\n cmd += ' -H %s' % vm.node.address\n\n return self._run_execute(LOG_IMAGE_CREATE, cmd, stdin=img.manifest.dump(), delete_on_error=True, vm=vm,\n snap=snap, error_fun=lambda: snap.save_status(snap.OK), detail_dict=ser.detail_dict(),\n cmd_add=cmd_add)", "def create_one_image(attrs=None):\n attrs = attrs or {}\n\n # Set default attribute\n image_info = {\n 'id': str(uuid.uuid4()),\n 'name': 'image-name' + uuid.uuid4().hex,\n 'owner': 'image-owner' + uuid.uuid4().hex,\n 'container_format': '',\n 'disk_format': '',\n 'min_disk': 0,\n 'min_ram': 0,\n 'is_public': True,\n 'protected': False,\n 'properties': {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'},\n 'status': 'status' + uuid.uuid4().hex,\n }\n\n # Overwrite default attributes if there are some attributes set\n image_info.update(attrs)\n\n return image.Image(**image_info)", "def create_image(image_url, owner, permission=\"PRIVATE\"):\n\n image = Image(image_url=image_url,\n owner=owner,\n permission=permission)\n \n db.session.add(image)\n db.session.commit()\n return image", "def create_image(\n ami_name,\n instance_id=None,\n instance_name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n description=None,\n no_reboot=False,\n dry_run=False,\n filters=None,\n):\n\n instances = find_instances(\n instance_id=instance_id,\n name=instance_name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n return_objs=True,\n filters=filters,\n )\n\n if not instances:\n log.error(\"Source instance not found\")\n return False\n if len(instances) > 1:\n log.error(\n \"Multiple instances found, must match exactly only one instance to create\"\n \" an image from\"\n )\n return False\n\n instance = instances[0]\n try:\n return instance.create_image(\n ami_name, description=description, no_reboot=no_reboot, dry_run=dry_run\n )\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return False", "def createImageFolder():\n try:\n os.makedirs(imageFolder)\n except FileExistsError:\n # Exists, delete contents instead\n clearImageFolder()", "def create_and_upload_image(cls, data=None, **kwargs):\n if 'name' not in kwargs:\n name = data_utils.rand_name(\"kb-image\")\n kwargs['name'] = name\n\n params = cls._get_create_params(**kwargs)\n if data:\n # NOTE: On glance v1 API, the data should be passed on\n # a header. 
Then here handles the data separately.\n params['data'] = data\n\n image = cls.client.create_image(**params)\n # Image objects returned by the v1 client have the image\n # data inside a dict that is keyed against 'image'.\n if 'image' in image:\n image = image['image']\n cls.created_images.append(image['id'])\n # Upload image to glance artifactory.\n file_content = data_utils.random_bytes()\n image_file = six.BytesIO(file_content)\n cls.client.store_image_file(image['id'], image_file)\n cls.kingbird_client = kb_client.Client(\n kingbird_url=KINGBIRD_URL, auth_token=cls.client.token,\n project_id=cls.client.tenant_id)\n return image", "def create(self,\n name=None,\n image=None,\n network=None,\n size=None,\n location=None,\n timeout=360,\n key=None,\n secgroup=None,\n ip=None,\n user=None,\n public=True,\n group=None,\n metadata=None,\n cloud=None,\n label=None,\n **kwargs):\n image_use = None\n flavor_use = None\n\n # keyname = Config()[\"cloudmesh\"][\"profile\"][\"user\"]\n # ex_keyname has to be the registered keypair name in cloud\n\n \"\"\"\n https://docs.openstack.org/openstacksdk/latest/user/connection.html#openstack.connection.Connection.create_server\n\n \"\"\"\n\n if 'flavor' in kwargs and size is None:\n size = kwargs['flavor']\n\n if network is not None:\n pass\n elif 'network' in kwargs:\n network = kwargs['network']\n elif 'network' in self.default:\n network = self.default['network']\n\n # Guess user name\n\n if user is None:\n user = Image.guess_username(image)\n # image_name = image.lower()\n # if image_name.startswith(\"cc-\"):\n # user = \"cc\"\n # if \"centos\" in image_name:\n # user = \"centos\"\n # elif \"ubuntu\" in image_name:\n # user = \"ubuntu\"\n\n # get IP\n\n if not ip and public:\n ip = self.find_available_public_ip()\n # pprint(entry)\n\n elif ip is not None:\n entry = self.list_public_ips(ip=ip, available=True)\n if len(entry) == 0:\n print(\"ip not available\")\n raise ValueError(f\"The ip can not be assigned {ip}\")\n\n if type(group) == str:\n groups = Parameter.expand(group)\n\n vm_label = label or name\n\n\n banner(\"Create Server\")\n Console.msg(f\" Cloud: {self.cloud}\")\n Console.msg(f\" Name: {name}\")\n Console.msg(f\" Label: {vm_label}\")\n Console.msg(f\" User: {user}\")\n Console.msg(f\" IP: {ip}\")\n Console.msg(f\" Image: {image}\")\n Console.msg(f\" Size: {size}\")\n Console.msg(f\" Network: {network}\")\n Console.msg(f\" Public: {public}\")\n Console.msg(f\" Key: {key}\")\n Console.msg(f\" Location: {location}\")\n Console.msg(f\" Timeout: {timeout}\")\n Console.msg(f\" Secgroup: {secgroup}\")\n Console.msg(f\" Group: {group}\")\n Console.msg(f\" Groups: {groups}\")\n Console.msg(\"\")\n\n # image = self.cloudman.compute.find_image(image)\n # flavor = self.cloudman.compute.find_flavor(size)\n # network = self.cloudman.network.find_network(network)\n\n try:\n server = self.cloudman.create_server(name,\n network=network,\n flavor=size,\n image=image,\n key_name=key,\n security_groups=[secgroup],\n timeout=timeout\n # tags=groups,\n # wait=True\n )\n\n \"\"\"\n server = self.cloudman.create_server(name,\n networks=[\n {\"uuid\": \"0fa8824d-8a3f-4890-90e1-c3596b3511c6\"}],\n flavor=size,\n image=image,\n key_name=key,\n security_groups=[secgroup],\n timeout=timeout\n # tags=groups,\n # wait=True\n )\n \"\"\"\n server['user'] = user\n server = self.cloudman.wait_for_server(server)\n server = self.cloudman.add_ips_to_server(server, ips=ip)\n variables = Variables()\n variables['vm'] = name\n if metadata is None:\n metadata = {}\n\n #\n # due 
to metadata limitation in openstack do not add the creation time\n #\n\n if 'created' in metadata:\n del metadata['created']\n\n metadata['image'] = image\n metadata['flavor'] = size\n metadata['label'] = vm_label\n\n self.cloudman.set_server_metadata(server, metadata)\n\n self.add_secgroup(name=secgroup)\n\n # server = self.cloudman.compute.wait_for_server(server)\n\n # print(\"ssh -i {key} root@{ip}\".format(\n # key=PRIVATE_KEYPAIR_FILE,\n # ip=server.access_ipv4))\n\n except openstack.exceptions.ResourceTimeout:\n Console.error(\"Problem starting vm in time.\")\n raise TimeoutError\n\n except Exception as e:\n Console.error(\"Problem starting vm\", traceflag=True)\n print(e)\n raise RuntimeError\n\n return self.update_dict(server, kind=\"vm\")[0]", "def create(self, name, containerFormat, diskFormat, isPublic, pathFile):\n if isPublic:\n isPublic = \"public\"\n else:\n isPublic = \"private\"\n\n image = self.client.images.create(name=name, container_format=containerFormat, disk_format=diskFormat, is_public=isPublic)\n # Thread ?\n self.client.images.upload(image.id, open(pathFile, 'rb'))\n while image.status == \"queued\":\n image = self.find(image_id=image.id)\n time.sleep(1)\n return self.find(image_id=image.id)", "def create_image(name: str, distribution: str, apt_repo: str, release_track: str, release_label: str, flavour: str,\n organization: str, docker_registry: str, rosdistro_path: pathlib.Path, timestamp:str,\n publish: bool = False):\n\n # Read configuration files\n common_config = yaml.safe_load((rosdistro_path / 'config/recipes.yaml').open())['common']\n recipe = yaml.safe_load((rosdistro_path / 'config/images.yaml').open())['images']\n distro = recipe[name]['distro']\n build_type = recipe[name]['build_type']\n env = source_file(f'{os.environ[\"BUNDLE_ROOT\"]}/{distro}/setup.bash')\n today = timestamp\n extra_vars: List[Any] = []\n\n try:\n package = recipe[name]['package']\n provision_file = recipe[name]['provision_file']\n except KeyError:\n package = '/tailor-image'\n provision_file = f'{build_type}.yaml'\n\n env['ANSIBLE_CONFIG'] = find_package(package, 'ansible.cfg', env)\n template_path = f'/tailor-image/environment/image_recipes/{build_type}/{build_type}.json'\n provision_file_path = find_package(package, 'playbooks/' + provision_file, env)\n\n optional_vars = []\n optional_var_names = ['username', 'password', 'extra_arguments_ansible',\n 'ansible_command', 'description', 'disk_size', 'group']\n\n for var in optional_var_names:\n if var in recipe[name]:\n optional_vars.extend(['-var', f'{var}={recipe[name][var]}'])\n\n if build_type == 'docker':\n image_name = f'tailor-image-{name}-{distribution}-{release_label}'\n docker_registry_data = docker_registry.replace('https://', '').split('/')\n ecr_server = docker_registry_data[0]\n ecr_repository = docker_registry_data[1]\n extra_vars = [\n '-var', f'type={build_type}',\n '-var', f'bundle_flavour={flavour}',\n '-var', f'image_name={image_name}',\n '-var', f'ecr_server={ecr_server}',\n '-var', f'os_version={distribution}',\n '-var', f'ecr_repository={ecr_repository}',\n '-var', f'aws_access_key={os.environ[\"AWS_ACCESS_KEY_ID\"]}',\n '-var', f'aws_secret_key={os.environ[\"AWS_SECRET_ACCESS_KEY\"]}'\n ]\n\n if not publish:\n extra_vars += ['-except', 'publish']\n\n # Make sure we remove old containers before creting new ones\n run_command(['docker', 'rm', '-f', 'default'], check=False)\n\n elif build_type in ['bare_metal', 'lxd'] and publish:\n # Get information about base image\n base_image = 
recipe[name]['base_image'].replace('$distribution', distribution)\n\n # Get disk size to use\n disk_size = recipe[name].get('disk_size', 9) # In GB\n\n # Get base image\n base_image_local_path = '/tmp/' + base_image\n base_image_key = release_label + '/images/' + base_image\n click.echo(f'Downloading image from {base_image_key}')\n try:\n boto3.resource('s3').Bucket(apt_repo).download_file(base_image_key, base_image_local_path)\n except botocore.exceptions.ClientError:\n click.echo(f'Unable to download base image from {base_image_key}, creating a new one')\n run_command(['bash',\n '/tailor-image/environment/create_base_image.bash',\n f'{base_image_local_path}',\n f'{distribution}'])\n boto3.resource('s3').Bucket(apt_repo).upload_file(base_image_local_path, base_image_key)\n\n # Enable nbd kernel module, necesary for qemu's packer chroot builder\n run_command(['modprobe', 'nbd'])\n\n # Resize image\n run_command(['qemu-img', 'resize', base_image_local_path, '30G'])\n\n # Copy image\n tmp_image = base_image_local_path.replace('disk1', 'disk1-resized')\n run_command(['cp', base_image_local_path, tmp_image])\n\n # Resize partition inside qcow image\n run_command(['virt-resize', '--expand', '/dev/sda1', base_image_local_path, tmp_image])\n run_command(['mv', tmp_image, base_image_local_path])\n\n # Generate image name\n image_name = f'{organization}_{name}_{distribution}_{release_label}_{today}'\n\n extra_vars = [\n '-var', f'image_name={image_name}',\n '-var', f's3_bucket={apt_repo}',\n '-var', f'iso_image={base_image_local_path}',\n '-var', f'distribution={distribution}',\n '-var', f'disk_size={disk_size}'\n ]\n\n # Make sure to clean old image builds\n run_command(['rm', '-rf', '/tmp/images'])\n\n elif build_type == 'ami':\n image_name = f'{organization}_{name}_{distribution}_ami_{release_label}'\n # Get ami-id for base image\n source_ami_id = recipe[name]['source_ami'].get(distribution)\n\n if not source_ami_id:\n click.echo(f'You need to specify a bas AMI for the desired distribution {distribution}')\n sys.exit(1)\n\n # Increase fow how long we wait for image to be ready. 
Default is 30 minutes, sometime it might take longer\n env['AWS_MAX_ATTEMPTS'] = '90' # minutes\n env['AWS_POLL_DELAY_SECONDS'] = '60' # Poll for status every minute\n\n extra_vars = [\n '-var', f'build_date={today}',\n '-var', f'image_name={image_name}',\n '-var', f'name={name}',\n '-var', f'source_ami_id={source_ami_id}',\n '-var', f'distribution={distribution}',\n '-var', f'release_label={release_label}',\n '-var', f'aws_access_key={os.environ[\"AWS_ACCESS_KEY_ID\"]}',\n '-var', f'aws_secret_key={os.environ[\"AWS_SECRET_ACCESS_KEY\"]}'\n ]\n else:\n return 0\n\n extra_vars.extend(optional_vars)\n\n click.echo(f'Building {build_type} image with: {provision_file}', err=True)\n\n command = ['packer', 'build',\n '-var', f'playbook_file={provision_file_path}',\n '-var', f'organization={organization}',\n '-var', f'bundle_track={release_track}',\n '-var', f'bundle_version={release_label}'] + extra_vars + ['-timestamp-ui', template_path]\n\n run_command(command, env=env, cwd='/tmp')\n\n if build_type in ['bare_metal', 'lxd'] and publish:\n update_image_index(release_label, apt_repo, common_config, image_name)", "def create_molns_image(self):\n file_to_remove = None\n try:\n dockerfile, file_to_remove = self._create_dockerfile(installSoftware.InstallSW.get_command_list())\n image_id = self.docker.build_image(dockerfile)\n return image_id\n except Exception as e:\n logging.exception(e)\n raise ProviderException(\"Failed to create molns image: {0}\".format(e))\n finally:\n if file_to_remove is not None:\n os.remove(file_to_remove)", "def test_image_exists_local(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.images.build.assert_not_called()", "def upload_image (auth_url, one_username, one_password, f, server_ip, server_username, server_password, image_dir, ssh_port=22, image_type = \"OS\"):\n import os\n\n try:\n ssh_scp_files(server_ip, server_username, server_password, f, image_dir, ssh_port)\n #ssh_transfer_files(server_ip, server_username, server_password, f, image_dir, ssh_port)\n\n # sife of the file in bytes\n size = os.path.getsize(f)\n # convert to MB\n size = int(size/(1024*1024))\n\n # Resgister the image\n conn = pyone.OneServer(\n auth_url,\n session=\"{0}:{1}\".format(one_username, one_password)\n )\n name, file_extension = os.path.splitext(f)\n description = f\n source = image_dir + f\n \n # find the default datastore\n dsid = 0\n datastores = conn.datastorepool.info()\n for ds in datastores.DATASTORE:\n if ds.NAME == \"default\":\n dsid = ds.ID\n break\n\n # creation of the image template and registration\n #template='''\\nNAME=\"%s\"\\nPATH=\"%s\"\\nTYPE=\"%s\"\\nDESCRIPTION=\"%s\"\\nSIZE=\"%d\"''' % \\\n template='''\\nNAME=\"%s\"\\nPATH=\"%s\"\\nTYPE=\"%s\"\\nDRIVER=\"qcow2\"\\nDESCRIPTION=\"%s\"\\nSIZE=\"%d\"''' % \\\n (name, source, image_type, description, size*3)\n logger.debug(\"template: {}\".format(template))\n logger.debug(\"DSID: {}\".format(dsid))\n r = conn.image.allocate(template,dsid)\n except Exception as e:\n logger.exception(\"Failed uploading image: {}\".format(str(e)))\n delete_remote_file(server_ip, server_username, server_password, str(image_dir + f), ssh_port)\n return \"Failed uploading image: {}\".format(str(e)), 400\n delete_remote_file(server_ip, server_username, server_password, str(image_dir + f), ssh_port)\n return \"Image uploaded successfully\", 201", "def create_snapshot(self):\n # Don't create if it already exists\n if self.image_available(self.snapshot_name):\n print('Snapshot already exists')\n 
return\n\n self.spawn()\n\n sleep_len = 10\n # Make sure the network is up\n t = 0\n networks = None\n while not networks:\n try:\n networks = self.instances[0].networks\n except:\n # not ready yet\n pass\n print('Waited {0}s for network to be up'.format(t))\n if not networks:\n time.sleep(sleep_len)\n t += sleep_len\n self.instances[0] = self.nova.servers.get(self.instances[0].id)\n\n # make sure an ip is received that we can ssh to\n # self.instances[0].add_floating_ip('129.16.125.236')\n t = 0\n ip = None\n while not ip:\n networks = self.instances[0].networks\n for key in networks:\n if 'IPv4' in key:\n ips = networks[key]\n for i in ips:\n # change to not if we want a floating ip\n if i.startswith('192'):\n ip = i\n break\n break\n if not ip:\n time.sleep(sleep_len)\n print('Waited {0}s for ip'.format(t))\n t += sleep_len\n self.instances[0] = self.nova.servers.get(self.instances[0].id)\n\n # make sure cloud init finishes\n t = 0\n while not self._exists_remote(ip):\n print('Waited {0}s for cloud-init to finish'.format(t))\n time.sleep(sleep_len*3)\n t += sleep_len*3\n # create snapshot and make sure it gets active\n self.nova.servers.create_image(self.instances[0].id, self.snapshot_name, None)\n snapshot = self.nova.glance.find_image(self.snapshot_name)\n\n # Wait until snap\n t = 0\n status = snapshot.status\n while status != 'active':\n print('Waited {0}s for snapshot. Status is {1}'.format(t, status))\n snapshot = self.nova.glance.find_image(self.snapshot_name)\n status = snapshot.status\n time.sleep(sleep_len*3)\n t += sleep_len*3\n print('Snapshot successfully uploaded. Now terminating worker.')\n # kill created worker\n self.terminate_all()", "def setup(self):\n\n exists = [i for i in self.client.images() if self.image in i['RepoTags']]\n\n # Only pull the image if we don't have it\n if not exists or self.pull:\n self.client.pull(self.image)\n self.logger.debug(\"Pulled {}\".format(self.image))\n\n self.container = self.client.create_container(\n image=self.image,\n host_config=self.host_config,\n name=self.name,\n command=self.command,\n environment=self.environment\n )\n self.logger.debug(\"Created container {}\".format(self.container['Id']))", "def add_image(self, image):\r\n metadata = self.collection.find_one( { \"_id\": image.identifier } )\r\n if metadata:\r\n raise ImageFactoryException(\"Image %s already managed, use image_with_id() and save_image()\" % (image.identifier))\r\n\r\n image.persistent_manager = self\r\n basename = self.storage_path + '/' + str(image.identifier)\r\n body_path = basename + BODY_EXT\r\n image.data = body_path\r\n try:\r\n if not os.path.isfile(body_path):\r\n open(body_path, 'w').close()\r\n self.log.debug('Created file %s' % body_path)\r\n except IOError as e:\r\n self.log.debug('Exception caught: %s' % e)\r\n\r\n self._save_image(image)" ]
[ "0.6636871", "0.6582884", "0.6358659", "0.6310891", "0.6277768", "0.62519765", "0.62417156", "0.62242746", "0.61912566", "0.61556125", "0.6065953", "0.605718", "0.6038255", "0.60134196", "0.6013053", "0.60043067", "0.5977835", "0.5965737", "0.5945465", "0.59138167", "0.5913095", "0.5897699", "0.58966595", "0.58908194", "0.58858216", "0.58175915", "0.58019024", "0.5777877", "0.57446444", "0.57437104" ]
0.71788144
0
Returns the image file reference. If the image file does not exist, it is downloaded.
def __get_image_file(self):
    if file_utils.file_exists(self.image_file_path):
        return open(self.image_file_path, 'r')
    else:
        if not os.path.exists(self.download_path):
            os.makedirs(self.download_path)
        logger.info('Found existing image file')
        return self.__download_image_file()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __download_image_file(self):\n if not file_utils.file_exists(self.image_file_path):\n logger.info('Downloading Image from - ' + self.image_url)\n return file_utils.download(self.image_url, self.download_path)", "def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)", "def download_image(self, url):\r\n file_path = os.path.join(self.temp_dir, 'image.png')\r\n urlretrieve(url, file_path)\r\n return file_path", "def get_image_url():", "def download_image(filename, url):\n if not url:\n return url\n refresh_needed = False\n if xbmcvfs.exists(filename) and filename == url:\n # only overwrite if new image is different\n return filename\n else:\n if xbmcvfs.exists(filename):\n xbmcvfs.delete(filename)\n refresh_needed = True\n if xbmcvfs.copy(url, filename):\n if refresh_needed:\n refresh_image(filename)\n return filename\n\n return url", "def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)", "def download(correlation_id, image_url, output_path=None):\n try:\n response = requests.get(image_url, timeout=15)\n if response.ok:\n if not output_path:\n output_path = os.path.join(TMP_FOLDER, '{}.png'.format(correlation_id))\n with open(output_path, 'wb') as f:\n f.write(response.content)\n except Exception as e:\n log.warn('Error downloading [{}]: [{}]'.format(image_url, e))\n output_path = None\n return output_path", "def download(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self).download(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "def download_image(url, img_path):\n img_data = requests.get(url).content\n with open(img_path, 'wb') as file:\n file.write(img_data)\n img_path = os.path.abspath(img_path)\n return img_path", "def get_image():\n response = send_file(tempFileObj, as_attachment=True, attachment_filename='marked_image.png')\n return response", "def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img", "def download_pil_image(self, url):\r\n return Image.open(urlopen(url))", "def download(self):\n data = urllib.urlopen(self.remoteurl).read()\n s = StringIO.StringIO(data)\n return Image.open(s)", "def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))", "def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()", "def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image", "def downloadImage(self, file_id):\n # check if using full url or partial\n url = \"https://www.sendspace.com/file/{}\".format(file_id) if len(file_id) == 6 else file_id\n\n try:\n r = self.getRequest(url, {}) # GET request for image\n except (RuntimeError) as e:\n raise RuntimeError(\"Error getting download URL for image from \" +\n \"sendspace.\") from e\n\n # the download image retrieved from the uploadImage method does not\n # return a direct download URL. 
This parses the request to download\n # for the direct download URL.\n dd_url = BeautifulSoup(r.text, \"lxml\").find(\"a\", {\"id\": \"download_button\"})['href']\n\n # download the actual image from the dd_url\n try:\n return BytesIO(self.getRequest(dd_url, {}).content)\n except (RuntimeError) as e:\n raise RuntimeError(\"Error downloading the image from \" +\n \"sendspace.\") from e", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def download_image(image_url, image_name, collection_id):\n try:\n response = requests.get(image_url)\n folder_path = imgs_directory + '/' + collection_id\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n image_path = folder_path + '/' + image_name\n # image_path = os.path.join(folder_path, image_name)\n with open(image_path, 'wb') as f:\n f.write(response.content)\n return image_path\n except Exception as e:\n print(f\"An error occurred while downloading image {image_name}. Error message: {e}\")\n return None", "def download_content(content_link, output_dir):\n if content_link is None: return None\n res = requests.get(content_link, stream=True)\n try:\n res.raise_for_status()\n except requests.exceptions.HTTPError:\n return None\n img_name, img_format = parse_image_url(res.url)\n filepath = '{}/{}.{}'.format(output_dir, img_name, img_format)\n\n with open(filepath, mode='wb') as image_file:\n for chunk in res.iter_content(chunk_size=chunk_size):\n image_file.write(chunk)\n\n return abspath(filepath)", "def download_image(url, filename):\n r = requests.get(url)\n open(filename, 'wb').write(r.content)", "def get_image_link(self):\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all(\"b\")[1].text\n return image_tag['src'], image_name\n\n # image = td.find_all('img')\n # print(image)\n # if image is not None:\n # return urljoin(self.base_url, image['src'])", "def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"vesicle\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def get_image(self, image_id):\n url = self.get_url(image_id)\n return image_util.load_image_from_url(url) if url else None", "def download_image(image_url, temp_path):\n # uuid_name = str(uuid.uuid4())\n # image_postfix = image_url.split('.')[-1]\n # temp_image_name = \"\".join([parent_path, os.path.sep, uuid_name, \\\n # '.', image_postfix])\n urllib.request.urlretrieve(image_url, temp_path)", "def image_downloader(url, file_path, file_name):\n response = requests.get(url, stream=True)\n with open(file_path + \"/\" + file_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path']", "def 
dl_image(img_name, img_url):\n path = os.path.join(base_path, img_name)\n res = requests.get(img_url)\n with open(path, 'wb') as fout:\n fout.write(res.content)", "def store_image(self, http_client, link_hash, src, config):\r\n # check for a cache hit already on disk\r\n image = self.read_localfile(link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n # no cache found download the image\r\n data = self.fetch(http_client, src)\r\n if data:\r\n image = self.write_localfile(data, link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n return None" ]
[ "0.8210503", "0.76130706", "0.73505574", "0.6925539", "0.6918392", "0.69012165", "0.6831053", "0.6781228", "0.67241114", "0.6714646", "0.66244733", "0.66124684", "0.6587157", "0.65493", "0.65449435", "0.6515331", "0.65109783", "0.6509099", "0.6494998", "0.64621663", "0.6431124", "0.6429719", "0.64146274", "0.6413479", "0.6397327", "0.63930285", "0.63875836", "0.6374046", "0.63660604", "0.63512415" ]
0.81886154
1
Downloads the image file
def __download_image_file(self):
    if not file_utils.file_exists(self.image_file_path):
        logger.info('Downloading Image from - ' + self.image_url)
        return file_utils.download(self.image_url, self.download_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)", "def download_image(self, url):\r\n file_path = os.path.join(self.temp_dir, 'image.png')\r\n urlretrieve(url, file_path)\r\n return file_path", "def __get_image_file(self):\n if file_utils.file_exists(self.image_file_path):\n return open(self.image_file_path, 'r')\n else:\n if not os.path.exists(self.download_path):\n os.makedirs(self.download_path)\n logger.info('Found existing image file')\n return self.__download_image_file()", "def download_image(url, filename):\n r = requests.get(url)\n open(filename, 'wb').write(r.content)", "def download(self):\n\n # os.open *should* give a thread-safe way to exlusivly open files\n filepath = self.film\n try:\n # os.O_BINARY is only avilable and needed on windows\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_BINARY\n except:\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n try:\n fd = os.open(filepath, flags)\n except:\n return\n\n try:\n response = self.session.get(self.filmurl, stream=True)\n if response.status_code == 200:\n for chunk in response.iter_content(1024):\n os.write(fd, chunk)\n except:\n # Remove partial img file if request or stream fails\n os.close(fd)\n os.remove(filepath)", "def download(self):\n data = urllib.urlopen(self.remoteurl).read()\n s = StringIO.StringIO(data)\n return Image.open(s)", "def dl_image(img_name, img_url):\n path = os.path.join(base_path, img_name)\n res = requests.get(img_url)\n with open(path, 'wb') as fout:\n fout.write(res.content)", "def downloadImage(self, file_id):\n # check if using full url or partial\n url = \"https://www.sendspace.com/file/{}\".format(file_id) if len(file_id) == 6 else file_id\n\n try:\n r = self.getRequest(url, {}) # GET request for image\n except (RuntimeError) as e:\n raise RuntimeError(\"Error getting download URL for image from \" +\n \"sendspace.\") from e\n\n # the download image retrieved from the uploadImage method does not\n # return a direct download URL. 
This parses the request to download\n # for the direct download URL.\n dd_url = BeautifulSoup(r.text, \"lxml\").find(\"a\", {\"id\": \"download_button\"})['href']\n\n # download the actual image from the dd_url\n try:\n return BytesIO(self.getRequest(dd_url, {}).content)\n except (RuntimeError) as e:\n raise RuntimeError(\"Error downloading the image from \" +\n \"sendspace.\") from e", "def image_downloader(url, file_path, file_name):\n response = requests.get(url, stream=True)\n with open(file_path + \"/\" + file_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)", "def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)", "def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img", "def download(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self).download(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "def download_image(url, img_path):\n img_data = requests.get(url).content\n with open(img_path, 'wb') as file:\n file.write(img_data)\n img_path = os.path.abspath(img_path)\n return img_path", "def download_pil_image(self, url):\r\n return Image.open(urlopen(url))", "def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()", "def downloadImage(self, url):\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n data = response.read()\n io = cStringIO.StringIO(data)\n return PIL.Image.open(io)", "def download(url, out_folder):\n \n filename = \"2.png\"\n \n outpath = os.path.join(out_folder, filename)\n \n if url.lower().startswith(\"http\"):\n urlretrieve(url, outpath)\n else:\n urlretrieve(urlparse.urlunparse(parsed), outpath)", "def download_img(url,name):\n resp = download(url)\n if (resp!=None):\n image = np.asarray(bytearray(resp), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n cv2.imwrite(name,image)\n return", "async def dl_image(url, filename):\n\ttry:\n\t\twith aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url) as resp:\n\t\t\t\ttest = await resp.read()\n\t\t\t\twith open('data/tmp/'+filename.lower(), \"wb\") as f:\n\t\t\t\t\tf.write(test)\n\t\t\t\treturn 0\n\texcept Exception as e:\n\t\tprint('[!ERROR!] 
in Get image')\n\t\tprint(e)\n\t\treturn -1", "def download(self, url):\n req = self.request(url)\n inputfile, outputfile = BytesIO(urlopen(req).read()), BytesIO()\n\n img = Image.open(inputfile)\n img = img.convert(\"RGB\") if img.mode != \"RGB\" else img\n img.thumbnail((192, 192), Image.ANTIALIAS)\n img.save(outputfile, \"JPEG\")\n\n self.image.save(os.path.basename(\n self._clean_url(url)),\n ContentFile(outputfile.getvalue()),\n save=False,\n )", "def download_image(url):\n request = urllib.request.Request(url, headers={'Authorization': 'Bearer %s' % BOT_TOKEN})\n return urllib.request.urlopen(request).read()", "def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))", "def download(self, url, path_to_dir):\n\n if not os.path.exists(path_to_dir):\n os.makedirs(path_to_dir)\n\n raw_data = self.__class__.get_raw_data(url)\n path_to_image = os.path.join(path_to_dir, url.split('/')[-1].split('?')[0])\n with open(path_to_image, 'wb') as f:\n self.__class__.copy_to(raw_data, f)\n\n return path_to_image", "def download_image(image):\n local = settings.DOWNLOAD_IMAGE_ROOT + '/'\n url = image.url\n thumbnail_url = image.thumbnail_url\n # download image\n local_image = open(local+image.hash+image.ext, 'wb')\n local_image.write(requests.get(url).content)\n local_image.close()\n # download thumbnail_image\n local_image = open(local+image.hash+'b.jpg', 'wb')\n local_image.write(requests.get(thumbnail_url).content)\n local_image.close()", "def download_image(url):\n buffer = BytesIO()\n download_from_url(url, buffer, pbar=False)\n buffer.seek(0)\n return Image.open(buffer)", "def download_image(image_url, image_name, collection_id):\n try:\n response = requests.get(image_url)\n folder_path = imgs_directory + '/' + collection_id\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n image_path = folder_path + '/' + image_name\n # image_path = os.path.join(folder_path, image_name)\n with open(image_path, 'wb') as f:\n f.write(response.content)\n return image_path\n except Exception as e:\n print(f\"An error occurred while downloading image {image_name}. 
Error message: {e}\")\n return None", "def download_image(full_image_url, image_name):\r\n\r\n logging.debug('download_image({}, {})'.format(full_image_url, image_name))\r\n\r\n if use_proxy:\r\n img_data = requests.get(full_image_url, proxies=proxies, timeout=15, verify=False).content\r\n else:\r\n img_data = requests.get(full_image_url).content\r\n dir_path = os.path.join(os.environ['TEMP'],'WarietyWallpaperImages')\r\n os.makedirs(dir_path, exist_ok=True)\r\n with open(os.path.join(dir_path, image_name), 'wb') as handler:\r\n handler.write(img_data)\r\n image_filesize = os.stat(os.path.join(dir_path, image_name)).st_size\r\n logging.debug('download_image - dir_path = {}'.format(dir_path))\r\n logging.debug('download_image - image_name = {}'.format(image_name))\r\n logging.debug('download_image - image_filesize = {}'.format(image_filesize))\r\n return os.path.join(dir_path, image_name)", "def download_image_from(link, directory, name):\n try:\n img_content = requests.get(link).content\n image_file = io.BytesIO(img_content)\n image = Image.open(image_file).convert('RGB')\n image.save(f'./{directory}/{name}.png', 'PNG', quality=100, subsampling=0)\n except:\n pass", "def download_img_and_save(url, path):\n import requests\n a = url.find(\"UW-EauClaireCOVID-19DataTrackerDashboard\")\n b = len(url)\n fn = url[a:b].replace('/','_')\n fn = '{}/{}'.format(path,fn)\n with open(fn, \"wb\") as f:\n f.write(requests.get(url).content)", "def download_single(data):\n url = data[0]\n image_id = data[1]\n target_path = data[2]\n\n if os.path.exists(target_path):\n return\n\n try:\n response = requests.get(url, timeout=30)\n response.raise_for_status()\n except:\n LOGGER.warning('Failed to fetch url %s (id=%d)', url, image_id)\n return\n\n try:\n content = response.content\n image = Image.open(BytesIO(content))\n except:\n LOGGER.warning('Failed to capture image at url %s (id=%d)', url, image_id)\n return\n\n if not image.format == 'JPEG':\n try:\n image = image.convert('RGB')\n except:\n logging.warning('Failed to convert RGB, %s (id=%d)', url, image_id)\n return\n\n try:\n image.save(target_path, format='JPEG', quality=100)\n except:\n LOGGER.warning('Failed to save url %s (id=%d)', url, image_id)\n return\n\n return" ]
[ "0.77523816", "0.7711154", "0.7289846", "0.7198905", "0.71218306", "0.70532143", "0.7027885", "0.700101", "0.69916904", "0.69828", "0.6942051", "0.6933775", "0.6932163", "0.69152945", "0.6915093", "0.69147563", "0.6903136", "0.6877438", "0.6865126", "0.6805982", "0.6801481", "0.67802095", "0.6775176", "0.67751527", "0.6770729", "0.6725159", "0.6720376", "0.6702792", "0.6676403", "0.66755193" ]
0.8033153
0
Get a single Manhua by its id
def manhuas_id_get(id):  # noqa: E501
    return query_manager.get_resource(id=id,
                                      rdf_type_uri=MANHUA_TYPE_URI,
                                      rdf_type_name=MANHUA_TYPE_NAME,
                                      kls=Manhua)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, id):\n return Matstamm.find_by_id(id)", "def get_object(id):", "def get(self, _id):", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def get_by_id(cls, id):\n return cls.query().get(id)", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def get(self, cls, id):\n pass", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get_by_id(cm_response, **data):\n return cm_response", "def get(self, id):\n return self.__model__.query.get(id)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def select_meme(self, id):\n cursor = self.conn.cursor()\n cursor.execute(f\"select meme_name from memes where meme_id = ?\", (id, ))\n result = cursor.fetchone()\n cursor.close()\n return result[0]", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def get(self, request, id):\n unit = Unit.objects.get(pk=id)\n serializer = UnitSerializer(unit)\n return Response(serializer.data)", "def get(self, id):\n tmp = userDao.get_one_entry(id)\n return tmp", "def get_object(self, id, **args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)", "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "def get(self, id):\n return Freigabe.find_by_id(id)", "def get(self, id):\n return {'id': id}", "def get(id):\n return User.query.filter_by(id=id).first()", "def by_id(cls, id):\n\t\treturn DBSession.query(Power).filter(Power.power_id == id).first()", "def get_by_id(cls, id):\n try:\n return cls.objects.get(id=id)\n except(IntegrityError, OperationalError):\n return None", "async def get(self, hmm_id: str) -> HMM:\n document = await self._mongo.hmm.find_one({\"_id\": hmm_id})\n\n if document:\n return HMM(**document)\n\n raise ResourceNotFoundError()", "def get (self, *k, **kw):\n if kw.has_key ('id'):\n return self.get_by_id (kw['id'])", "def get_by_id(self, id):\n return self._mzml_parser.get_by_id(id)" ]
[ "0.7278572", "0.6999828", "0.6932419", "0.68721354", "0.67988896", "0.67988896", "0.6786893", "0.67067605", "0.67003685", "0.6599542", "0.6571097", "0.65241545", "0.6489759", "0.64433765", "0.64374375", "0.6410633", "0.6384549", "0.635959", "0.6356493", "0.63436085", "0.6339078", "0.63345325", "0.6304907", "0.6293445", "0.62706614", "0.6257795", "0.62469566", "0.6243735", "0.6242976", "0.6230251" ]
0.82101154
0
Private method for finding of download url and name of last version of frida server for android
def __get_url_and_name(self, arch: str):
    page = requests.get(self.releases_url)
    page_text = page.text
    soup = BeautifulSoup(page_text, features="html.parser")
    regex = re.compile('frida-server-[0-9]{1,2}.[0-9]{1,2}.[0-9]{1,2}-android-' + arch, re.IGNORECASE)
    frida_server_name = soup.find(text=regex)[0:-3]
    release_version = re.findall("[0-9]{1,2}.[0-9]{1,2}.[0-9]{1,2}", frida_server_name)[0]
    return (self.releases_url + '/download/' + release_version + '/' + frida_server_name + ".xz"), frida_server_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_download_url(self):\n devpage = requests.get(DEVPAGE_URL)\n soup = BeautifulSoup(devpage.text, 'html.parser')\n rt = soup.find(id='rightcolumn')\n anchors = rt.findAll('a')\n for anchor in anchors:\n href = anchor.attrs['href']\n if href.endswith('.zip'):\n return href\n\n # if got this far, no GTFS download link found\n return None", "def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename", "def get_download_url():\n return _DOWNLOAD", "def __get_url_addr(self):\n request = urlopen(self.url)\n version = request.readline()\n request.close()\n request = urlparse.urlparse(self.url)\n unparsed_url = urlparse.urlunparse((request.scheme, request.netloc,\n request.path, '', '', ''))\n updated_url = urlparse.urljoin(unparsed_url, version + '/' +\n self.file_name)\n return updated_url", "def get_download_path(self, version=\"latest\"):\n raise NotImplementedError", "def get_latest_agent_filename():\n return urlopen(\n urljoin(\n URL_PATH,\n 'latest_agent'\n )\n ).read().strip()", "def get_server_version():\n url_address = 'https://raw.githubusercontent.com/muhammadfredo/FrMaya/master/FrMaya/version.py'\n url_data = urllib2.urlopen(url_address).read(200)\n result = re.search(r'(\\d+), (\\d+), (\\d+)', url_data, re.MULTILINE)\n if result:\n version_list = [int(v) for v in result.groups()]\n return version_list\n else:\n raise ValueError('Cannot get server version!!!')", "def test_get_url(self):\n package = make_package(version=\"1.1+g12345\")\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'abcdef.cloudfront.net')\n self.assertEqual(parts.path, '/bcc4/mypkg/mypkg-1.1%2Bg12345.tar.gz')\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Key-Pair-Id', 'Expires',\n 'Signature'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['Key-Pair-Id'][0],\n self.settings['storage.cloud_front_key_id'])", "def _download(url):\n \n filename = url.split('/')[-1]\n if os.path.isfile(filename):\n info('Using pre-existed file {} from local system.'.format(filename))\n else:\n info('Downloading {} from OMA Database.'.format(url.split('/')[-1]))\n filename, _ = urlretrieve(url, filename)\n return filename", "def get_download_url(self, version=\"latest\", os_name=None, bitness=None):\n raise NotImplementedError", "def apkdownloadmirror_get_apk_version(soup):\n return list(\n soup.select('.apks .title span')[0].children\n )[1].strip().split(' ')[0]", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_lvfs_detached_signature():\n url = \"https://cdn.fwupd.org/downloads/firmware.xml.gz.asc\"\n ua_string = \"fwupd/1.4.1\"\n r = requests.get(url, headers={\"User-Agent\": ua_string})\n return r.text", "def crawl_version(self, response ):\n hxs = HtmlXPathSelector(response)\n l = ApkcrawlItem()\n l['site'] = self.DOMAIN\n l['is_entry'] = False\n l['entry_url'] = response.url\n l['referer_url'] = response.meta['Referer']\n dw_url = hxs.select(\"//div[@class='installbtn']/a[@class='downtopc']/@href\").extract().pop()\n if dw_url:\n l['download_url'] = urlparse.urljoin( response.url , dw_url )\n l['download_url'] = self.refactor_app_url( l['download_url'] )\n return l", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = 
session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def load_latest_url(self):\n try:\n fp = open(self.HTTPD_LATEST_URL_FILE)\n latest_url = fp.read()\n fp.close()\n return latest_url\n except:\n return ''", "def apkdownloadmirror_get_apk_url(soup, **_):\n build_id = list(\n soup.select('.apks .title span')[0].children\n )[1].strip().split(' ')[1].strip('()')\n date = parse_date(soup.find(text=re.compile(r'\\s*Date:\\s*')).next)\n apk_id = soup.select('a[data-tag]')[0].\\\n attrs['data-tag'][len('apkupdate-'):]\n rnd = ''.join(\n (\n random.choice(string.ascii_letters + string.digits)\n for _ in range(62)\n )\n )\n return ApkDownloadMirrorManager.APK_DOWNLOAD_URL.format(\n rnd=rnd,\n year=date.year,\n month=date.month,\n apk_id=apk_id,\n build_id=build_id\n )", "def get_download_url(self, ha):\n return create_ipa_url(ha)", "def torrent_filename(view_url, debug):\n r = requests.get(view_url)\n tree = fromstring(r.content)\n title = tree.findtext('.//title')\n filename = title.replace('NT > ', '')\n torrent = filename +'.torrent'\n\n if not debug:\n return torrent\n\n if debug == 'Y':\n # Print variables before returning value\n print \"Title :\", title\n print \"Filename :\", filename\n print \"Torrent :\", torrent\n return torrent", "def _extract_download_link(self, response1):\n \n found = re.search('<ul class=\"dataset\">(.*)</ul>', response1.content, re.IGNORECASE)\n link = \"\"\n if found:\n filelist_HTML = found.group(0).strip()\n found_link = re.search('href=\"(.*)\">', found.group(0), re.IGNORECASE)\n if found_link:\n link = found_link.group(1).strip()\n \n self.assertTrue(link!=\"\",\"Could not find any list of files after rendering html '%s'\" % response1.content)\n return link", "def get_chembl_version(url) -> str:\n\n p = urlparse(url)\n host = p.hostname\n path_str = p.path\n path_obj = pathlib.Path(path_str)\n path_dir = path_obj.parent\n filename = path_obj.name\n\n ftp = ftplib.FTP(host=host)\n ftp.login()\n ftp.cwd(path_str)\n\n files = ftp.nlst()\n\n for f in files: # for each file, see if regex matches. if matches, return this file.\n print(\"F\", f)\n matches = re.match(\"chembl_(\\d+)_sqlite.tar.gz\", f)\n if matches:\n return matches.group(1)\n\n return False", "def get_file_name_from_resposne(r):\n if not r: \n return None\n return get_file_name_from_cd(r.headers.get())", "def _get_version(self):", "def get_mesh_version(url) -> str:\n\n p = urlparse(url)\n host = p.hostname\n path_str = p.path\n path_obj = pathlib.Path(path_str)\n path_dir = path_obj.parent\n filename = path_obj.name\n\n ftp = ftplib.FTP(host=host)\n ftp.login()\n ftp.cwd(path_str)\n\n files = ftp.nlst()\n\n for f in files: # for each file, see if regex matches. 
if matches, return this file.\n matches = re.match(\"d(\\d+).bin\", f)\n if matches:\n return matches.group(1)\n\n return False", "def __getFile_urllib(self, _src, _dst):\n\n #-------------------- \n # Open the local destination file \n # so that it can start reading in the buffers.\n #-------------------- \n try:\n dstDir = os.path.dirname(_dst) \n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n dstFile = open(_dst, \"wb\")\n except Exception as e:\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n return\n\n\n\n #-------------------- \n # Construct the request and authentication handler\n #-------------------- \n xnatUrl = Xnat.path.makeXnatUrl(self.host, _src)\n request = urllib.request.Request(xnatUrl)\n request.add_header(\"Authorization\", \n self.authHeader['Authorization'])\n\n\n\n #-------------------- \n # Get the response from the XNAT host.\n #-------------------- \n try:\n response = urllib.request.urlopen(request)\n\n\n\n\n #-------------------- \n # If the urllib.request version fails then use http.client.\n # See get_http.client for more details.\n #-------------------- \n #except urllib.request.HTTPError, e:\n except Exception as e:\n #print(str(e))\n #print(f\"{_src} {_dst}\")\n #print(d)\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n return\n\n\n #-------------------- \n # Get the content size, first by checking log, then by reading \n # header\n #-------------------- \n self.downloadTracker['downloadedSize']['bytes'] = 0 \n self.downloadTracker['totalDownloadSize'] = \\\n self.getFileSize(xnatUrl)\n if not self.downloadTracker['totalDownloadSize']['bytes']:\n # If not in log, read the header\n if response.headers and \"Content-Length\" in response.headers:\n self.downloadTracker['totalDownloadSize']['bytes'] = \\\n int(response.headers[\"Content-Length\"]) \n self.downloadTracker['totalDownloadSize']['MB'] = \\\n Xnat.utils.bytesToMB(\\\n self.downloadTracker['totalDownloadSize']['bytes'])\n\n\n #-------------------- \n # Start the buffer reading cycle by\n # calling on the buffer_read function above.\n #-------------------- \n bytesRead = self.__bufferRead(xnatUrl, dstFile, response)\n dstFile.close()", "def get_frida_version(self):\n if not self.available():\n return None\n\n result = self._do_adb_command('shell frida --version')\n if result:\n if 'not found' in result or 'No such file or directory' in result:\n result = self._do_adb_command('shell frida-server --version')\n if result and 'not found' in result:\n return None\n elif result:\n self._alternate_frida_name = True\n else:\n return None\n\n result = result.split(os.linesep)\n check_ver = result[len(result) - 2].replace('\\r', '').split('.')\n if len(check_ver) == 3:\n try:\n v_major = int(check_ver[0])\n v_minor = int(check_ver[1])\n v_patch = int(check_ver[2])\n\n if v_major >= 12 and v_minor >= 8:\n return '.'.join(check_ver)\n else:\n #print('frida version is outdated')\n return '.'.join(check_ver)\n except ValueError:\n return None\n\n return None", "def get_latest_version_link(self):\n return self.get_latest_version().dbgap_link", "def __load_last_short_url():\n try:\n return pickle.load(open(\"last_short.p\", \"rb\"))\n except IOError:\n return ''", "def new_version_id_from_response(response):\n new_version_url = response.json()[\"links\"][\"latest_draft\"]\n return int(\n pathlib.PurePosixPath(\n urllib.parse.urlparse(new_version_url).path\n ).parts[-1]\n )", "def get_url_from_era_def(_era,is_signal,maod_version): \n var_format_val=10\n\n 
GD_File='config/GoogleDocLink'+maod_version+'.txt'\n if is_signal:\n GD_File='config/GoogleDocLinkSignal'+maod_version+'.txt'\n\n GD_File_READ = open (GD_File,\"r\")\n\n for line in GD_File_READ:\n if len(line.split()) ==2 :\n if line.split()[0] == _era:\n GD_File_READ.close()\n return line.split()[1]\n print ('Error in assigning GD page from era')\n return '-11111'" ]
[ "0.6429049", "0.6176465", "0.6167387", "0.6135402", "0.6062519", "0.59826505", "0.59443015", "0.5865929", "0.58576757", "0.5854857", "0.58214295", "0.57686764", "0.5742846", "0.57394814", "0.5711802", "0.56878114", "0.5658921", "0.5655116", "0.56454843", "0.5639779", "0.5626221", "0.5602086", "0.5598653", "0.55905765", "0.5579324", "0.55668", "0.55533606", "0.5527241", "0.55179507", "0.5506036" ]
0.7424144
0
creates stochastic version of Gardner's gene toggle model
def create_model_gene_toggle(max_s1_copies=100, max_s2_copies=100):
    s1_count = lambda s1, s2 : s1
    s2_count = lambda s1, s2 : s2
    s1_birth = lambda s1, s2 : 16.0/(1.0+s2)
    s1_death = lambda s1, s2 : 1.0*s1
    s2_birth = lambda s1, s2 : 50.0/(1.0+(s1**2.5))
    s2_death = lambda s1, s2 : 1.0*s2
    propensities = (s1_birth, s1_death, s2_birth, s2_death)
    transitions = ((1, 0), (-1, 0), (0, 1), (0, -1))
    shape = (max_s1_copies+1, max_s2_copies+1)
    initial_state = (0, )*2
    return model.create(
        name = 'Gardner\'s gene toggle according to Munsky & Khammash',
        species = ('S1', 'S2'),
        species_counts = (s1_count, s2_count),
        reactions = ('*->S1', 'S1->*', '*->S2', 'S2->*'),
        propensities = propensities,
        transitions = transitions,
        shape = shape,
        initial_state = initial_state
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _hg_model_fn(features, labels, mode, params):\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n decay_factor = params.decay_factor\n decay_step = params.decay_step\n init_learning_rate = params.init_learning_rate\n num_stacks = params.num_stacks\n num_joints = params.num_joints\n\n tower_features = features\n if mode == tf.estimator.ModeKeys.PREDICT:\n if num_gpus < 1:\n tower_labels = [None]\n else:\n tower_labels = [None for i in range(num_gpus)]\n else:\n tower_labels = labels\n\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n if mode == tf.estimator.ModeKeys.TRAIN:\n batch_size = params.train_batch_size / num_devices\n else:\n batch_size = params.eval_batch_size / num_devices\n\n with tf.variable_scope('hg', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n mode, weight_decay, tower_features[i][0], tower_labels[i],\n data_format, params.batch_norm_decay,\n params.batch_norm_epsilon, params.num_stacks, params.num_out, params.n_low, params.num_joints, batch_size,params.seq_length)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. 
/ len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n\n learning_rate = tf.train.exponential_decay(init_learning_rate, tf.train.get_global_step(), decay_step, decay_factor, staircase=True, name= 'learning_rate')\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n \n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n if mode==tf.estimator.ModeKeys.EVAL:\n hm = predictions['heatmaps']\n stacked_labels = tf.concat(labels[0][0][0], axis=0)\n \n gt_labels = tf.transpose(stacked_labels,[1,0,3,4,2])\n\n joint_accur = []\n for j in range(params.seq_length):\n for i in range(params.num_joints):\n joint_accur.append(_pck_hm(hm[j,:,-1, :, :,i], gt_labels[j,:, :, :, i], params.eval_batch_size/num_devices))\n accuracy = tf.stack(joint_accur)\n metrics = {'Mean Pixel Error': tf.metrics.mean(accuracy)}\n tf.logging.info('Accuracy op computed')\n else:\n metrics = None\n \n else:\n train_op = None\n loss = None\n train_hooks = None\n metrics = None\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n \n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)", "def softclippingHGMevaluation(input_generator,branches,iden_method,Plot,reference=None):\n for t in range(8,11):\n t = t / 10.0\n p = 1.0 - t\n input_signal = input_generator.GetOutput()\n nl_functions = [nlsp.function_factory.softclip(power=p),]*branches\n filter_spec_tofind = nlsp.log_bpfilter(branches=branches, input=input_signal)\n ref_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=filter_spec_tofind,\n max_harmonics=range(1,branches+1))\n\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n # sine = sumpf.modules.SineWaveGenerator(frequency=5000.0,phase=0.0,samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n sine = sumpf.modules.SweepGenerator(samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n ref_nlsystem.SetInput(sine)\n iden_nlsystem.SetInput(sine)\n if reference is not None:\n reference = 
nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n\n if Plot is True:\n plot.relabelandplot(sumpf.modules.FourierTransform(ref_nlsystem.GetOutput()).GetSpectrum(),\"Reference System\",show=False)\n plot.relabelandplot(sumpf.modules.FourierTransform(iden_nlsystem.GetOutput()).GetSpectrum(),\"Identified System\",show=False)\n print \"SNR between Reference and Identified output for symmetric hardclipping HGM(threshold:%r): %r\" %(t,nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput()))", "def sgd_model(params):\n\n \n if (params['random']):\n params['loss'] = random.choice(['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron', 'squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'])\n params['penalty'] = random.choice(['none', 'l2', 'l1', 'elasticnet'])\n params['alpha'] = random.choice([0.001, 0.0001, 0.00001])\n model = SGDClassifier(\n loss=params['loss'],\n penalty=params['penalty'],\n alpha=params['alpha']\n )\n\n return model", "def get_model(n_obs=100, ess=50, ug=None, seed_obs=None):\n if ug is None:\n ug = np.zeros((4, 4))\n ug[0, 1:3] = 1\n ug[1:3, 0] = 1\n\n m = elfi.new_model()\n priors = []\n dag, node_ordering, oc = ug_to_dag(ug)\n para_mat = mn_para_mat(ug)\n combs_to_node = 2 ** np.sum(dag, axis=0)\n n_dim = np.sum(combs_to_node).astype(int)\n alpha = ess / 2 / oc.shape[0] * np.ones(n_dim)\n no_connections = np.where(np.sum(dag, axis=0) == 0)[0].astype(int)\n alpha[no_connections] = ess / 2\n\n for i in np.arange(n_dim):\n name_prior = 'a_{}'.format(i)\n prior_beta = elfi.Prior('beta',\n alpha[i],\n alpha[i],\n model=m,\n name=name_prior)\n priors.append(prior_beta)\n\n sim_fn = partial(gmn_simulate,\n ug=ug,\n n=n_obs,\n ess=ess,\n dag=dag,\n node_ordering=node_ordering,\n oc=oc,\n para_mat=para_mat)\n a_true = 0.2 * np.ones((n_dim, 1))\n y = sim_fn(a_true)\n\n elfi.Simulator(sim_fn, *priors, observed=y, name='GMN')\n elfi.Summary(sumstats, m['GMN'], oc.shape[0], n_obs, name='S')\n elfi.Distance('euclidean', m['S'], name='d')\n\n return m", "def generate_scenario_2(seed=1996, permanent_save=True, sigma_process=0.01, sigma_meas_radar=3, sigma_meas_ais=1,\n timesteps=20):\n start_time = datetime.now()\n\n # specify seed to be able repeat example\n np.random.seed(seed)\n\n # combine two 1-D CV models to create a 2-D CV model\n transition_model = CombinedLinearGaussianTransitionModel(\n [ConstantVelocity(sigma_process), ConstantVelocity(sigma_process)])\n\n # starting at 0,0 and moving NE\n truth = GroundTruthPath([GroundTruthState([0, 1, 0, 1], timestamp=start_time)])\n\n # generate truth using transition_model and noise\n for k in range(1, timesteps):\n truth.append(GroundTruthState(\n transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)),\n timestamp=start_time + timedelta(seconds=k)))\n\n # Simulate measurements\n # Specify measurement model for radar\n measurement_model_radar = LinearGaussian(\n ndim_state=4, # number of state dimensions\n mapping=(0, 2), # mapping measurement vector index to state index\n noise_covar=np.array([[sigma_meas_radar, 0], # covariance matrix for Gaussian PDF\n [0, sigma_meas_radar]])\n )\n\n # Specify measurement model for AIS (Same as for radar)\n measurement_model_ais = LinearGaussian(\n ndim_state=4,\n mapping=(0, 2),\n noise_covar=np.array([[sigma_meas_ais, 0],\n [0, sigma_meas_ais]])\n )\n\n # generate \"radar\" measurements\n measurements_radar = []\n for state in truth:\n 
measurement = measurement_model_radar.function(state, noise=True)\n measurements_radar.append(Detection(measurement, timestamp=state.timestamp))\n\n # generate \"AIS\" measurements\n measurements_ais = []\n for state in truth:\n measurement = measurement_model_ais.function(state, noise=True)\n measurements_ais.append(Detection(measurement, timestamp=state.timestamp))\n\n if permanent_save:\n save_folder_name = seed.__str__()\n else:\n save_folder_name = \"temp\"\n\n save_folder = \"../scenarios/scenario2/\" + save_folder_name + \"/\"\n\n # save the ground truth and the measurements for the radar and the AIS\n store_object.store_object(truth, save_folder, \"ground_truth.pk1\")\n store_object.store_object(measurements_radar, save_folder, \"measurements_radar.pk1\")\n store_object.store_object(measurements_ais, save_folder, \"measurements_ais.pk1\")\n store_object.store_object(start_time, save_folder, \"start_time.pk1\")\n store_object.store_object(measurement_model_radar, save_folder, \"measurement_model_radar.pk1\")\n store_object.store_object(measurement_model_ais, save_folder, \"measurement_model_ais.pk1\")\n store_object.store_object(transition_model, save_folder, \"transition_model.pk1\")", "def generate_scenario_1(seed=1996, permanent_save=True, sigma_process=0.01, sigma_meas_radar=3, sigma_meas_ais=1):\n # specify seed to be able repeat example\n start_time = datetime.now()\n\n np.random.seed(seed)\n\n # combine two 1-D CV models to create a 2-D CV model\n transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(sigma_process),\n ConstantVelocity(sigma_process)])\n\n # starting at 0,0 and moving NE\n truth = GroundTruthPath([GroundTruthState([0, 1, 0, 1], timestamp=start_time)])\n\n # generate truth using transition_model and noise\n for k in range(1, 21):\n truth.append(GroundTruthState(\n transition_model.function(truth[k - 1], noise=True, time_interval=timedelta(seconds=1)),\n timestamp=start_time + timedelta(seconds=k)))\n\n # Simulate measurements\n # Specify measurement model for radar\n measurement_model_radar = LinearGaussian(\n ndim_state=4, # number of state dimensions\n mapping=(0, 2), # mapping measurement vector index to state index\n noise_covar=np.array([[sigma_meas_radar, 0], # covariance matrix for Gaussian PDF\n [0, sigma_meas_radar]])\n )\n\n # Specify measurement model for AIS\n measurement_model_ais = LinearGaussian(\n ndim_state=4,\n mapping=(0, 2),\n noise_covar=np.array([[sigma_meas_ais, 0],\n [0, sigma_meas_ais]])\n )\n\n # generate \"radar\" measurements\n measurements_radar = []\n for state in truth:\n measurement = measurement_model_radar.function(state, noise=True)\n measurements_radar.append(Detection(measurement, timestamp=state.timestamp))\n\n # generate \"AIS\" measurements\n measurements_ais = []\n state_num = 0\n for state in truth:\n state_num += 1\n if not state_num % 2: # measurement every second time step\n measurement = measurement_model_ais.function(state, noise=True)\n measurements_ais.append(Detection(measurement, timestamp=state.timestamp))\n\n if permanent_save:\n save_folder_name = seed.__str__()\n else:\n save_folder_name = \"temp\"\n\n save_folder = \"../scenarios/scenario1/\" + save_folder_name + \"/\"\n\n # save the ground truth and the measurements for the radar and the AIS\n store_object.store_object(truth, save_folder, \"ground_truth.pk1\")\n store_object.store_object(measurements_radar, save_folder, \"measurements_radar.pk1\")\n store_object.store_object(measurements_ais, save_folder, \"measurements_ais.pk1\")\n 
store_object.store_object(start_time, save_folder, \"start_time.pk1\")\n store_object.store_object(measurement_model_radar, save_folder, \"measurement_model_radar.pk1\")\n store_object.store_object(measurement_model_ais, save_folder, \"measurement_model_ais.pk1\")\n store_object.store_object(transition_model, save_folder, \"transition_model.pk1\")", "def eg4(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg4_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n noise_feature = np.random.randn(n, p_noise)\n stable_feature_dependent = np.zeros([n, p_stable])\n stable_feature_independent = np.random.randn(n, p_stable)\n for i in range(p_stable):\n stable_feature_dependent[:, i] = noise_feature[:, i % p_noise] + noise_feature[:,\n (i + 1) % p_noise] + 2 * np.random.randn(\n n) # still need noise\n stable_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n stable_depend_label = np.concatenate([stable_depend_label] * p_stable, axis=1)\n stable_feature = np.where(stable_depend_label < depend_ratio, stable_feature_dependent,\n stable_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n Y = np.matmul(stable_feature, b) + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg4'\n return data\n\n data_train = eg4_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg4_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def create_model(self, gwas_gen, gwas_phen):\n n_ind, n_snps = gwas_gen.eval().shape\n with pm.Model() as phenotype_model:\n beta_med = pm.Normal('beta_med',\n mu=self.vars['coef_mean'],\n sd=self.vars['coef_sd'],\n shape=(1, n_snps))\n \n mediator = pm.dot(beta_med, gwas_gen.T)\n intercept = pm.Normal('intercept', mu=0, sd=1)\n alpha = pm.Normal('alpha', mu=0, sd=1)\n phenotype_sigma = pm.HalfCauchy('phenotype_sigma',\n beta=self.vars['p_sigma_beta'])\n \n\n # Model Selection\n p = np.array([0.5, 0.5])\n mediator_model = pm.Bernoulli('mediator_model', p[1])\n\n # Model 1\n phenotype_mu_null = intercept\n\n # Model 2\n phenotype_mu_mediator = intercept + alpha * mediator\n\n phen = pm.DensityDist('phen',\n lambda value: pm.switch(mediator_model, \n pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value), \n pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)\n ),\n observed=gwas_phen)\n self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),\n pm.Metropolis()]\n\n \n if self.variational and self.mb:\n self.minibatch_RVs = [phen]\n self.minibatch_tensors = [gwas_gen, gwas_phen]\n return phenotype_model", "def GenerateS(s): \n for i in range(len(x_train)):\n for j in range(len(SVs)):\n index = int(SVs[j])\n s[i] = s[i] + alpha[index]*y_train[index] * Kernel(i, index) # (DotProduct(i, index)+1)**5\n return s", "def make_smiley_training_set(num_points=0, delta=0.05):\n log.out.info(\"Generating happy data.\")\n # Select coordinates to do an XOR like operation on\n coords = []\n bools = []\n x_min = 0.0\n x_max = 1.0\n y_min = 0.0\n y_max = 1.0\n for i in range(num_points):\n # Add 
num_points randomly\n coord_point = np.random.random(2)\n coord_point[0] = coord_point[0] * (x_max - x_min) + x_min\n coord_point[1] = coord_point[1] * (y_max - y_min) + y_min\n coords.append(coord_point)\n\n # Assign an xor boolean value to the coordinates\n for coord_point in coords:\n x = coord_point[0]\n y = coord_point[1]\n if (abs(x - 0.65) < delta) & (abs(y - 0.65) < (0.05+delta)):\n bools.append(True)\n elif (abs(x - 0.35) < delta) & (abs(y - 0.65) < (0.05+delta)):\n bools.append(True)\n elif ((x > 0.2) & (x < 0.8) &\n (abs(y - ((1.5 * (x - 0.5))**2 + 0.25)) < delta)):\n bools.append(True)\n else:\n bools.append(False)\n\n # Build training vectors\n train_in = None\n train_out = None\n for i, coord in enumerate(coords):\n # Need to initialize the arrays\n if i == 0:\n train_in = np.array([coord])\n train_out = np.array([[bools[i]]])\n else:\n train_in = np.append(train_in, np.array([coord]), axis=0)\n train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)\n\n train_out = train_out.T\n return train_in, train_out", "def test_oss_sk_estimator():\n check_estimator(OneSidedSelection)", "def model(n, input_dims, encs, eval_points, stim_func, conn_synapse=0.1,\n probe_synapse=0.01):\n\n print 'Building model.'\n with nengo.Network() as net:\n \n neuron_type = nengo.LIF() \n\n ipt = nengo.Node(stim_func)\n ens = nengo.Ensemble(n,\n dimensions=input_dims,\n encoders=encs,\n eval_points=eval_points,\n neuron_type=neuron_type)\n\n nengo.Connection(ipt, ens, synapse=None, transform=1)\n conn = nengo.Connection(ens, ens, synapse=conn_synapse)\n\n probe = nengo.Probe(ens, attr='decoded_output',\n synapse=probe_synapse)\n \n print 'Building simulation.' \n return nengo.Simulator(net), probe", "def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == 
\"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"", "def generate_syntetic_data( self ):\n label_list = []\n img_ground_truth = np.zeros( ( self.img_h, self.img_w ), dtype = np.float32 )\n img = np.zeros( ( self.img_h, self.img_w, 3 ), dtype = np.float32 )\n class_blue = np.zeros( ( self.img_h, self.img_w ), dtype = np.float32 )\n class_red = np.zeros( ( self.img_h, self.img_w ), dtype = np.float32 )\n\n '''line, square, grill, rectangle, cross'''\n if self.option_shape == 'line':\n sample_type = 0\n elif self.option_shape == 'circle':\n sample_type = 1\n elif self.option_shape == 'rectangle':\n sample_type = 2\n else: \n sample_type = np.random.randint( 3 ) #0,1,2\n\n if sample_type == 0:\n self.generate_line( img_ground_truth, img, class_blue, class_red )\n elif sample_type == 1:\n self.generate_circle( img_ground_truth, img, class_blue, class_red )\n else:\n self.generate_rectangle( img_ground_truth, img, class_blue, class_red )\n\n label_list.append( class_blue )\n label_list.append( class_red )\n # 2 classes generates\n label_all = np.dstack( label_list ).astype( np.float32 ) \n r, g, b = cv2.split( img )\n img_bgr = cv2.merge( [ b, g, r ] )\n\n gen_adj = GenerateAdjMatrx( type_dist = self.type_dist )\n A_gt = gen_adj.adjmatrx_groundthuth( img_ground_truth )\n\n B_in = gen_adj.adjmatrx_groundthuth(img_ground_truth * 0)\n\n return img_bgr, img_ground_truth, label_all, A_gt, B_in", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . 
etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def spikingModel(wEE, wEI, wIE, wII, stim_e, stim_i,\n time=1000, dt=0.1, Vth=1.0, Vre=0.0,\n tau_e=15.0, tau_i=10.0, ref_e=5.0, ref_i=5.0, \n syntau2_e=3.0, syntau2_i=2.0, syntau1=1.0):\n\n T = np.arange(0,time,dt)\n nE = wEE.shape[0]\n nI = wII.shape[0]\n\n Ve = np.zeros((nE,len(T)))\n Vi = np.zeros((nI,len(T)))\n # Set initial conditions\n Ve = np.random.uniform(0,1,size=(nE,))\n Vi = np.random.uniform(0,1,size=(nI,))\n # 
Instantiate synaptic currents empty matrix\n Ie = np.zeros((nE,len(T)))\n Ii = np.zeros((nI,len(T)))\n # Instantiate spiking matrix\n spkE = np.zeros((nE,time))\n spkI = np.zeros((nI,time))\n # Instantiate synaptic input matrix (temporally downsampled)\n synE = np.zeros((nE,time))\n synI = np.zeros((nI,time))\n\n bin_spkE = np.zeros((nE,))\n bin_spkI = np.zeros((nI,))\n # Synaptic rise gating variable\n xrse_ee = np.zeros((nE,))\n xdec_ee = np.zeros((nE,))\n xrse_ei= np.zeros((nI,))\n xdec_ei = np.zeros((nI,))\n xrse_ie = np.zeros((nE,))\n xdec_ie = np.zeros((nE,))\n xrse_ii= np.zeros((nI,))\n xdec_ii = np.zeros((nI,))\n\n\n # Set random biases from a uniform distribution\n # Excitatory neurons\n mu_e = np.random.uniform(1.1,1.2,size=(nE,))\n #mu_e = np.random.uniform(1.05,1.15,size=(nE,)) # Imbalanced state\n # Inhibitory neurons\n mu_i = np.random.uniform(1.0,1.05,size=(nI,))\n\n maxrate = 500 # max rate is 100hz\n maxtimes = int(np.round(maxrate*time/1000))\n timesE = np.zeros((nE,maxrate))\n timesI = np.zeros((nI,maxrate))\n ne_s = np.zeros((nE,),dtype=int)\n ni_s = np.zeros((nI,),dtype=int)\n\n refractory_e = np.zeros((nE,))\n refractory_i = np.zeros((nI,))\n for t in range(len(T)-1):\n ## Using RK2 method\n\n ## K1s\n Ve = Ve + dt*((mu_e + stim_e - Ve)/tau_e + Ie[:,t])\n Vi = Vi + dt*((mu_i + stim_i - Vi)/tau_i + Ii[:,t])\n\n # Synaptic gating\n # Excitatory synapses\n xrse_ee = xrse_ee - dt*xrse_ee/syntau1 + np.matmul(bin_spkE,wEE)\n xdec_ee = xdec_ee - dt*xdec_ee/syntau2_e + np.matmul(bin_spkE,wEE)\n xrse_ei = xrse_ei - dt*xrse_ei/syntau1 + np.matmul(bin_spkE,wEI)\n xdec_ei = xdec_ei - dt*xdec_ei/syntau2_e + np.matmul(bin_spkE,wEI)\n # Inhibitory dt*synapses\n xrse_ie = xrse_ie - dt*xrse_ie/syntau1 + np.matmul(bin_spkI,wIE)\n xdec_ie = xdec_ie - dt*xdec_ie/syntau2_i + np.matmul(bin_spkI,wIE)\n xrse_ii = xrse_ii - dt*xrse_ii/syntau1 + np.matmul(bin_spkI,wII)\n xdec_ii = xdec_ii - dt*xdec_ii/syntau2_i + np.matmul(bin_spkI,wII)\n\n # Calculate synaptic outputs given rise and decay times\n Ie[:,t+1] = (xdec_ee - xrse_ee)/(syntau2_e - syntau1) + (xdec_ie - xrse_ie)/(syntau2_i - syntau1)\n Ii[:,t+1] = (xdec_ii - xrse_ii)/(syntau2_i - syntau1) + (xdec_ei - xrse_ei)/(syntau2_e - syntau1)\n\n ## Spiking\n # Find which neurons exceed threshold (and are not in a refractory period)\n bin_spkE = np.multiply(Ve>Vth, refractory_e==0.0)\n bin_spkI = np.multiply(Vi>Vth, refractory_i==0.0)\n\n # Save spike time (and downsample to 1ms)\n tms = int(np.floor(T[t]))\n spkE[bin_spkE,tms] = 1 # spikes are okay - refractory period is 5ms, anyway\n spkI[bin_spkI,tms] = 1\n synE[:,tms] = synE[:,tms] + Ie[:,t]\n synI[:,tms] = synI[:,tms] + Ii[:,t]\n\n # Reset voltages\n Ve[bin_spkE] = Vre\n Vi[bin_spkI] = Vre\n\n # spike times\n timesE[bin_spkE,ne_s[bin_spkE]] = T[t+1]\n timesI[bin_spkI,ni_s[bin_spkI]] = T[t+1]\n ne_s[bin_spkE] = ne_s[bin_spkE] + 1\n ni_s[bin_spkI] = ni_s[bin_spkI] + 1\n\n\n # Set refractory period\n # Add a refractory time step to neurons who just spiked, and to those are still in a refractory period\n refractory_e = refractory_e + (bin_spkE * dt) + (refractory_e!=0) * dt \n refractory_i = refractory_i + (bin_spkI * dt) + (refractory_i!=0) * dt\n # Once refractory period is complete, allow to spike\n can_spike_again_e = np.round(refractory_e,1) == ref_e\n can_spike_again_i = np.round(refractory_i,1) == ref_i\n\n refractory_e[can_spike_again_e] = 0.0\n refractory_i[can_spike_again_i] = 0.0\n\n # Set neurons who are in their refractory to the baseline membrane potential\n in_refractory_e = 
refractory_e != 0.0\n in_refractory_i = refractory_i != 0.0\n\n Ve[in_refractory_e] = Vre\n Vi[in_refractory_i] = Vre\n \n return spkE, spkI, synE, synI, timesE, timesI, ne_s, ni_s", "def make_model(n_dimensions, seed):\n with spa.SPA(seed=seed) as model:\n # Create the state holding element\n model.state = spa.State(dimensions=n_dimensions,\n feedback=1.0, feedback_synapse=0.01)\n\n # Create the state transitions\n actions = spa.Actions(*(\"dot(state, {}) --> state = {}\".format(x, y) for\n (x, y) in zip(\"ABCDE\", \"BCDEA\")))\n model.bg = spa.BasalGanglia(actions=actions)\n model.thal = spa.Thalamus(model.bg)\n\n # Create the input for the initial state\n model.input = spa.Input(state=lambda t: 'A' if t < 0.05 else '0')\n\n return model", "def create_sts_model(train_x, train_y):\n model = GaussianNB()\n model.fit(train_x, train_y)\n save_model(model, \"simple_time_series\")\n return model", "def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? 
\n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)", "def eui_modesign():\n\n # setup design space\n # N ds ws wc lc g\n GAP = Struct()\n GAP.gd_min = np.array([1, 1e-3, 1e-3, 1e-3, 1e-3, 1e-5])\n GAP.gd_max = np.array([1e3, 1e-1, 1e-1, 1e-1, 1e-1, 1e-2])\n\n\n # setup genetic algorithm parameters--------------------------------------\n nobj=2 # number of objectives\n ngen=100 # number of generations\n npop = 100 # population size\n \n problem = MyProblem()\n problem.n_var = len(GAP.gd_min)\n problem.n_obj = nobj\n problem.n_constr = 2\n problem.xl = GAP.gd_min\n problem.xu = GAP.gd_max\n problem.elementwise_evaluation = True\n\n algorithm = NSGA2(\n pop_size=npop,\n eliminate_duplicates=True\n )\n \n # conduct the optimization-------------------------------------------------\n res = minimize(problem, algorithm, (\"n_gen\", ngen), verbose=True)\n\n # save results-------------------------------------------------------------\n return res", "def build_model(X_train, Y_train):\n #Choosing a straighforward single tree model to make training tractable in terms of time\n DTC = DecisionTreeClassifier(random_state = 11)\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=DTC))\n ])\n\n parameters = {'clf__estimator__criterion': [\"gini\", \"entropy\"],\n 'clf__estimator__splitter': [\"best\", \"random\"],\n 'clf__estimator__max_depth': randint(3, 6),\n 'clf__estimator__min_samples_split': randint(2,6)}\n\n grid_obj = RandomizedSearchCV(pipeline,parameters,n_iter=5, cv=5 )\n grid_obj.fit(X_train, Y_train)\n\n return grid_obj.best_estimator_", "def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_gru_node_gene(0, cfg)\n 
gene1_act = deepcopy(gene1.activation)\n gene1_bias = deepcopy(gene1.bias)\n gene1_bias_hh = deepcopy(gene1.bias_hh)\n gene1_bias_ih = deepcopy(gene1.bias_ih)\n gene1_weight_hh = deepcopy(gene1.weight_hh)\n gene1_weight_ih = deepcopy(gene1.weight_ih)\n gene1_weight_ih_full = deepcopy(gene1.weight_ih_full)\n gene2_act = deepcopy(gene2.activation)\n gene2_bias = deepcopy(gene2.bias)\n gene2_bias_hh = deepcopy(gene2.bias_hh)\n gene2_bias_ih = deepcopy(gene2.bias_ih)\n gene2_weight_hh = deepcopy(gene2.weight_hh)\n gene2_weight_ih = deepcopy(gene2.weight_ih)\n gene2_weight_ih_full = deepcopy(gene2.weight_ih_full)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.add_input_key(cfg=cfg, k=-1)\n gene3.update_weight_ih()\n gene3.activation = 'c'\n gene3.bias = -10\n gene3.bias_hh[0] = -10 # Make modifications directly on the vector\n gene3.bias_ih[0] = -10 # Make modifications directly on the vector\n gene3.weight_hh[0, 0] = -10 # Make modifications directly on the vector\n gene3.weight_ih[0, 0] = -10 # Make modifications directly on the vector\n gene3.weight_ih_full[0, 0] = -10 # Make modifications directly on the vector\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(np.linalg.norm(gene1.bias_hh - gene1_bias_hh), 0)\n self.assertEqual(np.linalg.norm(gene1.bias_ih - gene1_bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_hh - gene1_weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_ih - gene1_weight_ih), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_ih_full - gene1_weight_ih_full), 0)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.bias, gene2_bias)\n self.assertEqual(np.linalg.norm(gene2.bias_hh - gene2_bias_hh), 0)\n self.assertEqual(np.linalg.norm(gene2.bias_ih - gene2_bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_hh - gene2_weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_ih - gene2_weight_ih), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_ih_full - gene2_weight_ih_full), 0)", "def build_ensemble(model, ens):\n\n # Create random number generator\n rng = np.random.RandomState(model.seeds[ens])\n\n eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)\n\n # Set up signal\n model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),\n name=\"%s.signal\" % ens)\n model.add_op(Reset(model.sig[ens]['in']))\n\n # Set up encoders\n if isinstance(ens.neuron_type, Direct):\n encoders = np.identity(ens.dimensions)\n elif isinstance(ens.encoders, Distribution):\n encoders = get_samples(\n ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)\n else:\n encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)\n encoders /= npext.norm(encoders, axis=1, keepdims=True)\n\n # Build the neurons\n gain, bias, max_rates, intercepts = get_gain_bias(ens, rng)\n\n if isinstance(ens.neuron_type, Direct):\n model.sig[ens.neurons]['in'] = Signal(\n np.zeros(ens.dimensions), name='%s.neuron_in' % ens)\n model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']\n model.add_op(Reset(model.sig[ens.neurons]['in']))\n else:\n model.sig[ens.neurons]['in'] = Signal(\n np.zeros(ens.n_neurons), name=\"%s.neuron_in\" % ens)\n model.sig[ens.neurons]['out'] = Signal(\n np.zeros(ens.n_neurons), name=\"%s.neuron_out\" % ens)\n model.sig[ens.neurons]['bias'] = Signal(\n bias, name=\"%s.bias\" % ens, readonly=True)\n model.add_op(Copy(model.sig[ens.neurons]['bias'],\n 
model.sig[ens.neurons]['in']))\n # This adds the neuron's operator and sets other signals\n model.build(ens.neuron_type, ens.neurons)\n\n # Scale the encoders\n if isinstance(ens.neuron_type, Direct):\n scaled_encoders = encoders\n else:\n scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]\n\n model.sig[ens]['encoders'] = Signal(\n scaled_encoders, name=\"%s.scaled_encoders\" % ens, readonly=True)\n\n # Inject noise if specified\n if ens.noise is not None:\n model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)\n\n # Create output signal, using built Neurons\n model.add_op(DotInc(\n model.sig[ens]['encoders'],\n model.sig[ens]['in'],\n model.sig[ens.neurons]['in'],\n tag=\"%s encoding\" % ens))\n\n # Output is neural output\n model.sig[ens]['out'] = model.sig[ens.neurons]['out']\n\n model.params[ens] = BuiltEnsemble(eval_points=eval_points,\n encoders=encoders,\n intercepts=intercepts,\n max_rates=max_rates,\n scaled_encoders=scaled_encoders,\n gain=gain,\n bias=bias)", "def create_model(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window):\n model = smooth_tfactor(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window)\n opt = torch.optim.SGD(model.parameters(),lr=0.001)\n return model, opt", "def create_smurf():\n\n selfsup_transform = smurf_augmentation.build_selfsup_transformations(\n crop_height=FLAGS.selfsup_crop_height,\n crop_width=FLAGS.selfsup_crop_width,\n resize=FLAGS.resize_selfsup)\n\n smurf = SMURFNet(\n checkpoint_dir=FLAGS.checkpoint_dir,\n optimizer=FLAGS.optimizer,\n learning_rate=learning_rate_fn,\n only_forward=FLAGS.only_forward,\n dropout_rate=FLAGS.dropout_rate,\n selfsup_transform=selfsup_transform,\n fb_sigma_teacher=FLAGS.fb_sigma_teacher,\n fb_sigma_student=FLAGS.fb_sigma_student,\n train_mode=FLAGS.train_mode,\n smoothness_edge_weighting=FLAGS.smoothness_edge_weighting,\n smoothness_edge_constant=FLAGS.smoothness_edge_constant,\n teacher_image_version=FLAGS.teacher_image_version,\n stop_gradient_mask=FLAGS.stop_gradient_mask,\n selfsup_mask=FLAGS.selfsup_mask,\n feature_architecture=FLAGS.feature_architecture,\n flow_architecture=FLAGS.flow_architecture,\n size=(FLAGS.global_gpu_batch_size, FLAGS.height, FLAGS.width),\n occlusion_estimation=FLAGS.occlusion_estimation,\n smoothness_at_level=FLAGS.smoothness_at_level,\n use_float16=True,\n )\n return smurf", "def make_action(self, state, test=True):\n ##################\n # YOUR CODE HERE #\n ##################\n # return self.env.get_random_action()\n state = shrink(state)\n d_state = state - self.state\n if self.gae:\n y, val = self.model(cu(Variable(torch.from_numpy(d_state).float())))\n else:\n y = self.model(cu(Variable(torch.from_numpy(d_state).float())))\n self.state = state\n\n prob = F.softmax(y)\n log_prob = F.log_softmax(y)\n entropy = -(log_prob * prob).sum(1)\n act = prob.multinomial().data\n log_prob = log_prob.gather(1, cu(Variable(act)))\n\n if not test:\n self.log_probs.append(log_prob)\n if self.gae:\n self.values.append(val)\n self.entropies.append(entropy)\n return act[0, 0]", "def cook(self, fake):\n \n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + 'discriminator_obj') as scope: \n # if targets is none, then think of this as simple GAN.\n if self.targets is None: \n # generator_obj = - 0.5 * tf.reduce_mean(log(fake))\n generator_obj = 0.5 * tf.reduce_mean( (fake-1) **2 )\n else:\n generator_obj = tf.nn.softmax_cross_entropy_with_logits(labels = 
self.targets, \n logits = fake, name = self.name)\n tf.summary.scalar('generator_obj', generator_obj)\n tf.add_to_collection( self.name + '_objectives', generator_obj ) \n with tf.variable_scope (self.name + '_probabilites') as scope: \n tf.summary.scalar('fake_probability', tf.reduce_mean(fake))\n\n self._cook_optimizer( \n lr = GEN_GAN_LR, \n optimizer = GEN_GAN_OPTIMIZER,\n l1_coeff = GEN_GAN_L1_COEFF,\n l2_coeff = GEN_GAN_WEIGHT_DECAY_COEFF)", "def mutation(self, base_offsprings, model_features_count) :", "def _create_selu(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 1.67326)\n gamma = onnx_node.getattr(\"gamma\", 1.0507)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha, gamma)", "def generate_homography_nn_sgd(self):\n # Create the NN\n self.set_optimizer_sgd()\n self.set_callback(utils.lr_callback)\n self.build_model()\n self.compile()" ]
[ "0.53975487", "0.5394964", "0.5384086", "0.536451", "0.5326402", "0.52954835", "0.52881545", "0.52797973", "0.5263367", "0.5261843", "0.52510035", "0.52152216", "0.52140915", "0.5206617", "0.51791567", "0.5175483", "0.5157839", "0.5151441", "0.5127721", "0.5115076", "0.5077677", "0.5073594", "0.5044752", "0.50418234", "0.50402683", "0.50385326", "0.5035593", "0.5034607", "0.5033801", "0.503089" ]
0.7586326
0
Denests an expression that contains nested square roots.
def sqrtdenest (expr):
    expr = sympify(expr)
    if expr.is_Pow and expr.exp is S.Half: #If expr is a square root
        return denester([expr])[0]
    return expr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def denester (nested):\n if all((n**2).is_Number for n in nested): #If none of the arguments are nested\n for f in subsets(len(nested)): #Test subset 'f' of nested\n p = prod(nested[i]**2 for i in range(len(f)) if f[i]).expand()\n if 1 in f and f.count(1) > 1 and f[-1]: p = -p\n if sqrt(p).is_Number: return sqrt(p), f #If we got a perfect square, return its square root.\n return nested[-1], [0]*len(nested) #Otherwise, return the radicand from the previous invocation.\n else:\n a, b, r, R = Wild('a'), Wild('b'), Wild('r'), None\n values = [expr.match(sqrt(a + b * sqrt(r))) for expr in nested]\n for v in values:\n if r in v: #Since if b=0, r is not defined\n if R is not None: assert R == v[r] #All the 'r's should be the same.\n else: R = v[r]\n d, f = denester([sqrt((v[a]**2).expand()-(R*v[b]**2).expand()) for v in values] + [sqrt(R)])\n if not any([f[i] for i in range(len(nested))]): #If f[i]=0 for all i < len(nested)\n v = values[-1]\n return sqrt(v[a] + v[b]*d), f\n else:\n v = prod(nested[i]**2 for i in range(len(nested)) if f[i]).expand().match(a+b*sqrt(r))\n if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested)-1]:\n v[a] = -1 * v[a]\n v[b] = -1 * v[b]\n if not f[len(nested)]: #Solution denests with square roots\n return (sqrt((v[a]+d).expand()/2)+sign(v[b])*sqrt((v[b]**2*R/(2*(v[a]+d))).expand())).expand(), f\n else: #Solution requires a fourth root\n FR, s = (R.expand()**Rational(1,4)), sqrt((v[b]*R).expand()+d)\n return (s/(sqrt(2)*FR) + v[a]*FR/(sqrt(2)*s)).expand(), f", "def evaluate_expression_tree(root:Node) -> float:\n if root is None:\n return 0\n if root._left is None and root._right is None:\n return float(root._data)\n left_sum = evaluate_expression_tree(root._left)\n right_sum = evaluate_expression_tree(root._right)\n if root._data == '+':\n return left_sum + right_sum\n elif root._data == '-':\n return left_sum - right_sum\n elif root._data == '*':\n return left_sum * right_sum\n elif root._data == '/':\n return left_sum / right_sum\n elif root._data == '^':\n return left_sum ** right_sum\n else:\n raise ArithmeticError(root._data)", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def test_truediv():\n truediv = _MathExpression() / 2\n assert math.isclose(truediv(9), 4.5) # type: ignore", "def test_cases():\r\n quadratic_roots(1,3,-21)\r\n quadratic_roots(2,-4,-6)\r\n quadratic_roots(1,4,-12)\r\n quadratic_roots(4,12,9)\r\n quadratic_roots(-2,-11,-21)\r\n quadratic_roots(4,1,4)\r\n quadratic_roots(1,1,0)\r\n quadratic_roots(1,0,-16)\r\n quadratic_roots(1,-14,-49)\r\n quadratic_roots(1,10,25)", "def test_sqrt(doctest):", "def test_rtruediv():\n truediv = _MathExpression() / 2\n rtruediv = 9 / _MathExpression()\n assert truediv(9) == rtruediv(2)", "def test_expression(x, y, z):\n return x * y + y / z", "def test_expression_sanitizer(self):\n\n self.assertFalse(_is_math_expr_safe('INSERT INTO students VALUES (?,?)'))\n self.assertFalse(_is_math_expr_safe('import 
math'))\n self.assertFalse(_is_math_expr_safe('complex'))\n self.assertFalse(_is_math_expr_safe('__import__(\"os\").system(\"clear\")'))\n self.assertFalse(_is_math_expr_safe('eval(\"()._\" + \"_class_\" + \"_._\" +'\n ' \"_bases_\" + \"_[0]\")'))\n self.assertFalse(_is_math_expr_safe('2***2'))\n self.assertFalse(_is_math_expr_safe('avdfd*3'))\n self.assertFalse(_is_math_expr_safe('Cos(1+2)'))\n self.assertFalse(_is_math_expr_safe('hello'))\n self.assertFalse(_is_math_expr_safe('hello_world'))\n self.assertFalse(_is_math_expr_safe('1_2'))\n self.assertFalse(_is_math_expr_safe('2+-2'))\n self.assertFalse(_is_math_expr_safe('print(1.0)'))\n self.assertFalse(_is_math_expr_safe('1.1.1.1'))\n self.assertFalse(_is_math_expr_safe('abc.1'))\n\n self.assertTrue(_is_math_expr_safe('1+1*2*3.2+8*cos(1)**2'))\n self.assertTrue(_is_math_expr_safe('pi*2'))\n self.assertTrue(_is_math_expr_safe('-P1*cos(P2)'))\n self.assertTrue(_is_math_expr_safe('-P1*P2*P3'))\n self.assertTrue(_is_math_expr_safe('-P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1*P2'))\n self.assertTrue(_is_math_expr_safe('-(P1)'))", "def test_solve_ex_2_11(self):\n\n def f_a(x):\n return x - x ** 3 - 4 * x ** 2 + 10\n\n def f_b(x):\n inner = 10 / x - 4 * x\n logger.info(\"Performing sqrt({})\".format(inner))\n return math.sqrt(inner)\n\n logger.info('-' * 40)\n # f_a(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as it diverges and oscillates.\n iterate.solve(f_a, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)\n\n with self.assertRaises(ValueError):\n # f_b(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as the 3rd iteration attempts to root a -ve number.\n iterate.solve(f_b, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)", "def test_product_single_frac(self):\r\n self.assertEquals(\r\n preview.latex_preview('(2+3)/(4+5)'),\r\n r'\\frac{2+3}{4+5}'\r\n )", "def test_sum_expression(self):\n # The logic of SumExpression is checked in the above tests (which include\n # addition and subtraction). 
Here, we only check that constructing a\n # SumExpression flattens the list.\n structure_memoizer = {\n defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,\n defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)\n }\n\n term_values = [0, 1, 2, 3, 4]\n\n def create_dummy_expression(value):\n \"\"\"Creates an empty `Expression` with the given extra constraints.\"\"\"\n basic_expression_object = basic_expression.BasicExpression(\n [term.TensorTerm(value)])\n return expression.ExplicitExpression(basic_expression_object,\n basic_expression_object)\n\n expressions = [create_dummy_expression(value) for value in term_values]\n\n # Each of our Expressions contains exactly one term, so by checking its\n # value we can uniquely determine which subexpression is which.\n def term_value(expression_object):\n terms = expression_object.penalty_expression._terms\n self.assertEqual(1, len(terms))\n return terms[0].tensor(structure_memoizer)\n\n sum1 = expression.SumExpression([expressions[0], expressions[1]])\n sum2 = expression.SumExpression([expressions[2]])\n sum3 = expression.SumExpression([expressions[3]])\n sum4 = expression.SumExpression([expressions[4]])\n sum5 = expression.SumExpression([sum3, sum4])\n sum6 = expression.SumExpression([sum1, sum2, sum5])\n\n actual_expressions = sum6._expressions\n self.assertEqual(5, len(actual_expressions))\n for ii in xrange(5):\n self.assertEqual(ii, term_value(expressions[ii]))\n self.assertEqual(ii, term_value(actual_expressions[ii]))", "def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])", "def sqrty():\n return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])", "def test_rational_predicate(doctest):", "def isSqrt(self):\n return _libsbml.ASTNode_isSqrt(self)", "def test_product_big_frac(self):\r\n self.assertEquals(\r\n preview.latex_preview('2*3/4/5'),\r\n r'\\frac{2\\cdot 3}{4\\cdot 5}'\r\n )", "def _square_rooted(x):\n return sqrt(sum([(a * a) for a in x]))", "def test_second_level_composition(self):\n oe = expression.OperationalExpression\n v1, v2 = map(expression.Variable, [\"v1\", \"v2\"])\n exp = (v1 + v2) / (v1 - v2)\n expected_exp = oe('/', oe('+', v1, v2), oe('-', v1, v2))\n self.assert_equal(exp, expected_exp)", "def test_product_keep_going(self):\r\n self.assertEquals(\r\n preview.latex_preview('2/3*4/5*6'),\r\n r'\\frac{2}{3}\\cdot \\frac{4}{5}\\cdot 6'\r\n )", "def test_function_sqrt(self):\r\n self.assertEquals(preview.latex_preview('sqrt(3)'), r'\\sqrt{3}')", "def sqrt(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sqrt()))", "def root(x, y, precision=20):\n if type(x) is complex:\n return x ** (1.0 / y)\n if type(y) is complex:\n return x ** (1.0 / y)\n\n try:\n x = float(x)\n except:\n raise TypeError(\"{0} is not a number\".format(x))\n try:\n y = float(y)\n except:\n raise TypeError(\"{0} is not a number\".format(y))\n if x < 0 and y % 2 == 0:\n return root(abs(x), y) * 1j\n elif x < 0:\n return -root(abs(x), y)\n elif y < 0:\n return x ** (1.0 / y)\n\n if int(x) == x:\n x = int(x)\n if int(y) == y:\n y = int(y)\n # return x ** (1.0 / y)\n\n f = lambda n: n ** y - x\n fprime = lambda n: y * n ** (y - 1)\n\n guess = 10\n change = 1\n\n while change > 10 ** -precision:\n guess2 = guess - f(guess) / fprime(guess)\n change = abs(guess2 - guess)\n guess = guess2\n\n return guess", "def test_spheroid_deviation(spheroid_convex_fixture):\n assert(spheroid_convex_fixture.linear_deviation() == pytest.approx(-spheroid_convex_fixture.linear_deviation_abs()))", "def 
actual_root(x):\n root = x ** (1/n)\n\tprint(x)\n return root", "def test_expr(self):\n self.common_test_expr(True)", "def test_nested_query():\n schema = graphene.Schema(query=NestedQuery)\n response = schema.execute(\"{topLevel {name, leaf {value , leaflets {value} } } }\")\n assert to_dict(response.data) == {\n \"topLevel\": {\n \"name\": \"top level name\",\n \"leaf\": {\n \"value\": \"some leaf value\",\n \"leaflets\": [{\"value\": \"subleaf1\"}, {\"value\": \"subleaf2\"}],\n },\n }\n }", "def test_pow():\n # Test for exponent with scalar Rnode object and float value\n x = Rnode(0.11)\n z = x ** 2\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value ** 2\n assert x.grad() == x.value ** 2 * np.log(x.value)\n # assert x.children == (x.value ** 2 * np.log(x.value), z)\n except AssertionError as e:\n print(e)\n\n# Test for exponent with two scalar Rnode object\n x = Rnode(0.11)\n y = Rnode(0.2)\n z = x ** y\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value ** y.value\n assert x.grad() == x.value ** y.value * np.log(x.value)\n except AssertionError as e:\n print(e)", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def test_roots_slow():\n a, b, c, d, x = symbols(\"a,b,c,d,x\")\n\n f1 = x ** 2 * c + (a / b) + x * c * d - a\n f2 = x ** 2 * (a + b * (c - d) * a) + x * a * b * c / (b * d - d) + (a * d - c / d)\n\n assert list(roots(f1, x).values()) == [1, 1]\n assert list(roots(f2, x).values()) == [1, 1]\n\n (zz, yy, xx, zy, zx, yx, k) = symbols(\"zz,yy,xx,zy,zx,yx,k\")\n\n e1 = (zz - k) * (yy - k) * (xx - k) + zy * yx * zx + zx - zy - yx\n e2 = (zz - k) * yx * yx + zx * (yy - k) * zx + zy * zy * (xx - k)\n\n assert list(roots(e1 - e2, k).values()) == [1, 1, 1]\n\n f = x ** 3 + 2 * x ** 2 + 8\n R = list(roots(f).keys())\n\n assert not any(i for i in [f.subs(x, ri).n(chop=True) for ri in R])" ]
[ "0.6896922", "0.6043629", "0.59849846", "0.5922657", "0.59038043", "0.5823445", "0.58146596", "0.5793079", "0.57804644", "0.57395005", "0.5695278", "0.5685111", "0.5664562", "0.5645821", "0.5620064", "0.56190675", "0.5606242", "0.55986995", "0.5581013", "0.5559273", "0.5549385", "0.550636", "0.54669327", "0.5465975", "0.5455947", "0.54052186", "0.5400933", "0.5395314", "0.537983", "0.5341416" ]
0.70765764
0
Denests a list of expressions that contain nested square roots. This method should not be called directly; use 'denest' instead.
def denester (nested):
    if all((n**2).is_Number for n in nested): #If none of the arguments are nested
        for f in subsets(len(nested)): #Test subset 'f' of nested
            p = prod(nested[i]**2 for i in range(len(f)) if f[i]).expand()
            if 1 in f and f.count(1) > 1 and f[-1]: p = -p
            if sqrt(p).is_Number: return sqrt(p), f #If we got a perfect square, return its square root.
        return nested[-1], [0]*len(nested) #Otherwise, return the radicand from the previous invocation.
    else:
        a, b, r, R = Wild('a'), Wild('b'), Wild('r'), None
        values = [expr.match(sqrt(a + b * sqrt(r))) for expr in nested]
        for v in values:
            if r in v: #Since if b=0, r is not defined
                if R is not None: assert R == v[r] #All the 'r's should be the same.
                else: R = v[r]
        d, f = denester([sqrt((v[a]**2).expand()-(R*v[b]**2).expand()) for v in values] + [sqrt(R)])
        if not any([f[i] for i in range(len(nested))]): #If f[i]=0 for all i < len(nested)
            v = values[-1]
            return sqrt(v[a] + v[b]*d), f
        else:
            v = prod(nested[i]**2 for i in range(len(nested)) if f[i]).expand().match(a+b*sqrt(r))
            if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested)-1]:
                v[a] = -1 * v[a]
                v[b] = -1 * v[b]
            if not f[len(nested)]: #Solution denests with square roots
                return (sqrt((v[a]+d).expand()/2)+sign(v[b])*sqrt((v[b]**2*R/(2*(v[a]+d))).expand())).expand(), f
            else: #Solution requires a fourth root
                FR, s = (R.expand()**Rational(1,4)), sqrt((v[b]*R).expand()+d)
                return (s/(sqrt(2)*FR) + v[a]*FR/(sqrt(2)*s)).expand(), f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sum_expression(self):\n # The logic of SumExpression is checked in the above tests (which include\n # addition and subtraction). Here, we only check that constructing a\n # SumExpression flattens the list.\n structure_memoizer = {\n defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,\n defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)\n }\n\n term_values = [0, 1, 2, 3, 4]\n\n def create_dummy_expression(value):\n \"\"\"Creates an empty `Expression` with the given extra constraints.\"\"\"\n basic_expression_object = basic_expression.BasicExpression(\n [term.TensorTerm(value)])\n return expression.ExplicitExpression(basic_expression_object,\n basic_expression_object)\n\n expressions = [create_dummy_expression(value) for value in term_values]\n\n # Each of our Expressions contains exactly one term, so by checking its\n # value we can uniquely determine which subexpression is which.\n def term_value(expression_object):\n terms = expression_object.penalty_expression._terms\n self.assertEqual(1, len(terms))\n return terms[0].tensor(structure_memoizer)\n\n sum1 = expression.SumExpression([expressions[0], expressions[1]])\n sum2 = expression.SumExpression([expressions[2]])\n sum3 = expression.SumExpression([expressions[3]])\n sum4 = expression.SumExpression([expressions[4]])\n sum5 = expression.SumExpression([sum3, sum4])\n sum6 = expression.SumExpression([sum1, sum2, sum5])\n\n actual_expressions = sum6._expressions\n self.assertEqual(5, len(actual_expressions))\n for ii in xrange(5):\n self.assertEqual(ii, term_value(expressions[ii]))\n self.assertEqual(ii, term_value(actual_expressions[ii]))", "def sqrtdenest (expr):\n expr = sympify(expr)\n if expr.is_Pow and expr.exp is S.Half: #If expr is a square root\n return denester([expr])[0]\n return expr", "def test_cases():\r\n quadratic_roots(1,3,-21)\r\n quadratic_roots(2,-4,-6)\r\n quadratic_roots(1,4,-12)\r\n quadratic_roots(4,12,9)\r\n quadratic_roots(-2,-11,-21)\r\n quadratic_roots(4,1,4)\r\n quadratic_roots(1,1,0)\r\n quadratic_roots(1,0,-16)\r\n quadratic_roots(1,-14,-49)\r\n quadratic_roots(1,10,25)", "def test_suite():\n test(calc_det([[2, 1],[3, 4]]), 5)", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def squareRoot(requestContext, seriesList):\n for series in seriesList:\n series.name = \"squareRoot(%s)\" % (series.name)\n for i,value in enumerate(series):\n series[i] = safePow(value, 0.5)\n return seriesList", "def test_expression_sanitizer(self):\n\n self.assertFalse(_is_math_expr_safe('INSERT INTO students VALUES (?,?)'))\n self.assertFalse(_is_math_expr_safe('import math'))\n self.assertFalse(_is_math_expr_safe('complex'))\n self.assertFalse(_is_math_expr_safe('__import__(\"os\").system(\"clear\")'))\n self.assertFalse(_is_math_expr_safe('eval(\"()._\" + \"_class_\" + \"_._\" +'\n ' \"_bases_\" + \"_[0]\")'))\n self.assertFalse(_is_math_expr_safe('2***2'))\n self.assertFalse(_is_math_expr_safe('avdfd*3'))\n 
self.assertFalse(_is_math_expr_safe('Cos(1+2)'))\n self.assertFalse(_is_math_expr_safe('hello'))\n self.assertFalse(_is_math_expr_safe('hello_world'))\n self.assertFalse(_is_math_expr_safe('1_2'))\n self.assertFalse(_is_math_expr_safe('2+-2'))\n self.assertFalse(_is_math_expr_safe('print(1.0)'))\n self.assertFalse(_is_math_expr_safe('1.1.1.1'))\n self.assertFalse(_is_math_expr_safe('abc.1'))\n\n self.assertTrue(_is_math_expr_safe('1+1*2*3.2+8*cos(1)**2'))\n self.assertTrue(_is_math_expr_safe('pi*2'))\n self.assertTrue(_is_math_expr_safe('-P1*cos(P2)'))\n self.assertTrue(_is_math_expr_safe('-P1*P2*P3'))\n self.assertTrue(_is_math_expr_safe('-P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1*P2'))\n self.assertTrue(_is_math_expr_safe('-(P1)'))", "def test_sqrt(doctest):", "def evaluate_expression_tree(root:Node) -> float:\n if root is None:\n return 0\n if root._left is None and root._right is None:\n return float(root._data)\n left_sum = evaluate_expression_tree(root._left)\n right_sum = evaluate_expression_tree(root._right)\n if root._data == '+':\n return left_sum + right_sum\n elif root._data == '-':\n return left_sum - right_sum\n elif root._data == '*':\n return left_sum * right_sum\n elif root._data == '/':\n return left_sum / right_sum\n elif root._data == '^':\n return left_sum ** right_sum\n else:\n raise ArithmeticError(root._data)", "def test_reroot(self):\n t = DndParser(\"(((a,b)c,(d,e)f)g,(h,i)j);\")\n tips = ['a','b']\n for n in t.traverse():\n n.Length = 1.0\n \n # note, g is lost because it has a single descendent and gets pruned off\n exp = \"((a:1.0,b:1.0)c:0.5,((d:1.0,e:1.0)f:1.0,(h:1.0,i:1.0)j:2.0):0.5);\"\n obs = reroot(t, tips)\n self.assertEqual(obs.getNewick(with_distances=True), exp)", "def execute(self, root):\n assert isinstance(root, Node)\n\n null = Null()\n\n def optional(expression):\n \"\"\"return True iff expression is optional\"\"\"\n return any(e.data == 'optional' for e in expression.children)\n\n def concatenate(expression, stream):\n \"\"\"evaluate query expressions and concatenate results\"\"\"\n # fork the stream for each subexpression\n streams = itertools.tee(stream, len(expression.children))\n return itertools.chain.from_iterable(\n evaluate(expression, stream)\n for expression, stream in zip(expression.children, streams)\n )\n\n def iterate(expression, stream):\n \"\"\"iterate over json stream\"\"\"\n for node in stream:\n itr = (\n iter(node)\n if isinstance(node, List) else\n iter(node.values())\n if isinstance(node, Object) else\n iter([])\n if optional(expression) else\n None\n )\n if not itr:\n raise TypeError(\n 'cannot iterate over {}'.format(\n node.__class__.__name__\n )\n )\n for child in itr:\n yield child\n\n def indexer(expression, stream):\n \"\"\"extract elements from json containers\"\"\"\n def throw(node, item):\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__,\n item.__class__.__name__,\n )\n )\n\n def mkint(expression):\n if expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n idx = float(expression.children[0])\n if not idx.is_integer():\n idx = int(idx) + 1\n return idx\n else:\n assert False, 'bad number expression {}'.format(\n expression\n )\n\n def mkslice(expression):\n s, e = None, None\n for idx in expression.children:\n if idx.data == 'start':\n s = mkint(idx.children[0])\n elif idx.data == 'end':\n e = mkint(idx.children[0])\n yield slice(s, e)\n\n def mkindex(expression):\n if 
expression.data == 'expression':\n return evaluate(expression, stream)\n elif expression.data == 'slice':\n return mkslice(expression)\n elif expression.data == 'cname':\n return expression.children\n elif expression.data == 'string':\n return [expression.children[0][1:-1]]\n elif expression.data in ('integer', 'float'):\n return [mkint(expression)]\n else:\n assert False, 'bad index expression {}'.format(expression)\n\n for item in mkindex(expression.children[0]):\n for node in stream:\n if isinstance(node, Object):\n if isinstance(item, Primitive):\n item = str(item)[1:-1]\n if isinstance(item, basestring):\n yield node.get(item, null)\n continue\n\n if isinstance(node, List):\n if isinstance(item, Primitive):\n item = int(str(item))\n if isinstance(item, (int, slice)):\n try:\n yield node[item]\n except IndexError:\n yield null\n continue\n\n if not optional(expression):\n throw(node, item)\n\n def properties(expression, stream):\n \"\"\"extract values from json objects\"\"\"\n def index(expression, stream):\n item = expression.children[0].children[0]\n for node in stream:\n if isinstance(node, Object):\n yield node.get(item, null)\n elif not optional(expression):\n itype = expression.children[0].data\n if itype == 'cname':\n itype = 'string'\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__, itype\n )\n )\n\n for expression in expression.children:\n stream = index(expression, stream)\n\n for node in stream:\n yield node\n\n def primitive(expression):\n \"\"\"return a primitive type\"\"\"\n expression = expression.children[0]\n if expression.data == 'null':\n return null\n elif expression.data == 'boolean':\n return expression.children[0] == 'true'\n elif expression.data == 'string':\n return expression.children[0][1:-1]\n elif expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n return float(expression.children[0])\n assert False, 'bad primitive {}'.format(expression)\n\n def evaluate(expression, stream):\n \"\"\"evaluate query expression over json stream\"\"\"\n assert expression.data == 'expression', expression\n assert len(expression.children) == 1\n\n expression = expression.children[0]\n\n if expression.data == 'identity':\n for node in stream:\n yield node\n\n elif expression.data == 'primitive':\n yield primitive(expression)\n\n elif expression.data == 'properties':\n for node in properties(expression, stream):\n yield node\n\n elif expression.data == 'indexer':\n for node in indexer(expression, stream):\n yield node\n\n elif expression.data == 'iterator':\n for node in iterate(expression, stream):\n yield node\n\n elif expression.data == 'concatenator':\n for node in concatenate(expression, stream):\n yield node\n\n else:\n assert False, 'bad expression {}'.format(expression)\n\n stream, pipeline = [root], self.tree.children[0]\n for expression in pipeline.children:\n stream = evaluate(expression, stream)\n\n for result in stream:\n yield result", "def test_expression(x, y, z):\n return x * y + y / z", "def test_exp_square():\n\timport odelab.scheme.exponential as E\n\tfor name in dir(E):\n\t\tcls = getattr(E, name)\n\t\tif hasattr(cls, 'general_linear_z'):\n\t\t\tobj = cls()\n\t\t\ta,b = obj.general_linear_z(np.eye(2))\n\t\t\tnb_stages = len(a)\n\t\t\ttail_length = obj.tail_length\n\t\t\tyield CheckSquare(name),name, a,b, nb_stages, tail_length", "def test_product_keep_going(self):\r\n self.assertEquals(\r\n preview.latex_preview('2/3*4/5*6'),\r\n r'\\frac{2}{3}\\cdot \\frac{4}{5}\\cdot 6'\r\n 
)", "def test_deep(self, expr, result, mode):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode=mode)\n b = symbol(name='b', dimensions=(j, k), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_rational_predicate(doctest):", "def test_expr(self):\n self.common_test_expr(True)", "def test1(debug_solve=False):\n from numpy import sqrt\n for x0 in [1., 2., 100.]:\n print \" \" # blank line\n x,iters = solve(fvals_sqrt, x0, debug=debug_solve)\n print \"solve returns x = %22.15e after %i iterations \" % (x,iters)\n fx,fpx = fvals_sqrt(x)\n print \"the value of f(x) is %22.15e\" % fx\n assert abs(x-2.) < 1e-14, \"*** Unexpected result: x = %22.15e\" % x", "def testCalculate(self):\r\n for i in range(len(self.__testExpressions)):\r\n self.__Calculator.setExpression(self.__testExpressions[i])\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__Calculator.getResult(), self.__testResult[i])", "def collect_derivatives(expressions):\n processed = []\n for e in expressions:\n # Track type and number of nested Derivatives\n mapper = inspect(e)\n\n # E.g., 0.2*u.dx -> (0.2*u).dx\n ep = aggregate_coeffs(e, mapper)\n\n # E.g., (0.2*u).dx + (0.3*v).dx -> (0.2*u + 0.3*v).dx\n processed.append(factorize_derivatives(ep))\n\n return processed", "def test_all(self):\n\n tokens = list(Lexer(\"12 + 2^(8/4) - 5 * (7%4)\").generate_tokens())\n answer = [Token(TokenType.NUMBER, 12),\n Token(TokenType.PLUS),\n Token(TokenType.NUMBER, 2),\n Token(TokenType.EXPONENT),\n Token(TokenType.LPAREN),\n Token(TokenType.NUMBER, 8),\n Token(TokenType.DIVIDE),\n Token(TokenType.NUMBER, 4),\n Token(TokenType.RPAREN),\n Token(TokenType.MINUS),\n Token(TokenType.NUMBER, 5),\n Token(TokenType.MULTIPLY),\n Token(TokenType.LPAREN),\n Token(TokenType.NUMBER, 7),\n Token(TokenType.MODULO),\n Token(TokenType.NUMBER, 4),\n Token(TokenType.RPAREN)]\n #Token(TokenType.NUMBER, 3)]\n self.assertEqual(tokens, answer)", "def test_simpson_e(self):\n c = array([1,2,3,1])\n s = simpson(c)\n self.assertEqual((1/s)/4, simpson_e(c))", "def test_doubles(self):\n self.assertEqual(doubles(self.TestData), 3)\n self.assertEqual(doubles(array([0,3,4])), 0)\n self.assertEqual(doubles(array([2])), 1)", "def test_density_multiple(self):\n earth = PREM()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])", "def test_evaluate_hierarchical(backend):\n # skip test for dask backend if dask is not installed\n if backend == \"dask\" and not _check_soft_dependencies(\"dask\", severity=\"none\"):\n return None\n\n y = _make_hierarchical(\n random_state=0, hierarchy_levels=(2, 2), min_timepoints=12, max_timepoints=12\n )\n X = _make_hierarchical(\n random_state=42, hierarchy_levels=(2, 2), min_timepoints=12, max_timepoints=12\n )\n y = y.sort_index()\n X = X.sort_index()\n\n forecaster = DirectReductionForecaster(LinearRegression())\n cv = SlidingWindowSplitter()\n scoring = MeanAbsolutePercentageError(symmetric=True)\n out_exog = evaluate(\n forecaster, cv, y, X=X, scoring=scoring, error_score=\"raise\", backend=backend\n )\n out_no_exog = evaluate(\n forecaster, cv, y, X=None, scoring=scoring, error_score=\"raise\", backend=backend\n )\n\n scoring_name = 
f\"test_{scoring.name}\"\n assert np.all(out_exog[scoring_name] != out_no_exog[scoring_name])", "def test_solve_ex_2_11(self):\n\n def f_a(x):\n return x - x ** 3 - 4 * x ** 2 + 10\n\n def f_b(x):\n inner = 10 / x - 4 * x\n logger.info(\"Performing sqrt({})\".format(inner))\n return math.sqrt(inner)\n\n logger.info('-' * 40)\n # f_a(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as it diverges and oscillates.\n iterate.solve(f_a, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)\n\n with self.assertRaises(ValueError):\n # f_b(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as the 3rd iteration attempts to root a -ve number.\n iterate.solve(f_b, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)", "def test_suite():\n test(sum_of_squares([2, 3, 4]) == 29)\n test(sum_of_squares([ ]) == 0)\n test(sum_of_squares([2, -3, 4]) == 29)", "def test_density_multiple(self):\n earth = CoreMantleCrustModel()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def test_div_complex(doctest):" ]
[ "0.62695044", "0.6142509", "0.57942927", "0.5488867", "0.54754835", "0.54533607", "0.5403379", "0.5387797", "0.53272367", "0.53124315", "0.5282791", "0.5224005", "0.52176845", "0.5194085", "0.5188306", "0.5179866", "0.5165743", "0.51421887", "0.5111471", "0.50850046", "0.5078342", "0.50492865", "0.5040237", "0.50255054", "0.50116", "0.5010487", "0.49900374", "0.49897626", "0.4983782", "0.49746695" ]
0.66358936
0
Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set.
def subsets(n):
    binary = lambda x: x>0 and binary(x>>1) + [x&1] or []
    pad = lambda l: [0]*(n-len(l)) + l #Always returns a list of length 'n'
    return [pad(binary(i)) for i in range(1, 2**n)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allSubsets(self):\n n = self.graph.n\n subsets = np.zeros((2**n,n))\n for i in range(2**n):\n binary = np.array(list(bin(i)[2:])).astype(float)\n if binary.shape[0] < n:\n padding = np.zeros(n-binary.shape[0])\n subsets[i,:] = np.append(padding, binary)\n else:\n subsets[i,:] = binary\n return np.where(subsets > 0, 1, -1)", "def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])", "def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])", "def subsets(x, k):\n sub_set = set()\n for i in x:\n sub_set = sub_set.union(set(combinations(i, k)))\n return list(sub_set)", "def subsets(self):\n return set(self.subset_map.values())", "def subset_gen(itemSet):\n subsets = []\n for i in range(1, len(itemSet)):\n c = combinations(itemSet, r=i)\n for cc in c:\n subsets.append(set(cc))\n return subsets", "def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs", "def all_subsets(self, ss):\n return chain(*map(lambda x: combinations(ss, x), range(1, len(ss)+1)))", "def powerset(n):\n # chain r-combinations generator for r=0, 1,..., n\n return chain.from_iterable(combinations(range(n), r) for r in range(n+1))", "def comb(set, n):\n if len(set) < n:\n raise Exception(\"Not enough elements\")\n elif len(set) == n:\n yield set\n else:\n setLen = len(set)\n iters = [rangeIter(setLen - n + 1)]\n values = [0] * n\n values[0] = iters[0].next()\n level = 1\n while True:\n # Fill array of iterators back up\n while level < n:\n iters.append(rangeIter(values[level - 1] + 1,\n setLen - n + level + 1))\n values[level]=iters[level].next()\n level += 1\n subset = [set[i] for i in values]\n yield subset\n while True:\n try:\n values[level - 1] = iters[level - 1].next()\n break\n except StopIteration:\n iters.pop()\n level -= 1\n if level == 0:\n # Top-level iterator is done, so we are too\n raise StopIteration", "def subsets(self):\n \n # note subsets have an unusual encoding\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT DISTINCT ?s WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s \n }}\n }}\n \"\"\".format(g=self.graph_name)\n bindings = run_sparql(query)\n return [r['s']['value'] for r in bindings]", "def getSubsets(cityIndicesExcluding1, subsetSizeWithout1):\r\n\r\n # Getting subsets of specific size excluding start vertex, i.e.,city 1 and without the city 1\r\n subsets = []\r\n X = list( itertools.combinations(cityIndicesExcluding1, subsetSizeWithout1) )\r\n\r\n # Appending start vertex, i.e., city 1 to each subset\r\n for x in X:\r\n x = ( 1, ) + x\r\n subsets.append( x )\r\n\r\n return subsets", "def powerset1(s):\n result = [[]]\n for ss in s:\n new_subset = [subset + [ss] for subset in result]\n result.extend(new_subset)\n return result", "def k_subsets(set_, k):\n ensure_countable(set_)\n\n if not isinstance(k, Integral):\n raise TypeError(\"subset cardinality must be a number\")\n if not (k >= 0):\n raise ValueError(\"subset cardinality must be positive\")\n if not (k <= len(set_)):\n raise ValueError(\"subset cardinality must not exceed set cardinality\")\n\n result = combinations(set_, k)\n return _harmonize_subset_types(set_, result)", "def subsets(lst):\n\tsubSet = [[]]\n\tfor element in lst:\n\t\tfor s in subSet[:]:\n\t\t\tsubSet.append(s.copy())\n\t\t\ts.append(element)\n\treturn subSet", "def remove_super_sets(sub_set, set_of_sets):\n return [x for x in set_of_sets 
if not set(x).issuperset(set(sub_set))]", "def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n n = len(nums)\n ans, res = [], []\n\n for i in range(2**n, 2**(n+1)):\n # generate bitmask, from 0..00 to 1..11\n bitmask = bin(i)[3:]\n res = [nums[j] for j in range(n) if bitmask[j] == '1']\n if res not in ans:\n ans.append(res)\n\n return ans\n # print(ans)", "def get_subsets(arr, n, value):\n result = []\n # return immediately if there is no possible subset in arr whose sum is equal to value\n if dp[n][value] == False:\n return\n \n queue = deque()\n queue.append(Pair(n, value, set()))\n\n while len(queue) > 0:\n pair = queue.popleft()\n if pair.i == 0 or pair.j == 0:\n result.append([arr[i] for i in pair.path_set])\n else:\n exclude = dp[pair.i - 1][pair.j]\n if exclude:\n queue.append(Pair(pair.i-1, pair.j, pair.path_set))\n\n if pair.j >= arr[pair.i-1]:\n include = dp[pair.i - 1][pair.j - arr[pair.i -1]]\n if include:\n b = pair.path_set.copy()\n b.add(pair.i - 1)\n queue.append(Pair(pair.i - 1, pair.j-arr[pair.i-1], b))\n \n return result", "def powerset(iterable):\n return map(set, chain.from_iterable(\n combinations(iterable, r) for r in range(len(iterable) + 1)))", "def power_set(self):\n if self._is_empty():\n return Set([Set()])\n\n copy_set = self._clone()\n\n element = copy_set.__from__()\n\n power_set = copy_set.power_set()\n\n result = Set()\n\n for item in power_set:\n result += Set([Set([element]) + item]) + Set([item])\n return result", "def powerset(a):\n if len(a) == 0:\n return set([frozenset()])\n accumulator = set()\n a = set(a)\n element = a.pop()\n for subset in powerset(a):\n accumulator.add(subset)\n accumulator.add(frozenset(set([element]) | subset))\n return accumulator", "def sets(elements, set_size):\n return combinations(elements, set_size)", "def powerset(s):\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def combos(array,n=2): \n # base case\n if n==0:\n yield frozenset()\n return\n\n # core recursion\n for c in set(combos(array,n-1)):\n for i in array:\n #added this to avoid duplicate combos\n if i not in c:\n # add element i to combo c\n yield frozenset({i})| c", "def powerset(iterable):\n\tset_list = list(iterable)\n\treturn list(chain.from_iterable(combinations(set_list, r)\n\t\t\t\t\t\t\t\tfor r in range(len(set_list)+1)))", "def powerset(iterable):\r\n \r\n s = list(iterable)\r\n subsets = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\r\n tuples=list(subsets)\r\n tuples.remove(())\r\n return tuples", "def all_subsets_of_size(L, size):\r\n pass # Left as an exercise for the reader\r", "def powerset(iterable, include_empty = True):\n s = list(iterable)\n i = chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))\n if not include_empty:\n next(i)\n return i", "def powerset(lst):\n return reduce(lambda rslt, x: rslt + [subset + [x] for subset in rslt],\n lst, [[]])", "def powerset(iterable):\n s = list(iterable)\n return itertools.chain.from_iterable( itertools.combinations(s, r)\n for r in range(len(s)+1) )" ]
[ "0.7304939", "0.6939716", "0.6939716", "0.69092846", "0.68856454", "0.68767667", "0.67554104", "0.67149925", "0.66519034", "0.66332114", "0.64102685", "0.6401927", "0.6380161", "0.6379676", "0.6356411", "0.63498825", "0.6328516", "0.6322608", "0.6320797", "0.6247256", "0.62364936", "0.62323385", "0.6109125", "0.6096608", "0.60819334", "0.6074155", "0.60732573", "0.6069375", "0.6052151", "0.6048907" ]
0.7694586
0
Returns the product of all elements of n, as a Rational.
def prod(n):
    product = S.One
    for i in n:
        product = product * i
    return product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multiply(self, n): \n f_num = self.num*n.num\n f_den = self.den*n.den\n f = Fraction(f_num, f_den)\n return f.reduce()", "def prod_of_nth(n):\n factorial = 1\n for i in range(1,n+1):\n factorial *= i\n return factorial", "def __mul__(self, n):\n assert isinstance(n, int)\n assert n >= 0\n\n \n if n == 0:\n return Inf(self.curve)\n \n else:\n Q = self\n R = Inf(self.curve)\n \n i = 1\n while i <= n:\n if n & i == i:\n R = R + Q\n \n Q = Q + Q\n \n i = i << 1\n \n return R", "def permute(n, r):\n\n product = 1\n for i in range(n - r + 1, n + 1):\n product *= i\n return product", "def permutations_(n, r):\n return factorial(n) / factorial(n-r)", "def r_permutations(n, r):\n return math.factorial(n) / math.factorial(n - r)", "def r_combinations(n,r):\n return r_permutations(n,r) / math.factorial(r)", "def prod(self):\n r = 0\n for i in range(len(self)):\n r *= self[i]\n\n return r", "def factorial(n):\n return reduce(mul, range(1, n), 1)", "def fractionify_and_reduce(n):\n nume, denom = fractionify(n)\n return reduce(nume, denom)", "def radicale(n):\n r = 1\n for p in primi(n+1):\n if p>n:\n break\n if n%p==0:\n r *= p\n n = n//p\n return r", "def multiply(m, n):\n if n == 1:\n return m\n else:\n return m + multiply(m, n - 1)", "def geometric_series_sum(a, r, n):\n\n if isinstance(a, int) and isinstance(r, int):\n return a * (1 - (r ** n)) / (1 - r)\n else:\n return 1.0 * a * (1 - (r ** n)) / (1 - r)", "def get_multiples(ratio, n):\n ls = [ratio ** i for i in range(n)]\n return ls", "def prod(l):\n r = 1\n for x in l:\n r *= x\n return r", "def power(x, n):\n value = 1\n for i in range(n):\n value = multiply(value, x)\n return value", "def factorial(n):\n return product(range(1, n + 1))", "def combination(n, r):\n\n \n\n \n numerator = factorial(n)\n denominator = factorial(r)\n subtracted_answer = factorial(n-r)\n \n\n answer = numerator/(denominator * subtracted_answer)\n print(answer)\n return answer", "def power(x, n):\n power = 1\n for i in range(abs(n)):\n power = multiply(power, x) \n return power", "def power(x, n):\n # Negative and fractional powers are not allowed\n if n < 0:\n raise ValueError('n cannot be negative')\n elif 0 < n < 1.0:\n raise ValueError('n cannot be fractional')\n\n result = 1\n for _ in range(n):\n result = multiply(result, x)\n return result", "def product( iterable ):\n p= 1\n for n in iterable:\n p *= n\n return p", "def multiplicity(q, N):\n return int(_math.factorial(q + N - 1) /\n (_math.factorial(q) * _math.factorial(N - 1)))", "def __pow__(self, n):\n if not isinstance(n, Integer):\n try:\n n = Integer(n)\n except TypeError:\n raise TypeError(\"Exponent n (= %s) must be an integer.\" % n)\n if n == 1:\n return self\n if n == 0:\n return Factorization([])\n if self.is_commutative():\n return Factorization([(p, n*e) for p, e in self], unit=self.unit()**n, cr=self.__cr, sort=False, simplify=False)\n from sage.groups.generic import power\n return power(self, n, Factorization([]))", "def product(factors):\n product = 1\n for i in factors:\n product *= i\n return product", "def ncr(n, r):\n r = min(r, n-r)\n if r == 0:\n return 1\n if r < 0:\n return 0\n numer = reduce(op.mul, xrange(n, n-r, -1))\n denom = reduce(op.mul, xrange(1, r+1))\n return numer / denom", "def powerize(n, p):\n return sum(int(d)**p for d in str(n))", "def permutations(n, r):\n result = 1\n for i in range(n, n-r, -1):\n result *= i\n return result", "def ext_mul(self, n: int, a: 'PFElement') -> 'PFElement':\n return self(self._pf_ext_mul(n, a.value, self.additive_group))", "def 
product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p", "def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p" ]
[ "0.7174945", "0.6563755", "0.6436478", "0.63774896", "0.631241", "0.62883914", "0.62796164", "0.62260956", "0.62194073", "0.6203918", "0.61760634", "0.61431354", "0.61146164", "0.61055475", "0.6087843", "0.6079033", "0.60457134", "0.6006618", "0.59772867", "0.59496444", "0.5941227", "0.59361434", "0.58856976", "0.5868706", "0.5865062", "0.5858928", "0.58236164", "0.581827", "0.58144236", "0.58144236" ]
0.7256208
0
Takes a name and returns an ID retrieved from the gGUIIDS dictionary. If the name is not in the dict, it's added.
def guiID(name):
    if not gGUIIDS.has_key(name):
        gGUIIDS[name] = wx.NewId()
    return gGUIIDS[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name_to_id(self, name, add_if_missing=False):\n if isinstance(name, basestring):\n # lookup name or assign new\n if name not in self.names:\n if not add_if_missing:\n raise ValueError(\"name \" + name + \" not found\")\n # Use an empty slot in the list if one exists.\n try:\n id_ = self.names.index(None)\n self.names[id_] = name\n self.hashesperid[id_] = 0\n except ValueError:\n self.names.append(name)\n self.hashesperid = np.append(self.hashesperid, [0])\n id_ = self.names.index(name)\n else:\n # we were passed in a numerical id\n id_ = name\n return id_", "def registerIdentifier(self, name):\n assert mathUtils.isAString(name)\n assert name not in self._registeredIdentifiers\n # don't allow adding identifiers if existing jobs are already running, I think?\n assert not self._prefixToIdentifiers\n self._registeredIdentifiers.add(name)", "def event_id(self, event_name):\n try:\n event_id = self.gui_event_ids[event_name]\n except KeyError:\n event_id = len(self.gui_events)\n self.gui_event_ids[event_name] = event_id\n self.gui_events.append(event_name)\n if event_id >= 16383:\n raise RuntimeError(\"Maximum number of events exceeded\")\n return event_id", "def convert_name_to_id(self, item_name): \n if item_name not in self.__dict_name_to_id or len(self.__dict_name_to_id[item_name]) > 1:\n return None\n return self.__dict_name_to_id[item_name][0]", "def _get_name(self, name):\n try:\n return self._names.index(name)\n except ValueError:\n self._names.append(name)\n return len(self._names) - 1", "def make_id(self, name: str) -> str:\n # id_cache is intentionally mutable\n id = self.id_cache.get(name)\n if not id:\n id = 'epub-%d' % self.env.new_serialno('epub')\n self.id_cache[name] = id\n return id", "def get_available_id(self):\n n = 0\n while True:\n id = '<name_{}>'.format(n)\n if id not in self.database:\n break\n n += 1\n return id", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def get_id(cls, name):\n assert name, 'name is empty'\n if name in cls._ids:\n return cls._ids[name]\n sql = \"SELECT id FROM hive_communities WHERE name = :name\"\n cid = DB.query_one(sql, name=name)\n if cid:\n cls._ids[name] = cid\n cls._names[cid] = name\n return cid", "def get_id_from_name(self, the_name: str) -> Optional[str]:\n\n prospective = None\n for key, value in self.labels.items():\n if value == the_name:\n prospective = key\n break\n return prospective", "def __assign_name_id(self):\n if not self.name_id:\n self.name_id = str(BaseTicketing.objects.create())", "def register(self, name):\n\n if 
name in self.players.itervalues():\n userPID = dict((self.players[k], k) for k in self.players)[name]\n self._logger.debug(\"Player already exists, giving ID\")\n return (True, {\"playerID\": userPID})\n else:\n newID = _getUniqueInt(self.players.keys())\n self.players[newID] = name\n TournamentSystem._logger.debug(\"Registered %s with playerID %d\",\n name, newID)\n return (True, {\"playerID\": newID})", "def _get_gid(name):\n if getgrnam is None or name is None:\n return None\n try:\n result = getgrnam(name)\n except KeyError:\n result = None\n if result is not None:\n return result[2]\n return None", "def set_game_id(self, game_name):\n dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}\n dic = dic[self.league]\n dic = {(''.join(filter(str.isalpha,key))):v for key,v in dic.items()}\n self.game_id = dic[game_name][0]\n self.game_time = dic[game_name][1]", "def get_unique_name(self, name=''):\n return self.scope.deduplicate(name)", "def get_id_from_name(item_name):\n try:\n return next(item for item in mapping if item[\"name\"].lower() == item_name.lower())[\"id\"]\n except StopIteration:\n return None", "def convert_chart_name_to_id(chart_name):\n ordered_coa = OrderedDict(chart_of_accounts)\n r = list(ordered_coa.keys()).index(chart_name)\n return r", "def _add_name(self, msg, name):\n try:\n names = self.get_local(msg, \"names\")\n except KeyError:\n names = set()\n names.add(name)\n self.set_local(msg, \"names\", names)", "def internName(self, name):\n if name in self.names:\n return self.names[name]\n else:\n new = self.nameidx\n self.names[name] = new\n self.invnames[new] = name\n self.nameidx += 1\n return new", "def get_network_id_by_name(name: str) -> str:\n networks_info = get_networks()\n\n for network in networks_info[\"networks\"]:\n if network[\"name\"] == name:\n return network[\"id\"]\n\n raise AttributeError(f\"No network named {name}\")", "def get_game_id(self) -> str:\n return self.game_name_entry.get()", "def name_to_id(player_name):\n # This is fairly unsophisticated, just does a CRC32 on the name. 
Can be\n # optimized both for compute requirements and collision frequency using\n # another hashing algorithm.\n return binascii.crc32(player_name) & 0xFFFFFFFF", "def get_data_id(self, name):\n\n idx = -1\n if type(name) is str:\n data_names = self.data_list()\n if name in data_names:\n idx = data_names.index(name)\n return(idx)", "def output_name_to_id(self, name):\n for i, o in list(r.outputs.items()):\n if o.name == name:\n return i", "def getPlayerIDFromName(name):\n\n # Connect to the database.\n conn, c = main.connect()\n\n # Select the player that matches the name.\n SQL = \"SELECT playerID FROM player WHERE playerName=%s\"\n data = (name, )\n c.execute(SQL, data)\n\n toReturn = c.fetchone()\n\n conn.commit()\n conn.close()\n\n # Only return the first result\n return toReturn[0]", "def get_unique_id(name: str) -> str:\n name = get_data_source(name)\n suffixes = \".\".join(sfx for sfx in get_format_suffixes(name) if sfx)\n return re.sub(rf\"[.]{suffixes}$\", \"\", name)", "def name_to_gid(name):\r\n try:\r\n gid = int(name)\r\n except ValueError:\r\n try:\r\n grprec = grp.getgrnam(name)\r\n except KeyError:\r\n raise ValueError(\"Invalid group name %s\" % name)\r\n gid = grprec[2]\r\n else:\r\n try:\r\n grp.getgrgid(gid) # check if gid is valid\r\n except KeyError:\r\n raise ValueError(\"Invalid group id %s\" % name)\r\n return gid", "def fregion_id_by_name(name=None):\n f_region_types = FilteredElementCollector(doc).OfClass(FilledRegionType)\n for fregion_type in f_region_types:\n fregion_name = Element.Name.GetValue(fregion_type)\n if not name or name.lower() == fregion_name.lower():\n return fregion_type.Id\n # Loops through all, not found: use last\n else:\n print('Color not specified or not found.')\n return fregion_type.Id", "def key_for_name(name):\n return 'hotqueue:%s' % name" ]
[ "0.6672799", "0.6143603", "0.6004908", "0.5935843", "0.588576", "0.5862737", "0.58363074", "0.5768236", "0.5768236", "0.576694", "0.57117486", "0.55991846", "0.5593386", "0.5582992", "0.5563325", "0.55218446", "0.55121285", "0.5479918", "0.5430901", "0.5421324", "0.54157084", "0.54077286", "0.5360876", "0.5307637", "0.53018755", "0.52568567", "0.52363575", "0.5226186", "0.52243304", "0.52128166" ]
0.7906555
0
Concatenate each sample in samples horizontally, along axis 1. Return the resulting array.
def get_concatenated_row(samples): return np.concatenate([sample for sample in samples], axis=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_concatenated_col(samples):\n return np.concatenate([sample for sample in samples], axis=0)", "def concatenate(arrays, **kwargs):\n unit = unit_of(arrays[0])\n result = np.concatenate([to_unitless(arr, unit) for arr in arrays], **kwargs)\n return result * unit", "def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)", "def concatonate(data):\n tmp = np.array(data)\n tmp = np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], -1))\n return tmp", "def wrap(self, samples):\n\n rows, cols = samples.shape[:2]\n wraped_samples = np.ones((rows, cols + 1))\n wraped_samples[:, :cols] = samples\n return wraped_samples", "def _concat_arrays(arrays):\n # torch\n if isinstance(arrays[0], torch.Tensor):\n return torch.cat(arrays)\n\n # numpy\n if not isinstance(arrays[0], np.ndarray):\n arrays = np.asarray(arrays)\n\n return np.concatenate(arrays)", "def concatenate(tensors, axis=0):\n raise NotImplementedError", "def concat(vars, axis=-1):\n return concatenate(vars, axis)", "def concat(list_of_arrays):\n shape = np.shape(list_of_arrays)\n newShape = [ shape[0]*shape[1] ]\n if len(shape)>2:\n for i in range(2,len(shape)):\n newShape.append(shape[i])\n \n array_concat = np.zeros(newShape)\n s=0\n e=shape[1]\n \n for i in range(0,shape[0]):\n array_concat[s:e] = list_of_arrays[i]\n s=e\n e=e+shape[1] \n return array_concat", "def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset", "def dim_zero_cat(x: Union[Tensor, List[Tensor]]) ->Tensor:\n x = x if isinstance(x, (list, tuple)) else [x]\n x = [(y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y) for y in x]\n if not x:\n raise ValueError('No samples to concatenate')\n return torch.cat(x, dim=0)", "def concat_images(X):\n nc,h,w,_ = X.shape\n X = X.reshape(nc,h,w)\n n = np.ceil(np.sqrt(nc)).astype(\"int8\")\n img = np.zeros((n*w,n*h))\n x = 0\n y = 0\n for example in range(nc):\n img[x*w:(x+1)*w,y*h:(y+1)*h] = X[example]\n y += 1\n if y >= n:\n y = 0\n x += 1\n return img", "def concatenate_offset(self, X):\n return np.c_[np.ones((X.shape[0], 1)), X]", "def concat_same(context, number):\n buffer = context\n for i in range(0, number - 1):\n buffer = np.concatenate((buffer, context), axis=0) \n return buffer", "def concat_rotated_images(X):\n X_aug = copy.deepcopy(X)\n for key in X_aug.keys():\n X_aug[key] = np.concatenate([X[key][:, :, :, :],\n X[key][:, :, ::-1, :],\n X[key][:, ::-1, :, :],\n X[key][:, ::-1, ::-1, :]])\n return X_aug", "def batch_concat(\n values: types.NestedArray,\n num_batch_dims: int = 1,\n) -> jnp.ndarray:\n flatten_fn = lambda x: _flatten.apply(None, x, num_batch_dims)\n flat_leaves = tree.map_structure(flatten_fn, values)\n return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)", "def concatenate(tensors, axis=-1):\n if axis < 0:\n rank = ndim(tensors[0])\n if rank:\n axis %= rank\n else:\n axis = 0\n\n if py_all([is_sparse(x) for x in tensors]):\n return sparse_ops.sparse_concat(axis, tensors)\n else:\n return array_ops.concat([to_dense(x) for x in tensors], axis)", "def samples(self):\n return np.concatenate([wf.samples for wf in self._waveforms])", "def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)", "def flatten(self, x):\n return np.concatenate([c.flatten(xi) for c, xi in zip(self.spaces, 
x)])", "def transform(self, Xs, y=None):\n Xs = check_Xs(Xs)\n return np.hstack(Xs)", "def concat_lists(column):\n arrays = list_array(column)\n return np.concatenate(arrays)", "def conver1D(array):\n l = array.shape\n total = np.zeros((0, l[1] * l[2]), dtype=np.float32)\n i = 0\n for i in range(24):\n tempData = array[i]\n array1D = []\n for x in tempData:\n for s in x:\n array1D.append(s)\n total = np.insert(total, i, array1D, axis=0)\n return total", "def get_agg(self, x, ids):\n \n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_agg = torch.mean(x[ids == i], 0).repeat(sample_size, 1)\n \n # concatenate each group of aggregated data\n if i == 0:\n agg = sample_agg \n else:\n agg = torch.cat((agg, sample_agg), dim=0)\n \n return agg", "def stack(arrs):\n\treturn np.concatenate([a[...,np.newaxis] for a in arrs], axis=-1)", "def concat_mean_stats(inputs):\n stats = torch.mean(inputs, 0, keepdim=True)\n stats = stats.expand(inputs.size())\n return torch.cat([stats, inputs], dim=1)", "def test_combine_nsamples_one_array():\n test_samples = np.ones((2, 13, 21)) * 3\n samples_out = utils.combine_nsamples(test_samples, axis=0)\n test_full_samples = np.ones((2, 2, 13, 21)) * 3\n assert np.allclose(test_full_samples, samples_out)", "def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result", "def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result", "def concatenate(tensor_list, axis=0):\n concat_size = sum(tt.shape[axis] for tt in tensor_list)\n\n output_shape = ()\n for k in range(axis):\n output_shape += (tensor_list[0].shape[k],)\n output_shape += (concat_size,)\n for k in range(axis + 1, tensor_list[0].ndim):\n output_shape += (tensor_list[0].shape[k],)\n\n out = T.zeros(output_shape)\n offset = 0\n for tt in tensor_list:\n indices = ()\n for k in range(axis):\n indices += (slice(None),)\n indices += (slice(offset, offset + tt.shape[axis]),)\n for k in range(axis + 1, tensor_list[0].ndim):\n indices += (slice(None),)\n\n out = T.set_subtensor(out[indices], tt)\n offset += tt.shape[axis]\n\n return out" ]
[ "0.72359675", "0.6499968", "0.6493327", "0.6439808", "0.619693", "0.6166372", "0.60156375", "0.5976714", "0.58696514", "0.5841752", "0.57607317", "0.573856", "0.5716519", "0.5697366", "0.565814", "0.5611839", "0.5608943", "0.557394", "0.5545506", "0.55205667", "0.55101043", "0.54553825", "0.5441027", "0.5430662", "0.54305416", "0.54231983", "0.54132885", "0.54008144", "0.54008144", "0.5390445" ]
0.7622855
0
Concatenate each sample in samples vertically, along axis 0. Return the resulting array.
def get_concatenated_col(samples): return np.concatenate([sample for sample in samples], axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_concatenated_row(samples):\n return np.concatenate([sample for sample in samples], axis=1)", "def concatonate(data):\n tmp = np.array(data)\n tmp = np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], -1))\n return tmp", "def wrap(self, samples):\n\n rows, cols = samples.shape[:2]\n wraped_samples = np.ones((rows, cols + 1))\n wraped_samples[:, :cols] = samples\n return wraped_samples", "def dim_zero_cat(x: Union[Tensor, List[Tensor]]) ->Tensor:\n x = x if isinstance(x, (list, tuple)) else [x]\n x = [(y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y) for y in x]\n if not x:\n raise ValueError('No samples to concatenate')\n return torch.cat(x, dim=0)", "def collater(self, samples):\r\n raise NotImplementedError", "def concatenate(tensors, axis=0):\n raise NotImplementedError", "def get(self):\n return np.hstack((self.data[:, self.cur:], self.data[:, :self.cur])) #Concatena los datos en horizontal", "def unroll(self):\n\n return numpy.hstack([k.flat for k in self.weights])", "def concatenate(arrays, **kwargs):\n unit = unit_of(arrays[0])\n result = np.concatenate([to_unitless(arr, unit) for arr in arrays], **kwargs)\n return result * unit", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)", "def stack(arrs):\n\treturn np.concatenate([a[...,np.newaxis] for a in arrs], axis=-1)", "def concat(vars, axis=-1):\n return concatenate(vars, axis)", "def _concat_arrays(arrays):\n # torch\n if isinstance(arrays[0], torch.Tensor):\n return torch.cat(arrays)\n\n # numpy\n if not isinstance(arrays[0], np.ndarray):\n arrays = np.asarray(arrays)\n\n return np.concatenate(arrays)", "def _umap_concat(data, **umap_kwargs):\n data_tiles = []\n for i in range(5):\n data_i = slice_vec_bands(data, start=i, end=i + 1)\n data_tiles.append(umap.UMAP(**umap_kwargs).fit_transform(data_i))\n\n data_concat = numpy.empty((\n data_tiles[0].shape[0],\n sum(dt.shape[1] for dt in data_tiles)\n ))\n\n start_col = 0\n for dt in data_tiles:\n end_col = start_col + dt.shape[1]\n data[:, start_col:end_col] = dt\n start_col = end_col\n\n return data_concat", "def concat_same(context, number):\n buffer = context\n for i in range(0, number - 1):\n buffer = np.concatenate((buffer, context), axis=0) \n return buffer", "def concatenate_offset(self, X):\n return np.c_[np.ones((X.shape[0], 1)), X]", "def concat_rotated_images(X):\n X_aug = copy.deepcopy(X)\n for key in X_aug.keys():\n X_aug[key] = np.concatenate([X[key][:, :, :, :],\n X[key][:, :, ::-1, :],\n X[key][:, ::-1, :, :],\n X[key][:, ::-1, ::-1, :]])\n return X_aug", "def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)", "def transform(self, Xs, y=None):\n Xs = check_Xs(Xs)\n return np.hstack(Xs)", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def concat(V, s):\n\n X = []\n for k in range(s):\n x = []\n for j in V:\n x.append(j[k])\n X.append(vertcat(*x))\n return X", "def enframe(samples, winlen, winshift):\n\n # check if i+winlen > len(samples):\n\n result = []\n for i in range(0,len(samples),winshift):\n if(i+winlen > len(samples)): break\n result.append(samples[i:i+winlen])\n return np.array(result)\n 
# return np.array([samples[i:i+winlen] for i in range(0,len(samples),winshift)])", "def concat_inputs(context, num_frames, adjacent_frames):\n buffer = context[0:num_frames, :]\n for i in range(0, adjacent_frames*2):\n buffer = np.concatenate((buffer, context[i + 1 : num_frames + i + 1, :]), axis=1) \n return buffer", "def fetch_samples(self):\n return torch.cat(self.samples,dim=0).reshape(-1,self.parameters.numel())", "def generate_all_samples(self):\n\n n_samples, n_dimensions, V = self.n_samples, self.n_dimensions, self.V\n sample_all = np.zeros([n_samples, n_dimensions])\n\n X = int(0)\n for j in range(1, n_samples):\n X ^= V[self.index_of_least_significant_zero_bit(j - 1)]\n sample_all[j][:] = [float(x / math.pow(2, self.scale)) for x in X]\n return sample_all", "def grid_restack(all_vecs):\n cat_output = []\n for pos in range(all_vecs[0].shape[1]):\n pos_vecs = [x[:, None, pos, :] for x in all_vecs]\n cat_output += pos_vecs\n x2 = jnp.concatenate(cat_output, 1)\n return x2", "def get_agg(self, x, ids):\n \n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_agg = torch.mean(x[ids == i], 0).repeat(sample_size, 1)\n \n # concatenate each group of aggregated data\n if i == 0:\n agg = sample_agg \n else:\n agg = torch.cat((agg, sample_agg), dim=0)\n \n return agg", "def concat(list_of_arrays):\n shape = np.shape(list_of_arrays)\n newShape = [ shape[0]*shape[1] ]\n if len(shape)>2:\n for i in range(2,len(shape)):\n newShape.append(shape[i])\n \n array_concat = np.zeros(newShape)\n s=0\n e=shape[1]\n \n for i in range(0,shape[0]):\n array_concat[s:e] = list_of_arrays[i]\n s=e\n e=e+shape[1] \n return array_concat", "def samples(self):\n return np.concatenate([wf.samples for wf in self._waveforms])" ]
[ "0.6825311", "0.60100585", "0.5970394", "0.5881012", "0.57274216", "0.5629702", "0.55640787", "0.5547741", "0.55183864", "0.5463207", "0.5458389", "0.5414258", "0.5409394", "0.5360371", "0.5326923", "0.5292973", "0.5278876", "0.5277933", "0.5271905", "0.5263208", "0.52608395", "0.5247178", "0.52208346", "0.5208143", "0.52080554", "0.52036613", "0.5194638", "0.51836896", "0.5152405", "0.51486903" ]
0.69898236
0
Callback for event_in msg
def eventInCallback(self, msg): rospy.loginfo("event_in msg received") self.event_in = msg.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_in_cb(self, msg):\n self.event = msg.data", "def msg_event(self, event):\r\n pass", "def event_receive(self,event):\n\n pass", "def _handle_message(self, msg):\n self.event('message', msg)", "def process_event(self, event):\r\n pass", "def on_event(self, event):", "def on_message(data):\n pass", "def on_event(self, event):\r\n pass", "def handle_event(self, event):", "def visit_event(self, event):", "def on_event(self, event):\n pass", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def eventReceived(self, event):\n print repr(event)", "def handle_event(self, event):\n pass", "def handleMessage(msg):", "def listener(self, event):\n print \"TB:@%s arrived event %s\" % (event.time, event) \n informFunction = self._informFunc\n informFunction((event.time, event.state))\n return []", "def handleEvent(self, event):\n pass", "def process(self, event):\n pass", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def on_msg(self, callback):\n self._msg_callback = callback", "def message_callback(self, message):\n pass", "def events(self):", "def handle_msg(self, state_id, msg):\n pass", "def callback(parsed_msg, msg_object):\n assert msg_object.stream_id == stream_id\n assert parsed_msg in msg", "def process_IN_OPEN(self, event):", "def _handle_PacketIn(self, event):\r\n\r\n packet = event.parsed # This is the parsed packet data.\r\n if not packet.parsed:\r\n log.warning(\"Ignoring incomplete packet\")\r\n return\r\n\r\n packet_in = event.ofp # The actual ofp_packet_in message.\r\n\r\n # Comment out the following line and uncomment the one after\r\n # when starting the exercise.\r\n #self.act_like_hub(packet, packet_in)\r\n self.act_like_switch(packet, packet_in)\r\n #self.act_like_router(packet, packet_in)\r", "def SendMessage(self, event):\n pass", "def on_event_finished(self, event):", "def on_event(self, event):\r\n\r\n print(\"on event called, event:\", event)\r\n\r\n self.state = self.state.on_event(event)\r\n publish_state_msg(state_msg, odrive_bridge.get_state())" ]
[ "0.8662638", "0.78358436", "0.7139262", "0.69602036", "0.69505644", "0.69212675", "0.68724275", "0.6847703", "0.68112975", "0.67634106", "0.67600983", "0.65930665", "0.65206", "0.6503782", "0.64967126", "0.6489662", "0.6486914", "0.6473191", "0.64333844", "0.64333844", "0.6420191", "0.64068246", "0.6381816", "0.62765974", "0.6266916", "0.6261129", "0.6239578", "0.6238063", "0.6216749", "0.62092614" ]
0.8784337
0
True if a next page exists.
def has_next(self): return self.page < self.pages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_next(self):\n return self.current_page < self.pages", "def has_next_page(self):\n if self.page_number == 0:\n return True\n\n return self.next_page_token is not None", "def list_has_next_page(self, soup):\n\n # Check for the 'next page' element at the bottom of the page\n next_page_exists = soup.find('a', class_='pager pager-next')\n\n # If this element exists, there is a next page of apartments to parse\n if next_page_exists:\n return True\n else:\n return False", "def has_next(self) -> bool:\n if not self._exhausted:\n iter(self)\n\n return bool(self._queue or self._has_next_page())", "def has_next():\n\n return True", "def has_next(self):\n if self.idx < len(self.nodes):\n return True\n else:\n return False", "def has_next_page(response: scrapy.http.Response):\n next_buttons = response.xpath('//span[@class=\"glyphControl glyphPaginationNext\"]').extract()\n return bool(next_buttons)", "def has_next(self):\n return len(self.pile) > 0", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def has_next():", "def has_next(self) -> bool:\n return self.published_after().count() != 0", "def has_next(self):\n if self._count is not None:\n # If count is available, use it\n return bool(self._count)\n else:\n # otherwise we have no idea\n return True", "def have_next_page(self):\n page_nav = self.driver.find_element(*CustomerGroupsPage.PAGE_NAVIGATION)\n next = page_nav.find_element(*CustomerGroupsPage.NEXT_BTN)\n link = next.find_element_by_tag_name(\"a\")\n if \"disable\" not in next.get_attribute(\"class\"):\n link.click()\n return True\n else: \n return False", "def _has_next_page(self):\n if self._search_data is None:\n return True\n begin_index = int(self._params['beginIndex'])\n product_count = int(self._search_data['totalCount'])\n page_size = int(self._search_data['pageSize'])\n # return True if there are more products to parse\n return begin_index < product_count", "def has_next(self):\n return self.count < len(self)", "def has_next(self):\n return not self.finished_function(self.peek)", "def has_next(self):\n regf = self.first_hbin().parent()\n if regf.hbins_size() + regf.first_hbin_offset() == self._offset_next_hbin:\n return False\n\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def has_next(self):\n return self._mu is not None or self._source.has_next()", "def has_next(self) -> bool:\n return self._high is None or self._low < self._high", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def has_next(self) -> bool:\n return self._bin_iter.has_next()", "def check_if_next(driver, num_pages):\n \n try: \n next_link = driver.find_element_by_xpath(\"//li[@class='next']\")\n page_links = driver.find_elements_by_xpath(\n \"//li//span[@class='disabled']\")\n last_page = check_if_last_page(page_links, num_pages)\n if last_page: \n return False\n time.sleep(random.randint(3, 6))\n next_link.click()\n return True\n except Exception as e:\n print e\n return False", "def is_first_page(self):\n return 1 == self.page", "def hasNext(self) -> bool:\n return self.index + 1 < len(self.nodes_sorted)", "def hasNext(self) -> bool:\n return self.pointer < len(self.ordered_nodes)", "def has_previous(self):\n return self.page > 1", "def exists(self, page: str) -> bool:\n\n if \"-1\" in requests.get(self.apiurl.format(page)).json()[\"query\"][\"pages\"]:\n return False\n return True", "def has(self, 
page):\n for entry in self._entries:\n if entry.page == page:\n return True\n return False", "def if_next(self, **kwargs):\n\n if kwargs.get('event') == 'next':\n logging.debug(u\"- asked to move to next step\")\n return True\n\n return False" ]
[ "0.8679172", "0.8661847", "0.80350786", "0.7795365", "0.77713907", "0.7578386", "0.7563336", "0.74716824", "0.7424023", "0.73894376", "0.73567814", "0.7348578", "0.7318991", "0.73121655", "0.7292514", "0.72168624", "0.71238124", "0.7098628", "0.7048289", "0.6925768", "0.69049555", "0.68617827", "0.6844657", "0.6843075", "0.68209875", "0.6804444", "0.6764844", "0.6739881", "0.669928", "0.6663205" ]
0.86719596
1
Renames the Storage Element in the DBS.
def dbsApiImplRenameSE(self, storage_element_from, storage_element_to): funcInfo = inspect.getframeinfo(inspect.currentframe()) seNameFrom = get_name(storage_element_from) seNameTo = get_name(storage_element_to) data = self._server._call ({ 'api' : 'updateSEName', 'storage_element_name_from' : seNameFrom, 'storage_element_name_to' : seNameTo }, 'POST')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rename_vg(self, new_name):\n self.metadata.rename_vg(new_name)", "def name(self, new_name):\n self.rename(new_name)", "def rename(self, new_name):\n\n self.__enforce_connected()\n current_url = self.url\n self._set_field(\"name\",new_name)\n self.set_json(self._http_client.update(current_url, self.get_json()))", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def rename(self, name):\n self.name = name", "def rename(oldname, newname):", "def rename(self, name):\n return self.parent.rename(self, name)", "def rename(old, new):", "def rename(old, new):", "def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False", "def rename(self, name: str):\n self.doc['name'] = name", "def host_renameOpsiDepotserver(self, oldId, newId):", "def rename(self, container_id, name):\n context = pecan.request.context\n container = _get_container(container_id)\n check_policy_on_container(container.as_dict(), \"container:rename\")\n\n if container.name == name:\n raise exception.Conflict('The new name for the container is the '\n 'same as the old name.')\n container.name = name\n container.save(context)\n return view.format_container(pecan.request.host_url, container)", "def setName(self, attributeIndex, newName) -> None:\n ...", "def rename(self, src, dst):\n os.rename(src, dst)", "def rename_node(self, node, name):\r\n\r\n if not name:\r\n raise ValueError(\"No node name provided for rename\")\r\n if name in self.nodes():\r\n raise ValueError(\"Node with name '%s' already exists\" % name)\r\n\r\n old_name = self.node_name(node)\r\n\r\n del self.nodes[old_name]\r\n self.nodes[name] = node", "def rename(self, name):\n item = self.list_item_all_fields\n item.set_property('Title', name)\n item.set_property('FileLeafRef', name)\n qry = UpdateEntityQuery(item)\n self.context.add_query(qry)\n return self", "def rename(self, new_name):\n method = \"rename_vault\"\n params = {\n \"vault_id\": self.id,\n 'vault_name': new_name\n }\n return self._client.connection.make_request(method, params)", "def change_image_name(self, name):\n self.image.name = name", "def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None", "def change_name(self, item):\n # Get the new name.\n new_name = str(item.text())\n if not new_name or not self.item_name:\n return None\n\n # See if the name was actually changed.\n if new_name == self.item_name:\n return None\n\n # If it was, change the name in the list/tree view and in Maya.\n if not new_name:\n item.setText(self.item_name)\n self.item_name = cmds.rename(self.item_name, new_name)\n item.setText(self.item_name)", "def SearchAndReplaceRigElementNames(rig, search_string, replace_string):\n\n hierarchy_mod = rig.get_hierarchy_modifier()\n selection = hierarchy_mod.get_selection()\n\n if not selection:\n\n return\n\n for item in selection:\n\n src_name = str(item.get_editor_property(\"name\"))\n\n new_name = src_name.replace(search_string, replace_string)\n\n hierarchy_mod.rename_element(item, new_name)", "def rename():\n database.ask(mode='single')\n F = database.check(single=True)\n res = askItems([['Name',database.names[0]]],\n caption = 'Rename variable')\n if res:\n name = 
res['Name']\n export({name:F})\n database.forget()\n database.set(name)", "def rename(self, name):\n return self.client.api.rename(self.id, name)", "def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)", "def rename_var(self, old_id, new_id): # to be overriden in subclasses when necessary\n pass", "def storage_class_name(self, value: str):\n self._properties[\"storageClassName\"] = value", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def rename(self, newname):\n # set the new column name\n self.colname = newname", "def renameAssetObjects(self):\n\t\tfor i,o in enumerate( self.objects ):\n\t\t\tmn.Node( o ).name = self.name + '%i'%i" ]
[ "0.64199173", "0.6355908", "0.60965645", "0.5981445", "0.59720033", "0.5965701", "0.58491737", "0.584779", "0.584779", "0.58017844", "0.5783619", "0.57192796", "0.57183737", "0.56618834", "0.5659627", "0.5656443", "0.5645224", "0.5639042", "0.5623491", "0.5615697", "0.5606497", "0.559358", "0.55917615", "0.55784893", "0.55655557", "0.55440176", "0.55365163", "0.5534471", "0.5531102", "0.5525798" ]
0.71032864
0
Returns a slip number starting at 1 and auto increments thereafter. NO RESET ON PROGRAM REDEPLOY only on deleting Slip entities.
def getSlipNum(): query = Slip.query() results = query.fetch(limit = MAX_SLIPS) temp = 0 for result in results: if result.number > temp: temp = result.number slipNum = temp slipNum += 1 return slipNum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def id(self):\n _id = super(SchedulePhase, self).id\n return _id + 1", "def _get_sprint_number() -> int:\n sprint = get_value_from_redis('sprint-number')\n if not sprint:\n sprint = JIRA_SPRINT\n return int(sprint)", "def next_move(ttype):\n count = db.session.query(StockMove.id).count() + 1\n return str('SO/' if ttype =='sale' else 'PO/') + str(count)", "def __get_sequence_number(self):\n if self.counter > 999:\n self.counter = 0\n else:\n self.counter += 1\n\n str_sequence_num = self.counter + 256\n str_hex_sequence_num = hex(str_sequence_num)[2:]\n return str_hex_sequence_num", "def _getNewAnnId(self):\n\n newAnnId = COCO_PLUS.ANN_ID\n COCO_PLUS.ANN_ID += 1\n\n return newAnnId", "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()", "def _GetNextId(self):\r\n ret = self.next_id\r\n self.next_id += 1\r\n return str(self.next_id)", "def get_sequence(self):\n self.__sequence = self.__sequence + 1\n return self.__sequence - 1", "def get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1", "def next_int(self):\n self.innovation_number += 1\n return self.innovation_number", "def get_update_number( self ):", "def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur", "def make_destiID(self):\n\n desti_ob_list = self.list_all_destinations()\n number = 1 + len(desti_ob_list)\n self.destiID_number = \"0\" + str(number)\n\n return self.destiID_number", "def _getNewImgId(self):\n\n newImgId = COCO_PLUS.IMG_ID\n COCO_PLUS.IMG_ID += 1\n\n return newImgId", "def id_count(self) -> int:\n tmp = self._id_count\n self._id_count -= 1\n return tmp", "def get(self) -> int:\n return self.nums.pop() if self.nums else -1", "def next_num(cls):\r\n cls.num += 1\r\n return cls.num", "def get_new(self, value):\n if value is not None:\n raise ValueError('The pk for %s is \"auto-increment\", you must not fill it' % \\\n self._model._name)\n key = self._instance.make_key(self._model._name, 'max_pk')\n return self.normalize(self.connection.incr(key))", "def generate_group_id(self):\n if not hasattr(self.space, '_group_ctr'):\n self.space._group_ctr = 999\n self.space._group_ctr += 1\n return self.space._group_ctr", "def getID(self):\n global COUNT, C_LOCK\n with C_LOCK:\n COUNT += 1\n return COUNT", "def next_invoice_number(cls, user):\n cur_max = cls.query.filter_by(user_id=user.id).count()\n cur_max += 1\n\n return str(cur_max)", "def increment_counter(self) -> None:", "def save_increment(self):\n self.version = self.next_available_version()\n return self.save()", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def __generate_id(self):\n ids = [int(fd.get('id')) for fd in self.favorite_drinks]\n return str(max(ids)+1)", "def get_next_id():\n global _lock, _counter\n with _lock:\n if _counter == 65535:\n _counter = 1\n else:\n _counter += 1\n\n return str(_counter)", "def getItemId(self):\n self.itemId += 1\n return self.itemId" ]
[ "0.56331414", "0.55639076", "0.54039794", "0.53689134", "0.52841884", "0.52100134", "0.5177412", "0.5177412", "0.51675767", "0.51634294", "0.5161609", "0.513854", "0.51240927", "0.5112452", "0.51088494", "0.51081693", "0.5086091", "0.508494", "0.5073603", "0.5061621", "0.5052252", "0.50411284", "0.503917", "0.50381464", "0.5029067", "0.5018142", "0.49965623", "0.4993396", "0.49928078", "0.49897718" ]
0.66455674
0
Instantiates a new boat object and returns json string with its details.
def post(self): parent_key = ndb.Key(Boat, "parent_boat") boat_data = json.loads(self.request.body) new_boat = Boat(id=None, name=boat_data['name'], type=boat_data['type'], length=boat_data['length'], at_sea=True, parent=parent_key) new_boat.put() new_boat.id = '/Boat/' + new_boat.key.urlsafe() new_boat.put() boat_dict = new_boat.to_dict() self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(boat_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n query = Boat.query()\n results = query.fetch(limit = MAX_BOATS)\n boat_dicts = []\n for match in results:\n boat_dicts.append({'id': match.id, 'name': match.name, 'type': match.type,\n 'length': match.length, 'at_sea': match.at_sea })\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dicts))", "def load_create():\n # Ensure boat has all required information, return error otherwise\n load_validator.require_all = True\n\n if request.get_json() is None\\\n or not request.data \\\n or not load_validator.validate(request.get_json()):\n\n failed = {\"Error\": \"The request object does not follow specifications - see documentation.\"}\n response = Response(\n response=json.dumps(failed),\n status=400,\n mimetype='application/json'\n )\n return response\n elif \"application/json\" not in request.accept_mimetypes \\\n and request.headers.get(\"Accept\") is not None\\\n and request.headers.get(\"Accept\") != \"\":\n response = Response(\n response=json.dumps({\"Error\": \"This body type is not supported.\"}),\n status=406,\n mimetype='application/json'\n )\n return response\n\n content = request.get_json()\n new_load = datastore.entity.Entity(key=client.key(\"load\"))\n new_load.update({\n \"weight\": content[\"weight\"],\n \"content\": content[\"content\"],\n \"description\": content[\"description\"],\n \"carrier\": None,\n })\n client.put(new_load)\n\n new_load[\"id\"] = new_load.key.id\n new_load[\"self\"] = request.url_root + \"loads/\" + str(new_load.key.id)\n\n response = Response(\n response=json.dumps(new_load),\n status=201,\n mimetype='application/json'\n )\n return response", "def bridge_create_json():\n return {\n \"base_stations\": {\n \"id\": 98765,\n \"name\": \"New Bridge\",\n \"mode\": \"home\",\n \"hardware_id\": \"0x1234567890abcdef\",\n \"hardware_revision\": 4,\n \"firmware_version\": {\n \"wifi\": \"0.121.0\",\n \"wifi_app\": \"3.3.0\",\n \"silabs\": \"1.0.1\",\n },\n \"missing_at\": None,\n \"created_at\": \"2019-04-30T01:43:50.497Z\",\n \"updated_at\": \"2019-04-30T01:44:43.749Z\",\n \"system_id\": 12345,\n \"firmware\": {\"wifi\": \"0.121.0\", \"wifi_app\": \"3.3.0\", \"silabs\": \"1.0.1\"},\n \"links\": {\"system\": 12345},\n }\n }", "def get(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def test_create_boat(self):\n pass", "def create() -> TJsonResponse:\n if request.headers['Content-Type'] == 'application/json':\n url = request.json.get('url')\n else:\n url = request.form.get('url')\n if not url:\n return jsonify(error='bad request'), 400\n result = scrape.scrape_meta_for_url(url)\n inserted_id, tags = result.get()\n url_hash = encode(inserted_id)\n response_body: Dict[str, Any] = jsonify(hash=url_hash, short_url=f'https://fanlens.io/@{url_hash}', tags=tags)\n return response_body", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def __init__(self, json_map, s, b, o):\n self.pekingMap = init_map(json_map)\n self.character = Character(s)\n self.startLocation = s\n self.budget = b\n self.occupiedLocations = o", "def 
create_cab(self):\n cab = Cab()\n cab.type = self.info['type']\n cab.driver_name = self.info.get('driver_name')\n cab.rc_number = self.info['rc_number']\n cab.city_id = self.info['city_id']\n cab.company_name = self.info['company_name']\n cab.model_name = self.info['model_name']\n cab.update_time = datetime.utcnow().replace(microsecond=0)\n self.cab_id = self.save(cab)\n\n # we can do asynchronously\n self.create_cab_state()\n return self.cab_id", "def new(cls):\n with open(app.config[\"BLANK_CHARACTER_FILE\"]) as fp:\n character = json.load(fp)\n character[\"abilities\"] = cls.abilities()\n return character", "def test_get_boat(self):\n pass", "def __init__(self, data):\n self.json = data\n self.id = data.get(\"ID\", None)\n self.name = data.get(\"Name\", None)\n self.domain = data.get(\"Domain\", None)\n self.brand_id = data.get(\"BrandID\", None)\n self.account_id = data.get(\"AccountID\", None)\n self.brand_name = data.get(\"BrandName\", None)\n self.country_id = data.get(\"CountryID\", None)\n self.country_name = data.get(\"CountryName\", None)\n self.account_name = data.get(\"AccountName\", None)\n self.pre_order = data.get(\"PreOrder\", None)\n self.type_id = data.get(\"TypeID\", None)\n self.type_name = data.get(\"TypeName\", None)\n self.nominal_code_id = data.get(\"NominalCodeID\", None)\n self.external_shop_id = data.get(\"ExtShopID\", None)\n self.pseudo_stock_level_type = data.get(\"PseudoStockLevelType\", None)\n self.currency_symbol = data.get(\"CurrencySymbol\", None)\n self.loyalty_point_per_value = data.get(\"LoyaltyPointPerValue\", None)\n self.loyalty_value_per_point = data.get(\"LoyaltyValuePerPoint\", None)\n self.disabled = data.get(\"disabled\", None)\n self.deleted = data.get(\"deleted\", None)\n self.note = data.get(\"Note\", None)", "def add_battery():\n data = request.get_json()\n battery = battery_rent_service.add(**data)\n battery = model_to_dict(battery)\n return jsonify({'response': battery}), 200", "def main(bow: api_models.Bow):\n found_matching = list(\n BowModel.scan((BowModel.type == bow.type) & (BowModel.size == bow.size))\n )\n if found_matching:\n return bad_request(f\"{bow.size} {bow.type} bows already exist\")\n new_bow = BowModel(**bow.dict())\n new_bow.save()\n new_bow.refresh()\n created_bow = api_models.BowWithID(**new_bow.attribute_values)\n return success(\n {\n \"item\": created_bow.dict(),\n \"message\": f\"{created_bow.size} {created_bow.type} bow created\",\n },\n 201,\n )", "def to_init_json(self) -> JSON:\n pass", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n # adicionando atributo especifico dessa classe\n self.batery_size = Batery(100)", "def __init__(self, order_json):\n self.shop = order_json['shop']\n self.size = order_json['size']\n self.customer_name = order_json['name']\n self.drink_name = order_json['drink']\n self.customer_number = order_json['customer_number']\n self.location = order_json['location']\n self.details = order_json['details']", "def json_dumps(self):\n application_obj = {\n \"id\": self.id,\n \"party\": Party.get_party_by_name(name=self.party_name),\n \"office\": Office.get_office_by_name(name=self.office_name),\n \"user\": User.find_user_by_id(id=self.user_id),\n \"date_created\": self.date_created,\n \"status\":self.status\n }\n return application_obj", "def business_info():\n print \"hello\"\n \n yelp_ids_empty = {}\n yelp_ids_dict = yelp_to_salon_list_SF('nail salon', yelp_ids_empty)\n\n businesses = {}\n\n # for business in yelp_ids_dict:\n # businesses = {\n # business.yelp_id: 
{\n # \"yelpID\": business.yelp_id,\n # \"businessName\": business.business_name,\n # \"busLat\": business.bus_lat,\n # \"busLong\": business.bus_long,\n # \"address\": business.address,\n # \"phone\": business.phone\n # }\n # }\n\n\n return jsonify(yelp_ids_dict)", "def __json_init__(cls, **kwargs):\n return cls(**kwargs)", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def json_dump(self):\n return {\n \"id\":self.id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othername\": self.othername,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"passporturl\": self.passporturl,\n \"roles\": self.roles,\n \"nationalid\": self.nationalid,\n \"county\": self.county,\n \"date_created\": self.date_created,\n \"date_modified\": self.date_modified\n }", "def place_bid():\n if not request.get_json():\n abort(400)\n data = request.get_json(force=True)\n\n if not data.get('userID'):\n abort(400)\n if not data.get('amount'):\n abort(400)\n if not data.get('petID'):\n abort(400)\n\n #new_uuid = str(uuid.uuid4())\n mod.place_a_bid(data['petID'], data['amount'], data['userID'])\n # HTTP 200 Created\n # return jsonify({\"id\": new_uuid}), 200\n resp = {\"status\": \"OK\"}\n return jsonify(resp)", "def get():\n return jsonify(baby='knight2'), 200", "def jsonizable_object(self):\n obj = {\n 'title': self.title,\n 'url': self.url,\n 'abstract': self.abstract\n }\n if self.metadata:\n obj['metadata'] = self.metadata\n return obj", "def construct_json(self):\n\n if 'message' not in self.data:\n self.data['message'] = self.message\n\n if self.status == 200:\n self.data['status'] = 'OK'\n else:\n self.data['status'] = 'Not OK'\n\n return json.dumps(self.data)", "def add_bowl(self, env, bowl_color, width, height):\n\n bowl_size = (0.12, 0.12, 0)\n bowl_urdf = \"bowl/bowl.urdf\"\n bowl_pose = self.get_random_pose(env, bowl_size)\n bowl_id = env.add_object(bowl_urdf, bowl_pose, \"fixed\")\n pb.changeVisualShape(\n bowl_id, -1, rgbaColor=utils.COLORS[bowl_color] + [1])\n bowl_pix = utils.xyz_to_pix(bowl_pose[0], self.bounds, self.pix_size)\n bowl_obj_info = {\n \"obj_id\": bowl_id,\n \"pose\": bowl_pose,\n \"size\": bowl_size,\n \"urdf\": bowl_urdf,\n \"color\": bowl_color,\n \"pix\": bowl_pix,\n \"unknown_color\": bowl_color in utils.EVAL_COLORS,\n \"region\": determine_region(bowl_pix[0], bowl_pix[1], width, height),\n }\n\n return bowl_obj_info", "def _create_berth(self, row):\n\n berth = self.world.create_entity()\n\n position = np.array([\n float(row[\"lon\"]),\n float(row[\"lat\"])\n ])\n\n berth_info = BerthInfo(\n row[\"id\"],\n row[\"name\"],\n row['max_quay_length'],\n float(row['max_depth']),\n self.vessel_content_types(int(row[\"ship_types\"])),\n allowed_vessel_classes=\n self.berth_service_distribution_factory.get_allowed_vessel_classes_for_terminal(row[\"terminal\"]),\n section=row[\"section\"])\n\n sampler = self.berth_service_distribution_factory.service_time_sampler(row[\"terminal\"])\n\n self.world.add_component(berth, Position(lonlat=np.array(position)))\n self.world.add_component(berth, berth_info)\n self.world.add_component(berth, BerthStateMachine(sampler, self.berth_randomized_check_prob))\n \n return berth", "def __str__(self):\n return self.AsJsonString()", "def create_custom():\n # Extract initialisation parameters\n alpha = request.args.get('alpha')\n alpha = float(alpha)\n generations = request.args.get('generations')\n generations = int(generations)\n beta = request.args.get('beta')\n beta = 
float(beta)\n pec = request.args.get('pec')\n pec = float(pec)\n q = request.args.get('q')\n q = float(q)\n\n # Extract the custom coordinates and create a list of nodes\n coords = request.args.get('custom_coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n\n # Initialise instance\n i = Instance(nodes, alpha, beta, pec, q)\n\n return jsonify(nodes=i.nodes, alpha=i.alpha, beta=i.beta, decay=i.decay,\n min_pheromone=i.min_pheromone, q=i.q,\n local_deposit=i.local_deposit, distances=i.distances,\n pheromones=i.pheromones, ants=i.ants, shortest_path=i.shortest_path,\n min_distance=i.min_distance, message=\"Instance Initialised\")" ]
[ "0.6134405", "0.5953638", "0.58842367", "0.5856151", "0.5779485", "0.5639422", "0.5606406", "0.5595455", "0.5578063", "0.55134076", "0.5495442", "0.5480072", "0.53981155", "0.53549975", "0.5312522", "0.5308995", "0.5296416", "0.5265275", "0.52622354", "0.52611625", "0.52337754", "0.523082", "0.5227277", "0.52240425", "0.52236295", "0.5220669", "0.52152544", "0.5210715", "0.52065885", "0.52038556" ]
0.6180553
0
Returns json string with Boat entity details by id.
def get(self, id=None): if id: boat = test4ValidEntity(id) if boat == None: self.response.set_status(404) else: boat_dict = boat.to_dict() self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(boat_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, id):\n return {'id': id}", "def get_item(id):\n return jsonify(id=id, name='name', number=123)", "def companies_by_id(id):\n res = requests.get('http://0.0.0.0:5002/company/{}/products'.format(id))\n return jsonify(res.json())", "def get(id):\n elements = Advertisements().get_one_element(id)\n data = jsonify(elements)\n if data is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n data.statut_code = 200\n return data", "def get(self, _id):\n if _id is None:\n return jsonify([user.serialize() for user in Goal.query.all()])\n else:\n return jsonify(Goal.query.filter_by(id=_id).all())", "def get(self):\n query = Boat.query()\n results = query.fetch(limit = MAX_BOATS)\n boat_dicts = []\n for match in results:\n boat_dicts.append({'id': match.id, 'name': match.name, 'type': match.type,\n 'length': match.length, 'at_sea': match.at_sea })\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dicts))", "def get_animal_details_by_id():\n animal_id = request.args.get('animalId')\n try:\n animal = Animal.object_as_dict(Animal.get_animal_by_id(animal_id))\n return jsonify(animal)\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501", "def item_json(item_id):\n item_details_json = {}\n try:\n item_in_db = session.query(Item).filter_by(id=item_id).one()\n item_details_json['item'] = item_in_db.serialize\n except Exception as e:\n item_details_json['result'] = 'No data for item ID ' \\\n + str(item_id) + ': ' + str(e)\n return jsonify(item_details_json)", "def bookJSON(book_id):\n book = db_session.query(Book).filter_by(id=book_id).one()\n return jsonify(book=book.serialize)", "def json_format_by_id(id):\n fmt = Format.query.filter(Format.id==id).first()\n if fmt is None:\n abort(404)\n return jsonify(fmt.get_public_dict())", "def json_status_by_id(id):\n status = Status.query.filter(Status.id==id).first()\n if status is None:\n abort(404)\n return jsonify(status.get_public_dict())", "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def get_entity_by_id(self, id):\n url = '{}/iot/devices/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers)\n return r.json()", "def to_json(self, obj):\n _dict = obj._to_dict()\n if ID not in _dict or _dict[ID] is None:\n _dict[ID] = str(uuid.uuid4())\n json_str = json.dumps(_dict, indent=4)\n return json_str", "def json(self):\n return {\n 'id': self.id,\n 'id_bank_data': self.id_bank_data,\n 'national_id_document': self.national_id_document,\n 'country': self.country,\n 'name': self.name,\n 'surname': self.surname,\n 'mail': self.mail,\n 'google_token': self.google_token,\n 'role': self.role\n }", "def get(self, id):\n ticket = Ticket.query.filter_by(id=id).one()\n\n return jsonify(ticket)", "def get_entity_by_id(self, id):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers_ld)\n return r.json()", "def get_wbentity(id: str, language: str) -> dict:\n url = 
\"%s?format=json&action=wbgetentities&ids=%s&languages=%s\" % (WIKIDATA_URL, id, language)\n # Perform request\n print_debug(\"Sending GET %s\" % url)\n response = requests.get(url)\n data = response.json()\n print_debug(\"%s -> %d\" % (url, response.status_code))\n print_debug(\"%s\" % response.text)\n return data[\"entities\"][id]", "def showModelInfoJSON(style_id, model_id):\n model = session.query(Model).filter_by(id=model_id).one()\n return jsonify(model=model.serialize)", "def json(self):\n return {\n 'id': self.id,\n 'name': self.name\n }", "def detail(id):\n program = Programa.query.get(id)\n return programa_schema.jsonify(program), 200", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def send_info_for_one_cupcake(cupcake_id):\n\n cupcake = Cupcake.query.get_or_404(cupcake_id)\n serialized = cupcake.serialize()\n return jsonify(cupcake=serialized)", "def get(id: int):\r\n filename = Path(__file__).parent / \"recipe-data.csv\"\r\n files = import_file.Files()\r\n recipe_load = files.import_from_csv(filename)\r\n\r\n recipes = Recipes(recipe_load)\r\n a_recipe = recipes.filter_recipes_id(id)\r\n\r\n return jsonify(a_recipe)", "def get(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_dict = slip.to_dict()\n slip_dict['departure_history'] = {}\n slip_dict['departure_history']['departure_date'] = slip.departure_date\n slip_dict['departure_history']['departed_boat'] = slip.departed_boat\n del slip_dict['departed_boat'], slip_dict['departure_date']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def json(self):\n if self.resource:\n return jsonify(self.resource)\n return jsonify({'id': self.id})", "def put(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n else:\n boat.name = None\n if 'type' in boat_data:\n boat.type = boat_data['type']\n else:\n boat.type = None\n if 'length' in boat_data:\n boat.length = boat_data['length']\n else:\n boat.length = None\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def get_one_stock(id):\r\n print(\"<get_one_stock()>\")\r\n print(\"id: \", id)\r\n stocks = Stock.objects(id=id).first()\r\n return jsonify(json.loads(stocks.to_json()))", "def itemJSON(category_id, item_id):\n item = session.query(Item).filter_by(id=item_id).one()\n return jsonify(Item=item.serialize)", "def json_user_by_id(id):\n user = User.query.filter(User.id==id).first()\n if user is None:\n abort(404)\n return jsonify(user.get_public_dict())" ]
[ "0.6378625", "0.6359808", "0.61525065", "0.6144673", "0.6111232", "0.60792804", "0.607883", "0.60710144", "0.60687", "0.6054664", "0.59475464", "0.59396726", "0.59264404", "0.58963346", "0.58800006", "0.58496755", "0.5830119", "0.5822737", "0.5814824", "0.57946306", "0.5787585", "0.5763807", "0.574152", "0.57179767", "0.57150865", "0.5706471", "0.56937814", "0.5637215", "0.5631074", "0.56303614" ]
0.75867003
0
Deletes a Boat entity.
def delete(self, id=None): if id: boat = test4ValidEntity(id) if boat == None: self.response.set_status(404) else: if boat.at_sea == False: query = Slip.query(Slip.current_boat == boat.id) result = query.fetch(limit = 1) for match in result: match.current_boat = None match.arrival_date = None match.put() boat.key.delete() self.response.write("Boat has been deleted!") else: boat.key.delete() self.response.write("Boat has been deleted!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_boat(self):\n pass", "def delete(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n if slip.current_boat != None:\n \"\"\" Tests for a Boat \"docked\" in slip to be deleted. if found, sets the\n Boat entity at_sea property to True and deletes the slip. \"\"\"\n boat_dict = None\n query = Boat.query(Boat.at_sea == False)\n results = query.fetch(limit = MAX_BOATS)\n for match in results:\n if slip.current_boat == match.id:\n match.at_sea = True\n match.put()\n slip.key.delete()\n self.response.write(\"Slip has been deleted!\")\n else:\n slip.key.delete()\n self.response.write(\"Slip has been deleted!\")", "def delete(self):\r\n db.session.delete(self)\r\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()", "def model_delete(self, db):\n db.session.delete(self)\n db.session.commit()", "def delete(self, request, pk, bid, format=None):\n benchmarkmodel = self.get_object(pk, bid)\n benchmarkmodel.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "def delete(self, commit=True):\n db.session.delete(self)\n return commit and db.session.commit()", "def delete(self, bill_id):\n bill = BillModel.find_by_id(bill_id)\n if bill:\n bill.delete_from_db()\n\n return {'message': 'Bill deleted'}", "def delete(self):\n db.session.delete(self)\n self.__commit()", "def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()", "def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self, commit=True):\n db.session.delete(self)\n if commit:\n db.session.commit()\n return self", "def delete(self, model):\n self._isinstance(model)\n db.session.delete(model)\n db.session.commit()", "def delete(self):\r\n self.domain.delete_item(self)", "def delete_entity(self, context, lb_obj):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id)\n msg = _(\"NetScaler driver lb_obj removal: %s\") % lb_obj.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)", "def delete(self):\n self.dbm().model_delete(self)" ]
[ "0.6431189", "0.6331093", "0.6189687", "0.6158346", "0.6158346", "0.6158346", "0.6158346", "0.6158346", "0.6158346", "0.6158346", "0.6158346", "0.6158346", "0.6120139", "0.6097573", "0.6080516", "0.60249275", "0.60249275", "0.60249275", "0.60249275", "0.5943314", "0.5942308", "0.5902386", "0.5896856", "0.58868915", "0.58868915", "0.58856374", "0.588456", "0.5877556", "0.5837602", "0.5814181" ]
0.7158646
0
Mutates user supplied Boat entity properties by id. Unaddressed properties remain.
def patch(self, id=None): if id: boat = test4ValidEntity(id) if boat == None: self.response.set_status(404) else: boat_data = json.loads(self.request.body) if 'name' in boat_data: boat.name = boat_data['name'] if 'type' in boat_data: boat.type = boat_data['type'] if 'length' in boat_data: boat.length = boat_data['length'] boat.put() boat_dict = boat.to_dict() self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(boat_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def put(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n else:\n boat.name = None\n if 'type' in boat_data:\n boat.type = boat_data['type']\n else:\n boat.type = None\n if 'length' in boat_data:\n boat.length = boat_data['length']\n else:\n boat.length = None\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def update(self, commit=True, **kwargs):\n # Prevent changing IDS\n kwargs.pop('id', None)\n for attr, value in kwargs.iteritems():\n # Flask-restful makes everything None by default\n if value is not None:\n setattr(self, attr, value)\n return commit and self.save() or self", "def copy_from_entity(self, entity):\n for prop in entity._EndpointsPropertyItervalues():\n attr_name = prop._code_name\n value = getattr(entity, attr_name)\n if value is not None:\n if isinstance(prop, properties.EndpointsAliasProperty):\n value_set = getattr(self, attr_name) is not None\n elif isinstance(prop, ComputedProperty):\n value_set = True\n else:\n value_set = prop._name in self._values\n if not value_set:\n setattr(self, attr_name, value)", "def fix_id(entity_json):\n entity_json['object_id'] = entity_json['id']\n del entity_json['id']\n return entity_json", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def update(table, id_):\n\n # your code\n\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n inventory_data = [\"Product: \", \"Manufacturer: \", \"Release date: \", \"Durability: \"]\n inputs = ui.get_inputs(inventory_data, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def get_by_id(self, id):\n user = super(ExtendedUsersService, self).get_by_id(id)\n user.first_name = 'John' + str(id)\n user.last_name = 'Smith' + str(id)\n user.gender = 'male'\n return user", "def update(self, *args, **kwargs):\n selves = ['id', 'size', 'x', 'y']\n if args is not None and len(args) is not 0:\n for a in range(len(args)):\n setattr(self, selves[a], args[a])\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)", "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in 
slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def update(self, f):\n\n for p in self.__mapper__.attrs:\n\n if p.key == 'oid':\n continue\n try:\n setattr(self, p.key, getattr(f, p.key))\n\n except AttributeError:\n # The dict() method copies data property values into the main dict,\n # and these don't have associated class properties.\n continue", "def update(self, f):\n\n for p in self.__mapper__.attrs:\n\n if p.key == 'oid':\n continue\n try:\n setattr(self, p.key, getattr(f, p.key))\n\n except AttributeError:\n # The dict() method copies data property values into the main dict,\n # and these don't have associated class properties.\n continue", "def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)", "def update(self, *args, **kwargs):\n if args:\n if args is not None:\n lista = [\"id\", \"size\", \"x\", \"y\"]\n for i, j in zip(args, lista):\n setattr(self, j, i)\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)", "def update(self, *args, **kwargs):\n attributes = [\"id\", \"size\", \"x\", \"y\"]\n if len(args) > 0:\n for i in range(len(args)):\n setattr(self, attributes[i], args[i])\n else:\n self.id = kwargs.get(\"id\", self.id)\n self.size = kwargs.get(\"size\", self.size)\n self.x = kwargs.get(\"x\", self.x)\n self.y = kwargs.get(\"y\", self.y)", "def from_entity(cls, e):\n kwargs = {name: e.get(name) for name, prop in cls._properties.items() if prop.is_id} # we need the id value\n obj = cls(**kwargs)\n obj._key = e.key\n\n for name, prop in cls._properties.items(): # set values\n if not prop.is_id:\n obj[name] = e.get(name)\n\n return obj", "def __update(self, id=None, width=None, height=None, x=None, y=None):\n arg = [id, width, height, x, y]\n var = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n for i in range(5):\n if arg[i] is not None:\n setattr(self, var[i], arg[i])", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def update(self, *args, **kwargs):\n if args:\n li = [\"id\", \"size\", \"x\", \"y\"]\n for i in range(len(args)):\n setattr(self, li[i], args[i])\n else:\n for i, j in kwargs.items():\n setattr(self, 
i, j)", "def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)", "def update(self, *args, **kwargs):\n if len(args) != 0:\n try:\n self.id = args[0]\n self.width = args[1]\n self.height = args[2]\n self.x = args[3]\n self.y = args[4]\n except IndexError:\n pass\n else:\n for i in kwargs.keys():\n try:\n getattr(self, i)\n except Exception as er:\n raise er\n setattr(self, i, kwargs[i])", "def update(self, *args, **kwargs):\n new = [\"id\", \"size\", \"x\", \"y\"]\n for pos, val in enumerate(args):\n setattr(self, new[pos], val)\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def update(self, identifier, new_feature):\n\n all_data = self._load()\n for i, feature in enumerate(all_data['features']):\n if self.id_field in feature:\n if feature[self.id_field] == identifier:\n new_feature['properties'][self.id_field] = identifier\n all_data['features'][i] = new_feature\n elif self.id_field in feature['properties']:\n if feature['properties'][self.id_field] == identifier:\n new_feature['properties'][self.id_field] = identifier\n all_data['features'][i] = new_feature\n with open(self.data, 'w') as dst:\n dst.write(json.dumps(all_data))", "def update(self, *args, **kwargs):\n if args and len(args) > 0:\n keys = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n for i, v in enumerate(args):\n setattr(self, keys[i], v)\n else:\n for k, v in kwargs.items():\n setattr(self, k, v)", "def update(self, **kwargs):\n for key, value in kwargs.items():\n try:\n is_property = isinstance(getattr(self.__class__, key), property)\n except AttributeError:\n continue\n\n if is_property:\n setattr(self, key, value)", "def test_api_object_update_property(self, api_object):\n attrs_dict = {'uuid_': 'CREATING'}\n api_object.update_public_attrs(attrs_dict)\n assert api_object.uuid_ != 'CREATING'", "def patch_record(\n self, id_: str, fields: Dict[str, Union[str, list, None]]\n ) -> None:\n instance = self._get(id_)\n instance.update(fields)\n self.db.session.commit()", "def update_properties():\n state = request.get_json()\n if 'id' not in state:\n return jsonify({'success': False,\n 'error': 'ID not found in request!'})\n logger.debug(\"Updated Roast Properties: %s\" % state)\n c = mongo.db[app.config['HISTORY_COLLECTION']]\n roast_id = paranoid_clean(state.get('id'))\n item = c.find_one({'_id': ObjectId(roast_id)}, {'_id': 0})\n if not item:\n return jsonify({'success': False, 'message': 'No such roast.'})\n item = {'notes': state.get('notes'),\n 'input_weight': state.get('input_weight'),\n 'output_weight': state.get('output_weight')}\n c.update({'_id': ObjectId(roast_id)}, {'$set': item})\n return jsonify({'success': True})", "def update(self, instance, validated_data):\n # instance.id = validated_data.get('id', instance.id)\n instance.teeth = 
validated_data.get('teeth', instance.teeth)\n instance.species = validated_data.get('species', instance.species)\n instance.save()\n return instance", "def update(self, *args, **kwargs):\n if args:\n my_list = ['id', 'width', 'height', 'x', 'y']\n for i in range(len(args)):\n setattr(self, my_list[i], args[i])\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)" ]
[ "0.60556465", "0.5540154", "0.5538673", "0.5526356", "0.5473278", "0.54686195", "0.54657865", "0.5454497", "0.54063725", "0.52172124", "0.5209413", "0.5209413", "0.5175859", "0.5163642", "0.5153528", "0.5133642", "0.5127958", "0.5118953", "0.5105042", "0.5085077", "0.5072867", "0.5069584", "0.50606894", "0.5057082", "0.5056525", "0.5025677", "0.5009025", "0.49848494", "0.49797937", "0.4979132" ]
0.5941151
1
Mutates user-supplied Boat entity properties by id. Unaddressed properties, where allowed, become None (null). Returns the updated Boat entity as a JSON string.
def put(self, id=None): if id: boat = test4ValidEntity(id) if boat == None: self.response.set_status(404) else: boat_data = json.loads(self.request.body) if 'name' in boat_data: boat.name = boat_data['name'] else: boat.name = None if 'type' in boat_data: boat.type = boat_data['type'] else: boat.type = None if 'length' in boat_data: boat.length = boat_data['length'] else: boat.length = None boat.put() boat_dict = boat.to_dict() self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(boat_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def fix_id(entity_json):\n entity_json['object_id'] = entity_json['id']\n del entity_json['id']\n return entity_json", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def update(self, commit=True, **kwargs):\n # Prevent changing IDS\n kwargs.pop('id', None)\n for attr, value in kwargs.iteritems():\n # Flask-restful makes everything None by default\n if value is not None:\n setattr(self, attr, value)\n return commit and self.save() or self", "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. 
\"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def update_user_obj(user_id=None):\n dic = {}\n list_key = ['id', 'email', 'created_at', 'updated_at']\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n for key, value in dic.items():\n if key not in list_key:\n setattr(obj, key, value)\n obj.save()\n return jsonify(obj.to_dict()), 200", "def get_by_id(self, id):\n user = super(ExtendedUsersService, self).get_by_id(id)\n user.first_name = 'John' + str(id)\n user.last_name = 'Smith' + str(id)\n user.gender = 'male'\n return user", "def updateamenity(amenity_id):\n obj = storage.get(Amenity, amenity_id)\n if obj is None:\n abort(404)\n s = request.get_json(silent=True)\n if s is None:\n abort(400, \"Not a Json\")\n for key, value in s.items():\n list_ignore = [\"id\", \"created_at\", \"updated_at\"]\n if key not in list_ignore:\n setattr(obj, key, value)\n # setting attribute to be what's passed in\n obj.save()\n return jsonify(obj.to_dict()), 200", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def test_api_object_update_property(self, api_object):\n attrs_dict = {'uuid_': 'CREATING'}\n api_object.update_public_attrs(attrs_dict)\n assert api_object.uuid_ != 'CREATING'", "def update_entity(self, entity_obj):\n if (\n type(entity_obj) is not dict\n or \"entity_id\" not in entity_obj\n or \"mentions\" not in entity_obj\n ):\n raise ValueError(\n \"The input to update_entity needs to be a dictionary with an entity_id key and mentions key as \"\n \"you are replacing the entity information in bulk.\"\n )\n if not self._entity_symbols.qid_exists(entity_obj[\"entity_id\"]):\n raise ValueError(f\"The entity {entity_obj['entity_id']} is not in our dump\")\n try:\n ent = EntityObj(\n entity_id=entity_obj[\"entity_id\"],\n mentions=entity_obj[\"mentions\"],\n title=entity_obj.get(\"title\", entity_obj[\"entity_id\"]),\n types=entity_obj.get(\"types\", {}),\n relations=entity_obj.get(\"relations\", []),\n )\n except ValidationError as e:\n print(e.json())\n raise e\n # Update mentions\n for men in self.get_mentions(ent.entity_id):\n self._entity_symbols.remove_alias(ent.entity_id, men)\n for men in ent.mentions:\n # Lower case mentions for 
mention extraction\n men = [get_lnrm(men[0], strip=True, lower=True), men[1]]\n self._entity_symbols.add_alias(ent.entity_id, men)\n # Update title\n self._entity_symbols.set_title(ent.entity_id, ent.title)\n # Update types\n for type_sys in self._type_systems:\n for typename in self._type_systems[type_sys].get_types(ent.entity_id):\n self._type_systems[type_sys].remove_type(ent.entity_id, typename)\n for type_sys in ent.types:\n for typename in ent.types[type_sys]:\n self._type_systems[type_sys].add_type(ent.entity_id, typename)\n # Update KG\n if self._kg_symbols is not None:\n for rel in self._kg_symbols.get_relations(ent.entity_id):\n for qid2 in self._kg_symbols.get_connections_by_relation(\n ent.entity_id, rel\n ):\n self._kg_symbols.remove_kg(ent.entity_id, rel, qid2)\n for rel_pair in ent.relations:\n self._kg_symbols.add_kg(\n ent.entity_id, rel_pair[\"relation\"], rel_pair[\"object\"]\n )", "def update(self, identity, data=None, record=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})", "def put(self, obj):\n\n if obj is None:\n return\n\n assert isinstance(obj, str), (\n f\"object is not of type string, \"\n f\"but {type(obj)} for fly identifier attribute\")\n\n obj = obj.strip()\n\n return obj", "def put(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for requested Slip number already in use. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n for match in results:\n if slip_data['number'] == match.number:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. 
\"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n else:\n slip.current_boat = None\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n else:\n slip.arrival_date = None\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n else:\n slip.departed_boat = None\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n else:\n slip.departure_date = None\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def update_properties():\n state = request.get_json()\n if 'id' not in state:\n return jsonify({'success': False,\n 'error': 'ID not found in request!'})\n logger.debug(\"Updated Roast Properties: %s\" % state)\n c = mongo.db[app.config['HISTORY_COLLECTION']]\n roast_id = paranoid_clean(state.get('id'))\n item = c.find_one({'_id': ObjectId(roast_id)}, {'_id': 0})\n if not item:\n return jsonify({'success': False, 'message': 'No such roast.'})\n item = {'notes': state.get('notes'),\n 'input_weight': state.get('input_weight'),\n 'output_weight': state.get('output_weight')}\n c.update({'_id': ObjectId(roast_id)}, {'$set': item})\n return jsonify({'success': True})", "def patch(self, entity_id=None, **kwargs):\n entity = self.fetcher.get_entity(entity_id, **kwargs)\n if not entity:\n raise NotFound\n self._verify_etag(entity)\n\n incoming_data, errors = self._patch_schema().load(self.patch_data)\n if errors:\n raise FlumpUnprocessableEntity(errors=errors)\n\n entity = self.orm_integration.update_entity(entity,\n incoming_data.attributes)\n entity_data = self._build_entity_data(entity)\n response_data = ResponseData(entity_data, {'self': request.url})\n\n data, _ = self.response_schema(strict=True).dump(response_data)\n response = jsonify(data)\n response.set_etag(str(entity_data.meta.etag))\n return response, 200", "def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)", "def put(self, id):\n adm = Administration()\n print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500", "def patch(id):\n\n if not request.json or not 'name' in request.json:\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name is required\"\n }), 400)\n\n data = request.get_json(force=True)\n if isinstance(data['name'], int):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Name should be of type strings\"\n }), 400)\n\n if Party.get_party_by_name(data[\"name\"]):\n return 
make_response(jsonify({\n \"status\": 409,\n \"error\": \"Party name already taken\"\n }), 409)\n if Validate.validate_empty_string(data_inputed=data[\"name\"]):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name cannot be empty\"\n }), 400)\n update_data = request.get_json(force=True)\n party_to_edit = Party.get_party_by_id(id=id)[0]\n party_to_edit = Party.update_party(update_data=update_data,id=id)\n return make_response(jsonify({\n \"status\": 201,\n \"data\": party_to_edit\n }), 201)", "def update_attrs(self, is_replace_attrs=True, is_allow_none=True,\n is_replace_dicts_values=False, **attrs):\n from lib.entities import entities_factory\n return (entities_factory.EntitiesFactory().\n update_objs_attrs_values_by_entered_data(\n objs=self, is_replace_attrs_values=is_replace_attrs,\n is_allow_none_values=is_allow_none,\n is_replace_values_of_dicts=is_replace_dicts_values, **attrs))", "def patch_record(\n self, id_: str, fields: Dict[str, Union[str, list, None]]\n ) -> None:\n instance = self._get(id_)\n instance.update(fields)\n self.db.session.commit()", "def put(self, obj):\n\n if obj is None:\n return\n\n assert isinstance(obj, str), (\n f\"object is not of type string, \"\n f\"but {type(obj)} for phone attribute\")\n\n obj = obj.strip()\n\n return obj", "def to_json(self, obj):\n _dict = obj._to_dict()\n if ID not in _dict or _dict[ID] is None:\n _dict[ID] = str(uuid.uuid4())\n json_str = json.dumps(_dict, indent=4)\n return json_str", "def update_amenity_obj(amenity_id=None):\n dic = {}\n obj = storage.get(\"Amenity\", amenity_id)\n if obj is None:\n abort(404)\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n for key, value in dic.items():\n setattr(obj, key, value)\n storage.save()\n return jsonify(obj.to_dict()), 200", "def update(cls, dto: dict):\n entity = cls.from_dict(dto)\n try:\n valid_entity = cls.find_by_id(dto[\"id\"])\n except KeyError as e:\n raise AppException(\"Can't find key {}\".format(e))\n\n if not valid_entity:\n return None\n\n # validate creation your creation.\n entity.creation_validation()\n\n # Copy all attributes from entity to valid_entity.\n valid_entity << entity\n\n return valid_entity", "def patch_user(user_id):\n success = True\n try:\n usr = db.session.query(User).get(user_id)\n for item in request.json:\n if item == 'username':\n usr.username = request.json['username']\n elif item == 'email':\n usr.username = request.json['email']\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def update_json(self):\n self.set_version_to_default()\n self.remove_null_fields()\n self.remove_unnecessary_keys()\n self.set_fromVersion(from_version=self.from_version)", "def update_animal():\n\n animal_uuid = request.args.get(\"uuid\", default=None, type=str)\n animal = json.loads(rd.get(animal_uuid))\n\n new_animal_body = request.args.get(\"body\", default=None, type=str)\n if new_animal_body is not None:\n animal[\"body\"] = new_animal_body\n\n new_animal_arms = request.args.get(\"arms\", default=None, type=int)\n if new_animal_body is not None:\n animal[\"arms\"] = new_animal_arms\n\n new_animal_legs = request.args.get(\"legs\", default=None, type=int)\n if new_animal_legs is not None:\n animal[\"legs\"] = new_animal_legs\n\n new_animal_tails = request.args.get(\"tails\", default=None, type=int)\n if new_animal_tails is not None:\n animal[\"tails\"] = new_animal_tails\n\n rd.set(animal_uuid, json.dumps(animal))\n return animal" ]
[ "0.642934", "0.58710945", "0.57078266", "0.5431871", "0.5274404", "0.5214651", "0.5195765", "0.5161442", "0.50988567", "0.5096104", "0.50762004", "0.5066248", "0.50660175", "0.5055261", "0.5034231", "0.5026359", "0.502098", "0.50012106", "0.49968147", "0.49965915", "0.49895957", "0.4989225", "0.49805", "0.49721235", "0.49700305", "0.4944808", "0.48975012", "0.48940054", "0.48921117", "0.48853695" ]
0.61614597
1
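
The Boat PUT handler in the row above implements full-replacement semantics: every mutable property is overwritten, and any property missing from the request body is reset to None. A minimal, framework-free sketch of that rule, assuming plain dicts in place of the NDB model (the BOAT_FIELDS tuple and the function name are illustrative, not part of the original source):

    import json

    BOAT_FIELDS = ("name", "type", "length")  # illustrative list of mutable fields

    def replace_boat(boat, body):
        # PUT-style full replacement: every mutable field is overwritten;
        # a field absent from the request body becomes None (null in JSON).
        for field in BOAT_FIELDS:
            boat[field] = body.get(field)
        return json.dumps(boat)

    # usage sketch: "type" and "length" are omitted, so they come back as null
    boat = {"id": "abc123", "name": "Sea Ray", "type": "sloop", "length": 28, "at_sea": True}
    print(replace_boat(boat, {"name": "Sea Ray"}))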
Returns an array of JSON objects representing all Boat entities.
def get(self): query = Boat.query() results = query.fetch(limit = MAX_BOATS) boat_dicts = [] for match in results: boat_dicts.append({'id': match.id, 'name': match.name, 'type': match.type, 'length': match.length, 'at_sea': match.at_sea }) self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(boat_dicts))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return {'bills': [bill.json() for bill in BillModel.find_all()]}", "def jsonify_all(cls):\n return jsonify(accounts=[account.as_dict() for account in cls.query.all()])", "def TOBS():\n session = Session(engine)\n # Query all passengers\n\n TOBS = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date >= '2010-08-23').all()\n\n # Convert list of tuples into normal list\n all_TOBS = list(np.ravel(TOBS))\n\n return jsonify(all_TOBS)", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def get_companies():\n all_companies = storage.all(Company).values()\n list_companies = []\n for company in all_companies:\n list_companies.append(company.to_dict())\n return jsonify(list_companies)", "def get_all_bookings():\n # get all the bookings for userid\n bookings = Booking.query.all()\n # dump in the Schema\n results = bookingsSchema.dump(bookings)\n\n return jsonify(results)", "def get_tags():\n\treturn jsonify(tags=[i.serialise for i in Tag.query.all()])", "def allCategoriesJSON():\n categories = db_session.query(Category).all()\n return jsonify(categories=[c.serialize for c in categories])", "def getall():\n elements = Advertisements().get_all_elements()\n data = jsonify(elements)\n data.statut_code = 200\n return data", "def bentity_list(request, format='csv'):\n \n \n bentities = Bentity.objects.all().order_by('bentity')\n \n \n if format == 'csv':\n # Serislize CSV for API\n return CSVResponse(\n [{'bentity_id': b.gid, 'bentity_name': b.bentity} for b in bentities],\n ('bentity_id', 'bentity_name') )\n \n else:\n # Serialize JSON for bentity-list widget\n json_objects = [{\n 'key': b.gid,\n 'display': b.bentity,\n } for b in bentities]\n \n return JSONResponse({'bentities' : json_objects})", "def tobs():\n date = dt.datetime(2017, 8, 23)\n year_ago = date - dt.timedelta(days=365)\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= year_ago).\\\n filter(Measurement.date <= date).\\\n order_by(Measurement.date).all()\n\n # Convert list of tuples into normal list\n all_tobs = list(np.ravel(results))\n\n return jsonify(all_tobs)", "def tobs():\n # Query all stations\n sel3 = [Measurement.tobs]\n tob = session.query(*sel3).\\\n filter(Measurement.date.between ('2016-08-23', '2017-08-23')).\\\n group_by(Measurement.date).\\\n order_by(Measurement.date).all()\n\n # Create a list of all_tobs\n\n all_tobs= list(np.ravel(stations))\n\n return jsonify(all_tobs)", "def get_all_data():\n return jsonify(service.get_all_data())", "def tobs():\n # Query all all dates and temperature observations from the last year\n tobs_list = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= \"2016-08-23\").filter(Measurement.station == 'USC00519281').order_by(Measurement.date).all()\n # Convert list of tuples into normal list\n all_tobs = list(np.ravel(tobs_list))\n\n return jsonify(all_tobs)", "def asset_list():\n new_list = []\n for asset_name in app.bank:\n new_list.append(app.bank[asset_name].to_list())\n new_list = sorted(new_list, key=lambda s: s[0])\n return jsonify(new_list)", "def api_asset_list():\n return jsonify(app.bank.to_list()), 200", "def jsons(self):\n if self._jsons is None:\n self.make_jsons()\n return self._jsons", "def categoriesJSON():\n categories = session.query(Category).all()\n return jsonify(Categories=[r.serialize for r in categories])", "def tobs():\n # Query all tobs for the 
previous year\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= '2016-10-01').all()\n\n all_tobs = []\n\n for result in results:\n tobs_dict = {}\n tobs_dict[\"date\"] = result[0]\n tobs_dict[\"tobs\"] = result[1]\n all_tobs.append(tobs_dict)\n return jsonify(all_tobs)", "def getCategoriesJSON():\r\n categories = session.query(Category).all()\r\n return jsonify(Categories=[c.serialize for c in categories])", "def get_all(self):\n url = self._dbname + '/_all'\n return self._connection.get(url).json()", "def tobs():\n \n # Create session\n session = Session(engine)\n \n # Retrieve the most recent date\n result = session.query(Measurement).order_by(Measurement.date.desc()).limit(1)\n for row in result:\n latest_date = row.date\n\n # Convert to datetime format\n conv_latest_date = dt.datetime.strptime(latest_date , '%Y-%m-%d')\n \n # Calculate the date 1 year ago from the last data point in the database\n last_12_months = conv_latest_date - dt.timedelta(days=365)\n\n # Convert datetime to string\n conv_last_12_months = last_12_months.strftime('%Y-%m-%d')\n \n # Query\n temp_obv = session.query(Measurement.date, Measurement.tobs).\\\n filter(and_(Measurement.date <= latest_date, Measurement.date >= conv_last_12_months)).all()\n\n\n return jsonify(dict(temp_obv))", "def tobs():\n \n # Obtain the current year from the date and using that date determine the previous year appending 01-01 and 12-31\n compare_date = dt.date.today()\n start_date = f\"{compare_date.year - 1}-01-01\"\n end_date = f\"{compare_date.year - 1}-12-31\"\n \n tobs_result = session.query(Measurement.tobs).filter((Measurement.date >= start_date) & (Measurement.date <= end_date)\n ).order_by(Measurement.date).all()\n \n tobs = []\n tobs = list(np.ravel(tobs_result))\n return jsonify(tobs)", "def jugadores():\n\tjugadores = Jugador.query.order_by(Jugador.id.desc()).filter_by(activo=True)\n\treturn jsonify([jugador.to_dict()\n\t\t for jugador in jugadores])", "def list():\n trucks = Foodtruck.query.all()\n return jsonify(foodtrucks=[truck.to_dict() for truck in trucks])", "def all(cls):\n api = BuslineAPI()\n try:\n objects = api.all()\n except ApiException:\n objects = cls.objects.all()\n return objects", "def restaurants_all() -> str:\n restaurant_objects = restaurants.load_restaurants()\n return jsonify(restaurant_objects)", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def get_all(cls):\n\t\treturn [el._to_dict() for el in Book.query.all()]", "def tobs():\n # Query all the stations and for the given date. \n results = session.query(Measurement.station, Measurement.date, Measurement.tobs). group_by(Measurement.date). filter(Measurement.date > begin_date). order_by(Measurement.station).all()\n \n # Create a dictionary from the row data and append to a list of for the temperature data.\n tob_data = []\n for tobs_data in results:\n tobs_dict = {}\n tobs_dict[\"Station\"] = tobs_data.station\n tobs_dict[\"Date\"] = tobs_data.date\n tobs_dict[\"Temperature\"] = tobs_data.tobs\n tob_data.append(tobs_dict)\n \n return jsonify(tob_data)" ]
[ "0.66011316", "0.65072656", "0.63690925", "0.63595057", "0.62545437", "0.6199981", "0.6179565", "0.6169542", "0.613204", "0.61151505", "0.6094793", "0.608647", "0.6081628", "0.6066895", "0.6056461", "0.6009723", "0.5975167", "0.5933342", "0.5926198", "0.59116733", "0.58876383", "0.58770424", "0.5875137", "0.58393705", "0.58006406", "0.57991785", "0.57957965", "0.57942075", "0.5790955", "0.5767945" ]
0.681285
0
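
The list handler above projects each Boat entity onto its public fields before serializing the whole collection. A framework-free sketch of that projection, where the namedtuple stands in for the NDB model and is an assumption rather than the original class:

    import json
    from collections import namedtuple

    Boat = namedtuple("Boat", "id name type length at_sea")  # stand-in for the NDB model

    def boats_to_json(boats):
        # Project each entity onto the public fields and serialize the list.
        return json.dumps([
            {"id": b.id, "name": b.name, "type": b.type,
             "length": b.length, "at_sea": b.at_sea}
            for b in boats
        ])

    print(boats_to_json([Boat("b1", "Wanderer", "ketch", 32, False)]))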
Returns a JSON string with Slip entity details by id.
def get(self, id=None): if id: slip = test4ValidEntity(id) if slip == None: self.response.set_status(404) else: slip_dict = slip.to_dict() slip_dict['departure_history'] = {} slip_dict['departure_history']['departure_date'] = slip.departure_date slip_dict['departure_history']['departed_boat'] = slip.departed_boat del slip_dict['departed_boat'], slip_dict['departure_date'] self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(slip_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_item(id):\n return jsonify(id=id, name='name', number=123)", "def get(self, id):\n return {'id': id}", "def get_one_stock(id):\r\n print(\"<get_one_stock()>\")\r\n print(\"id: \", id)\r\n stocks = Stock.objects(id=id).first()\r\n return jsonify(json.loads(stocks.to_json()))", "def item_json(item_id):\n item_details_json = {}\n try:\n item_in_db = session.query(Item).filter_by(id=item_id).one()\n item_details_json['item'] = item_in_db.serialize\n except Exception as e:\n item_details_json['result'] = 'No data for item ID ' \\\n + str(item_id) + ': ' + str(e)\n return jsonify(item_details_json)", "def detail(id):\n program = Programa.query.get(id)\n return programa_schema.jsonify(program), 200", "def get_entity_by_id(self, id):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers_ld)\n return r.json()", "def json_format_by_id(id):\n fmt = Format.query.filter(Format.id==id).first()\n if fmt is None:\n abort(404)\n return jsonify(fmt.get_public_dict())", "def get_item_by_id(request, pk):\n item = get_object_or_404(StockItem, pk=pk)\n res_dict = {\n 'id': item.id,\n 'name': item.name,\n 'count': item.count,\n 'date_added': item.date_added,\n 'exp': item.date_of_expiration,\n 'added_by': item.added_by,\n 'cat': str(item.fk_category),\n 'subcat': str(item.fk_subcategory),\n 'notes': item.notes\n }\n return JsonResponse(res_dict)", "def itemCatalogJSON(sport_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n items = session.query(Item).filter_by(\n sport_id=sport_id).all()\n return jsonify(Sport=[i.serialize for i in items])", "def catalogItemJSON(sport_id, item_id):\n\n catalogItem = session.query(Item).filter_by(id=item_id).one()\n return jsonify(Item=catalogItem.serialize)", "def get(self, _id):\n if _id is None:\n return jsonify([user.serialize() for user in Goal.query.all()])\n else:\n return jsonify(Goal.query.filter_by(id=_id).all())", "def get(id):\n elements = Advertisements().get_one_element(id)\n data = jsonify(elements)\n if data is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n data.statut_code = 200\n return data", "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. 
\"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def companies_by_id(id):\n res = requests.get('http://0.0.0.0:5002/company/{}/products'.format(id))\n return jsonify(res.json())", "def get(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def put(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for requested Slip number already in use. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n for match in results:\n if slip_data['number'] == match.number:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n else:\n slip.current_boat = None\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n else:\n slip.arrival_date = None\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n else:\n slip.departed_boat = None\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n else:\n slip.departure_date = None\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "def get(self, id):\n ticket = Ticket.query.filter_by(id=id).one()\n\n return jsonify(ticket)", "def item_json(item_id):\n try:\n item = session.query(Item).filter_by(id=item_id).one()\n return jsonify(item=item.serialize)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def info(self, id):", "def json(self):\n if self.resource:\n return jsonify(self.resource)\n return jsonify({'id': self.id})", "def jugador_detalle(id):\n\treturn jsonify(Jugador.query.get_or_404(id).to_dict())", "def get_json_info_sticker(self, st_id):\n remake = remake_url(self.get_json_cursor)\n json_url = remake.remake_sticker_url(st_id)\n response = get_responses(json_url)\n return self.return_response(response, st_id)", "def serialize(self):\n return {\n \"id\": self.id,\n \"sid\": self.sid,\n \"sku\": self.sku,\n \"name\": self.name,\n \"price\": 
self.price,\n \"amount\": self.amount,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time\n }", "def get_entity_by_id(self, id):\n url = '{}/iot/devices/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers)\n return r.json()", "def get_animal_details_by_id():\n animal_id = request.args.get('animalId')\n try:\n animal = Animal.object_as_dict(Animal.get_animal_by_id(animal_id))\n return jsonify(animal)\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_json(self):\n return serialize_list_by_id(self.id)", "def json(self):\n return {\n 'id': self.id,\n 'name': self.name\n }" ]
[ "0.66123515", "0.6180184", "0.5928225", "0.5881289", "0.58054954", "0.57594967", "0.5753017", "0.5671553", "0.56611335", "0.5634447", "0.56203264", "0.5611418", "0.55989206", "0.5598792", "0.5573988", "0.55618095", "0.55421466", "0.5537874", "0.55092967", "0.54937154", "0.5485743", "0.54828805", "0.5456373", "0.545566", "0.54366857", "0.54273933", "0.5413177", "0.5403467", "0.54004776", "0.5394864" ]
0.67899144
0
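
The Slip GET handler above folds the flat departed_boat and departure_date properties into a nested departure_history object before responding. A dict-only sketch of that reshaping; the field names follow the handler, everything else is illustrative:

    import json

    def slip_to_json(slip):
        # Copy the flat record, then nest the two departure fields.
        out = dict(slip)
        out["departure_history"] = {
            "departure_date": out.pop("departure_date", None),
            "departed_boat": out.pop("departed_boat", None),
        }
        return json.dumps(out)

    slip = {"id": "s1", "number": 4, "current_boat": None,
            "arrival_date": None, "departed_boat": "b1", "departure_date": "2015-06-01"}
    print(slip_to_json(slip))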
Deletes a Slip entity. If the slip was occupied, returns that Boat entity's JSON string.
def delete(self, id=None): if id: slip = test4ValidEntity(id) if slip == None: self.response.set_status(404) else: if slip.current_boat != None: """ Tests for a Boat "docked" in slip to be deleted. if found, sets the Boat entity at_sea property to True and deletes the slip. """ boat_dict = None query = Boat.query(Boat.at_sea == False) results = query.fetch(limit = MAX_BOATS) for match in results: if slip.current_boat == match.id: match.at_sea = True match.put() slip.key.delete() self.response.write("Slip has been deleted!") else: slip.key.delete() self.response.write("Slip has been deleted!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Delete(self):\n self.__context.builder.BlipDelete(self.GetWaveId(),\n self.GetWaveletId(),\n self.GetId())\n return self.__context.RemoveBlip(self.GetId())", "def remove(self, spo, context=None):\n\n uri = self.rest_services[\"statements\"]\n s,p,o = spo\n payload = dict()\n if s:\n payload[\"subj\"] = s.n3()\n if p:\n payload[\"pred\"] = p.n3()\n if o:\n payload[\"obj\"] = o.n3()\n if context:\n payload[\"context\"] = [context.n3()]\n\n #data = \" \".join(i.n3() for i in spo) +\" .\"\n #print(data)\n r = requests.delete(uri, params=payload)", "def BlipDelete(self, wave_id, wavelet_id, blip_id):\n op = Operation(BLIP_DELETE, wave_id, wavelet_id, blip_id=blip_id)\n self.__context.AddOperation(op)", "def delete(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n if boat.at_sea == False:\n query = Slip.query(Slip.current_boat == boat.id)\n result = query.fetch(limit = 1)\n for match in result:\n match.current_boat = None\n match.arrival_date = None\n match.put()\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\") \n else:\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\")", "def delete_one_stock(id):\r\n print(\"<delete_one_stock()>\")\r\n print(\"id: \", id)\r\n stocks = Stock.objects(id=id)\r\n stocks.delete()\r\n return jsonify(json.loads(stocks.to_json()))", "def delete_clothes(payload, clothes_id):\n clothes = Clothes.query.get(clothes_id)\n # exception for not existing id\n if clothes is None:\n abort(404)\n # set error status\n error = False\n # delete the given clothes\n try:\n clothes.delete()\n except Exception:\n clothes.rollback()\n error = True\n print(sys.exc_info())\n finally:\n clothes.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'deleted': clothes_id\n })", "def delete(self):\n return self.service.delete_one({\"_id\": self._id})", "def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json", "def slo_delete(obj, product_name, slo_id):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slo = client.slo_list(product, id=slo_id)\n if not slo:\n fatal_error('SLO {} does not exist'.format(slo_id))\n\n slo = slo[0]\n\n if product['name'] != slo['product_name']:\n fatal_error('Cannot delete SLO {} as it does not belong to product {}'.format(slo_id, product_name))\n\n with Action('Deleting SLO: {}'.format(slo['uri']), nl=True):\n client.slo_delete(slo)", "def delete(self, sku, page=None):\n _data = api_parser.parse_args()\n\n product = Product.query.filter(Product.sku == _data['sku']).first_or_404()\n db.session.delete(product)\n db.session.commit()\n\n return jsonify(status='DELETED')", "def sli_delete(obj, product_name, name):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slis = client.sli_list(product, name)\n if not slis:\n fatal_error('SLI {} does not exist'.format(name))\n\n with Action('Deleting SLI: {} for product {}'.format(name, product['name']), nl=True) as act:\n try:\n client.sli_delete(slis[0])\n except SLRClientError as e:\n act.fatal_error(e)", "def delete(self):\n return self.get_data()", "def delete_item(id):\n return '', 201", "def remove_entity(self, entity):\n #if entity.mark != -1:\n #print 
\"in EntityList: remove_entity: trying to remove entity with mark %d\" % entity.mark\n return self.remove(entity)", "def remove(self):\n instance = self.get_object() \n instance.delete() \n return self.response(status='Successfully Delete')", "def delete(self):\n url = util.join_url(self.path, str(self['id']))\n new_attributes = self.api.delete(url)\n self.error = None\n self.merge(new_attributes)\n return self.success()", "def test_delete_pet(self):\n headers = [('api_key', 'api_key_example')]\n response = self.client.open(\n '/pet/{petId}'.format(pet_id=789),\n method='DELETE',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def delete(self, product):\n product_id = str(product)\n\n\n if product_id in self.sepet:\n del self.sepet[product_id]\n print(product_id)\n self.session.modified=True", "def delete_single_list(current_user, id):\n\n try:\n int(id)\n except ValueError:\n return response('failed', 'Please provide a valid ShoppingList Id', 400)\n else:\n shoplist = ShoppingList.query.filter_by(user_id=current_user.id, id=id).first()\n if shoplist is not None:\n db.session.delete(shoplist)\n db.session.commit()\n return response('success', 'Shopping list has been deleted', 200)\n return response('failed', 'Shopping list not found', 404)\n\n\n\n # decorator used to allow cross origin requests", "def delete(self, request, *args, **kwargs):\n if self.json:\n self.object = self.get_object()\n obj = copy.deepcopy(self.object)\n self.object.delete()\n return self.json_to_response(data=obj)\n else:\n return super().delete(request, *args, **kwargs)", "def delete(self, pet_id):\n app.logger.info('Request to Delete a pet with id [%s]', pet_id)\n pet = Pet.find(pet_id)\n if pet:\n pet.delete()\n return '', status.HTTP_204_NO_CONTENT", "def delete(self):\n data = request.get_json()\n\n if data is None:\n raise ClientDataError('Must include request data')\n\n event_id = data.get('id', None)\n bike_id = data.get('bike_id', None)\n\n if event_id is None:\n raise ClientDataError('Must include event id', 400)\n if bike_id is None:\n raise ClientDataError('Must include bike id', 400)\n\n event = MaintenanceEvent.query.get_or_404(event_id)\n bike = Bike.query.get_or_404(event.bike_id)\n if bike.id != bike_id:\n raise ClientDataError('Event does not belong to the given bike', 400)\n if bike.user_id != g.user.id:\n return None, 403\n\n try:\n db.session.delete(event)\n db.session.commit()\n except DataError:\n db.session.rollback()\n return None, 400\n except DataBaseError:\n db.session.rollback()\n return None, 500\n\n return {'id': event_id}, 200", "def delete(self, op_id: str) -> Response:\n\n authorized: bool = Users.objects.get(id=get_jwt_identity()).roles.organization or \\\n Users.objects.get(id=get_jwt_identity()).roles.admin\n\n if authorized:\n try:\n output = Opportunity.objects.get(id=op_id).delete()\n except ValidationError as e:\n return bad_request(e.message)\n return jsonify(output)\n else:\n return forbidden()", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})", "def delete_pet(self, p_name):\r\n for pets in range(0, len(self.pet_file)):\r\n if self.pet_file[pets][\"pet name\"] == p_name:\r\n self.pet_file.pop(pets) # remove requested pet dict\r\n break # leave 'for loop' to prevent out of index error\r\n with open(self.pet_file_name, 'w') as outfile:\r\n json.dump(self.pet_file, outfile) # confirm changes in json file\r", "def _delete(self, 
table, _id, return_item=False):\n data = {\"Key\": _id, \"ReturnValues\": \"ALL_OLD\" if return_item else \"NONE\"}\n\n return self._response_handler(table, \"delete_item\", data)", "def delete(self, product):\n product_id = str(product)\n\n if product_id in self.basket:\n del self.basket[product_id]\n #print(product_id)\n self.save()", "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def delete(self):\n return self.request('', pylastica.request.Request.DELETE)", "def favourite_delete(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Remove the thrower from the logged in thrower's favourites and return\n\t\t#\tthe result\n\t\treturn Services.Effect(\n\t\t\tFavourites.remove(sesh['thrower']['_id'], data['id'])\n\t\t)" ]
[ "0.60876894", "0.57072854", "0.5664537", "0.5424277", "0.53834593", "0.53725517", "0.53673273", "0.5352611", "0.5321147", "0.5274949", "0.5266084", "0.5247238", "0.52407867", "0.52098083", "0.5192416", "0.5168681", "0.51632714", "0.51488656", "0.5146871", "0.51223814", "0.5112351", "0.509571", "0.508468", "0.50709337", "0.5068131", "0.50435144", "0.5038931", "0.5029719", "0.5026735", "0.5016997" ]
0.6559253
0
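
The DELETE handler above carries one side effect worth noting: if a boat is docked in the slip, it is put back at sea before the slip is removed. A sketch of that rule over in-memory dicts, with no Datastore involved (the container names and function signature are assumptions):

    def delete_slip(slip_id, slips, boats):
        # Free any docked boat, then drop the slip record.
        slip = slips.pop(slip_id)
        boat_id = slip.get("current_boat")
        if boat_id is not None and boat_id in boats:
            boats[boat_id]["at_sea"] = True
        return "Slip has been deleted!"

    boats = {"b1": {"id": "b1", "at_sea": False}}
    slips = {"s1": {"id": "s1", "number": 1, "current_boat": "b1"}}
    print(delete_slip("s1", slips, boats), boats["b1"]["at_sea"])  # boat is at sea again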
Mutates user-supplied Slip entity properties by id. Unaddressed properties remain unchanged. Returns the updated Slip entity as a JSON string.
def patch(self, id=None): if id: slip = test4ValidEntity(id) if slip == None: self.response.set_status(404) else: slip_data = json.loads(self.request.body) if 'number' in slip_data: """ Test for Slip number already taken. """ query = Slip.query() results = query.fetch(limit = MAX_SLIPS) if slip.number in results: slip.number = getSlipNum() else: slip.number = slip_data['number'] if 'current_boat' in slip_data: if slip.current_boat == None: slip.current_boat = slip_data['current_boat'] else: """ Query for the Boat and change at_sea to False. """ query = Boat.query(Boat.id == slip_data['current_boat']) result = query.fetch(limit = 1) if 'at_sea' in result: result.at_sea = False slip.current_boat = slip_data['current_boat'] if 'arrival_date' in slip_data: slip.arrival_date = slip_data['arrival_date'] if 'departed_boat' in slip_data: slip.departed_boat = slip_data['departed_boat'] if 'departure_date' in slip_data: slip.departure_date = slip_data['departure_date'] slip.put() slip_dict = slip.to_dict() del slip_dict['departure_history'] self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(slip_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for requested Slip number already in use. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n for match in results:\n if slip_data['number'] == match.number:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n else:\n slip.current_boat = None\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n else:\n slip.arrival_date = None\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n else:\n slip.departed_boat = None\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n else:\n slip.departure_date = None\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def fix_id(entity_json):\n entity_json['object_id'] = entity_json['id']\n del entity_json['id']\n return entity_json", "def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)", "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def update(table, id_):\n\n # your code\n\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n inventory_data = [\"Product: \", \"Manufacturer: \", \"Release date: \", \"Durability: \"]\n inputs = ui.get_inputs(inventory_data, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def update_nuxeo_properties(self, data, **documentid):\n uid = ''\n if len(documentid) != 1:\n raise TypeError(\"either uid or path\")\n if 'path' in documentid:\n uid = self.get_uid(documentid['path'])\n elif 'uid' in documentid:\n uid = documentid['uid']\n url = u'/'.join([self.conf['api'], \"id\", uid])\n headers = 
self.document_property_headers\n headers.update({'Content-Type': 'application/json'})\n\n # copy what we want from the input json into the payload\n payload = {}\n payload['uid'] = uid\n payload['entity-type'] = data.get('entity-type', 'document')\n payload['properties'] = data['properties']\n res = self.http.put(\n url, data=json.dumps(payload), auth=self.auth, headers=headers)\n res.raise_for_status()\n r2 = self.http.get(url, auth=self.auth, headers=headers)\n r2.raise_for_status()\n return json.loads(r2.content)", "def slo_update(obj, product_name, slo_id, title, description, slo_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slo = client.slo_list(product, id=slo_id)\n if not slo:\n fatal_error('SLO {} does not exist'.format(slo_id))\n\n slo = slo[0]\n\n with Action('Updating SLO {} for product {}'.format(slo_id, slo['product_name']), nl=True) as act:\n if slo_file:\n slo = json.load(slo_file)\n slo['uri'] = slo['uri']\n else:\n if title:\n slo['title'] = title\n if description:\n slo['description'] = description\n\n validate_slo(slo, act)\n\n if not act.errors:\n slo = client.slo_update(slo)\n\n print(json.dumps(slo, indent=4))", "def patch(self, show_id):\r\n song = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n result = []\r\n status_code = 204\r\n try:\r\n # This only returns a value (boolean) for \"op\": \"test\"\r\n result = patch_item(song, request.get_json())\r\n db.session.commit()\r\n except Exception:\r\n # If any other exceptions happened during the patching, we'll return 422\r\n result = {\"success\": False, \"error\": \"Could not apply patch\"}\r\n status_code = 422\r\n\r\n return make_response(jsonify(result), status_code)", "def write(self, id, data):\n return self._call('%s.update' % self._shopware_model,\n [int(id), data])", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "async def modify(self, pak_id: str, props: Property, source: str) -> ItemVariant:\n vbsp_config: lazy_conf.LazyConf\n if 'config' in props:\n # Item.parse() has resolved this to the actual config.\n vbsp_config = get_config(\n props,\n 'items',\n pak_id,\n )\n else:\n vbsp_config = self.vbsp_config\n\n if 'replace' in props:\n # Replace property values in the config via regex.\n vbsp_config = lazy_conf.replace(vbsp_config, [\n (re.compile(prop.real_name, re.IGNORECASE), prop.value)\n for prop in\n props.find_children('Replace')\n ])\n\n vbsp_config = lazy_conf.concat(vbsp_config, get_config(\n props,\n 'items',\n pak_id,\n prop_name='append',\n ))\n\n if 'description' in props:\n desc = desc_parse(props, source, pak_id)\n else:\n desc = self.desc\n\n if 'appenddesc' in props:\n desc = tkMarkdown.join(\n desc,\n desc_parse(props, source, pak_id, prop_name='appenddesc'),\n )\n\n if 'authors' in props:\n authors = sep_values(props['authors', ''])\n else:\n authors = self.authors\n\n if 'tags' in props:\n tags = sep_values(props['tags', ''])\n else:\n tags = self.tags.copy()\n\n variant = ItemVariant(\n pak_id,\n self.editor,\n vbsp_config,\n self.editor_extra.copy(),\n authors=authors,\n tags=tags,\n desc=desc,\n icons=self.icons.copy(),\n ent_count=props['ent_count', self.ent_count],\n url=props['url', self.url],\n 
all_name=self.all_name,\n all_icon=self.all_icon,\n source=f'{source} from {self.source}',\n )\n [variant.editor] = variant._modify_editoritems(\n props,\n [variant.editor],\n pak_id,\n source,\n is_extra=False,\n )\n\n if 'extra' in props:\n variant.editor_extra = variant._modify_editoritems(\n props.find_key('extra'),\n variant.editor_extra,\n pak_id,\n source,\n is_extra=True\n )\n\n return variant", "def copy_from_entity(self, entity):\n for prop in entity._EndpointsPropertyItervalues():\n attr_name = prop._code_name\n value = getattr(entity, attr_name)\n if value is not None:\n if isinstance(prop, properties.EndpointsAliasProperty):\n value_set = getattr(self, attr_name) is not None\n elif isinstance(prop, ComputedProperty):\n value_set = True\n else:\n value_set = prop._name in self._values\n if not value_set:\n setattr(self, attr_name, value)", "def from_entity(cls, e):\n kwargs = {name: e.get(name) for name, prop in cls._properties.items() if prop.is_id} # we need the id value\n obj = cls(**kwargs)\n obj._key = e.key\n\n for name, prop in cls._properties.items(): # set values\n if not prop.is_id:\n obj[name] = e.get(name)\n\n return obj", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def update_properties():\n state = request.get_json()\n if 'id' not in state:\n return jsonify({'success': False,\n 'error': 'ID not found in request!'})\n logger.debug(\"Updated Roast Properties: %s\" % state)\n c = mongo.db[app.config['HISTORY_COLLECTION']]\n roast_id = paranoid_clean(state.get('id'))\n item = c.find_one({'_id': ObjectId(roast_id)}, {'_id': 0})\n if not item:\n return jsonify({'success': False, 'message': 'No such roast.'})\n item = {'notes': state.get('notes'),\n 'input_weight': state.get('input_weight'),\n 'output_weight': state.get('output_weight')}\n c.update({'_id': ObjectId(roast_id)}, {'$set': item})\n return jsonify({'success': True})", "def setEntityProperty(self, ents, propName, **kwargs):\n spr = smtk.model.SetProperty.create()\n if hasattr(ents, '__iter__'):\n [spr.parameters().associate(ent.component()) for ent in ents]\n else:\n spr.parameters().associate(ents.component())\n spr.parameters().find('name').setValue(propName)\n if 'as_int' in kwargs:\n vlist = kwargs['as_int']\n if not hasattr(vlist, '__iter__'):\n vlist = [vlist, ]\n intVal = spr.parameters().find('integer value')\n intVal.setNumberOfValues(len(vlist))\n for i in range(len(vlist)):\n intVal.setValue(i, vlist[i])\n if 'as_float' in kwargs:\n vlist = kwargs['as_float']\n if not hasattr(vlist, '__iter__'):\n vlist = [vlist, ]\n floatVal = spr.parameters().find('float value')\n floatVal.setNumberOfValues(len(vlist))\n for i in range(len(vlist)):\n floatVal.setValue(i, vlist[i])\n if 'as_string' in kwargs:\n vlist = kwargs['as_string']\n if not hasattr(vlist, '__iter__'):\n vlist = [vlist, ]\n stringVal = spr.parameters().find('string value')\n stringVal.setNumberOfValues(len(vlist))\n for i in range(len(vlist)):\n stringVal.setValue(i, vlist[i])\n res = spr.operate()\n 
self.assertEqual(\n res.find('outcome').value(0),\n int(smtk.operation.Operation.SUCCEEDED),\n 'set property failed')\n return res.findInt('outcome').value(0)", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def patch(self, entity_id=None, **kwargs):\n entity = self.fetcher.get_entity(entity_id, **kwargs)\n if not entity:\n raise NotFound\n self._verify_etag(entity)\n\n incoming_data, errors = self._patch_schema().load(self.patch_data)\n if errors:\n raise FlumpUnprocessableEntity(errors=errors)\n\n entity = self.orm_integration.update_entity(entity,\n incoming_data.attributes)\n entity_data = self._build_entity_data(entity)\n response_data = ResponseData(entity_data, {'self': request.url})\n\n data, _ = self.response_schema(strict=True).dump(response_data)\n response = jsonify(data)\n response.set_etag(str(entity_data.meta.etag))\n return response, 200", "def _patch(self, interaction: JSON) -> NoReturn:\n self._id = interaction['id']\n\n self._data.patch_dpy(interaction) # Update data.", "def put_record(table, id):\n try:\n my_class = load_entity(table)\n except LoaderError as e:\n abort(400, e)\n\n p = my_class[id]\n for k in request.params:\n setattr(p, k, getattr(request.params, k))\n commit()\n return serialize_entity(p)", "def edit_single_list(current_user, id):\n if request.content_type == 'application/json':\n try:\n int(id)\n except ValueError:\n return response('failed', 'Please provide a valid ShoppingList Id', 400)\n else:\n\n shoplist = ShoppingList.query.filter_by(user_id=current_user.id, id=id).first()\n if shoplist is not None:\n data = request.get_json()\n name = data.get('name')\n description = data.get('description')\n if name:\n if re.match(\"^^([a-zA-Z0-9]+[ \\s])*[a-zA-Z0-9]+$\", name) and name.strip(' ')[0]:\n shoplist.name = name\n shoplist.description = description\n db.session.commit()\n return make_response(jsonify({\n 'name': shoplist.name,\n 'description': shoplist.description,\n 'message': 'Shopping list has been updated'\n\n })), 200\n return response('failed',\n 'Wrong name format. Name cannot contain special characters or start with a space',\n 400)\n return response('failed', 'No name input. Try again', 403)\n return response('failed', 'Shopping list does not exist. 
Please try again', 404)\n return response('failed', 'Content-type must be json', 202)", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def patch(self, request, slug, **kwargs):\n request.POST._mutable = True\n payload = request.data\n payload.pop('client', None)\n obj = self.get_object()\n # update main image\n updated_main_image = Uploader.upload_image_from_request(request)\n if updated_main_image:\n payload['image_main'] = updated_main_image\n # update image list\n updated_image_list = Uploader.upload_image_batch(\n request, instance=obj)\n if updated_image_list:\n payload.setlist('image_others', updated_image_list)\n # update videos\n video = Uploader.upload_video_from_request(request)\n if video:\n payload['video'] = video\n serializer = self.serializer_class(obj, data=payload, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.update(obj, payload)\n response = {\n \"data\": {\"property\": serializer.data},\n \"message\": \"Successfully updated your property\"\n\n }\n return Response(response)", "def __str__(self):\n return f\"{self.PropertyName, self.Prop_ID}\"", "def get_by_id(self, id):\n user = super(ExtendedUsersService, self).get_by_id(id)\n user.first_name = 'John' + str(id)\n user.last_name = 'Smith' + str(id)\n user.gender = 'male'\n return user", "def pack_entity(e,excludes=None):\n if not e or not hasattr(e,'properties'):\n logging.warning('models.pack_entity(%s), bad entity'%e)\n return {}\n rs = {'id':'%s'%e.key().id_or_name()}\n for pname,ptype in e.properties().items():\n if pname not in excludes:\n pvalue = getattr(e,pname)\n if pvalue is None:\n rs[pname] = ''\n elif isinstance(ptype,db.BooleanProperty):\n rs[pname] = str(pvalue).lower()\n elif isinstance(ptype,db.DateTimeProperty):\n rs[pname] = '%s'%datetime.strftime(pvalue,'%Y-%m-%d %H:%M:%S')\n elif isinstance(ptype,db.TextProperty):\n if pvalue.startswith('{') or pvalue.startswith('['):\n rs[pname] = eval(pvalue)\n else:\n rs[pname] = pvalue\n else:\n rs[pname] = pvalue\n return rs", "def FillInventoryServicePropertiesDuringEscrow(self, entity, request):\n return", "def put(self, id):\n adm = Administration()\n print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500", "def update(self, *args, **kwargs):\n selves = ['id', 'size', 'x', 'y']\n if args is not None and len(args) is not 0:\n for a in range(len(args)):\n setattr(self, selves[a], args[a])\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)" ]
[ "0.58525246", "0.5443223", "0.5127894", "0.5005448", "0.49910688", "0.4909347", "0.4842506", "0.48118356", "0.4784188", "0.47828564", "0.47294396", "0.46688277", "0.4649701", "0.46442547", "0.46272856", "0.46249476", "0.4617809", "0.45987448", "0.45408234", "0.45397025", "0.45376042", "0.45261502", "0.4480934", "0.44558468", "0.44397438", "0.4439274", "0.44322675", "0.44288743", "0.44124544", "0.44044814" ]
0.61305434
0
Mutates user-supplied Slip entity properties by id. Unaddressed properties, where allowed, become None (null). Returns the updated Slip entity as a JSON string.
def put(self, id=None): if id: slip = test4ValidEntity(id) if slip == None: self.response.set_status(404) else: slip_data = json.loads(self.request.body) if 'number' in slip_data: """ Test for requested Slip number already in use. """ query = Slip.query() results = query.fetch(limit = MAX_SLIPS) for match in results: if slip_data['number'] == match.number: slip.number = getSlipNum() else: slip.number = slip_data['number'] if 'current_boat' in slip_data: if slip.current_boat == None: slip.current_boat = slip_data['current_boat'] else: """ Query for the Boat and change at_sea to False. """ query = Boat.query(Boat.id == slip_data['current_boat']) result = query.fetch(limit = 1) if 'at_sea' in result: result.at_sea = False slip.current_boat = slip_data['current_boat'] else: slip.current_boat = None if 'arrival_date' in slip_data: slip.arrival_date = slip_data['arrival_date'] else: slip.arrival_date = None if 'departed_boat' in slip_data: slip.departed_boat = slip_data['departed_boat'] else: slip.departed_boat = None if 'departure_date' in slip_data: slip.departure_date = slip_data['departure_date'] else: slip.departure_date = None slip.put() slip_dict = slip.to_dict() del slip_dict['departure_history'] self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(slip_dict))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def fix_id(entity_json):\n entity_json['object_id'] = entity_json['id']\n del entity_json['id']\n return entity_json", "def update_nuxeo_properties(self, data, **documentid):\n uid = ''\n if len(documentid) != 1:\n raise TypeError(\"either uid or path\")\n if 'path' in documentid:\n uid = self.get_uid(documentid['path'])\n elif 'uid' in documentid:\n uid = documentid['uid']\n url = u'/'.join([self.conf['api'], \"id\", uid])\n headers = self.document_property_headers\n headers.update({'Content-Type': 'application/json'})\n\n # copy what we want from the input json into the payload\n payload = {}\n payload['uid'] = uid\n payload['entity-type'] = data.get('entity-type', 'document')\n payload['properties'] = data['properties']\n res = self.http.put(\n url, data=json.dumps(payload), auth=self.auth, headers=headers)\n res.raise_for_status()\n r2 = self.http.get(url, auth=self.auth, headers=headers)\n r2.raise_for_status()\n return json.loads(r2.content)", "def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)", "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not 
input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def setEntityProperty(self, ents, propName, **kwargs):\n spr = smtk.model.SetProperty.create()\n if hasattr(ents, '__iter__'):\n [spr.parameters().associate(ent.component()) for ent in ents]\n else:\n spr.parameters().associate(ents.component())\n spr.parameters().find('name').setValue(propName)\n if 'as_int' in kwargs:\n vlist = kwargs['as_int']\n if not hasattr(vlist, '__iter__'):\n vlist = [vlist, ]\n intVal = spr.parameters().find('integer value')\n intVal.setNumberOfValues(len(vlist))\n for i in range(len(vlist)):\n intVal.setValue(i, vlist[i])\n if 'as_float' in kwargs:\n vlist = kwargs['as_float']\n if not hasattr(vlist, '__iter__'):\n vlist = [vlist, ]\n floatVal = spr.parameters().find('float value')\n floatVal.setNumberOfValues(len(vlist))\n for i in range(len(vlist)):\n floatVal.setValue(i, vlist[i])\n if 'as_string' in kwargs:\n vlist = kwargs['as_string']\n if not hasattr(vlist, '__iter__'):\n vlist = [vlist, ]\n stringVal = spr.parameters().find('string value')\n stringVal.setNumberOfValues(len(vlist))\n for i in range(len(vlist)):\n stringVal.setValue(i, vlist[i])\n res = spr.operate()\n self.assertEqual(\n res.find('outcome').value(0),\n int(smtk.operation.Operation.SUCCEEDED),\n 'set property failed')\n return res.findInt('outcome').value(0)", "def update(table, id_):\n\n # your code\n\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n inventory_data = [\"Product: \", \"Manufacturer: \", \"Release date: \", \"Durability: \"]\n inputs = ui.get_inputs(inventory_data, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def update_properties():\n state = request.get_json()\n if 'id' not in state:\n return jsonify({'success': False,\n 'error': 'ID not found in request!'})\n logger.debug(\"Updated Roast Properties: %s\" % state)\n c = mongo.db[app.config['HISTORY_COLLECTION']]\n roast_id = paranoid_clean(state.get('id'))\n item = c.find_one({'_id': ObjectId(roast_id)}, {'_id': 0})\n if not item:\n return jsonify({'success': False, 'message': 'No such roast.'})\n item = {'notes': state.get('notes'),\n 'input_weight': state.get('input_weight'),\n 'output_weight': 
state.get('output_weight')}\n c.update({'_id': ObjectId(roast_id)}, {'$set': item})\n return jsonify({'success': True})", "def copy_from_entity(self, entity):\n for prop in entity._EndpointsPropertyItervalues():\n attr_name = prop._code_name\n value = getattr(entity, attr_name)\n if value is not None:\n if isinstance(prop, properties.EndpointsAliasProperty):\n value_set = getattr(self, attr_name) is not None\n elif isinstance(prop, ComputedProperty):\n value_set = True\n else:\n value_set = prop._name in self._values\n if not value_set:\n setattr(self, attr_name, value)", "def patch(self, show_id):\r\n song = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n result = []\r\n status_code = 204\r\n try:\r\n # This only returns a value (boolean) for \"op\": \"test\"\r\n result = patch_item(song, request.get_json())\r\n db.session.commit()\r\n except Exception:\r\n # If any other exceptions happened during the patching, we'll return 422\r\n result = {\"success\": False, \"error\": \"Could not apply patch\"}\r\n status_code = 422\r\n\r\n return make_response(jsonify(result), status_code)", "def from_entity(cls, e):\n kwargs = {name: e.get(name) for name, prop in cls._properties.items() if prop.is_id} # we need the id value\n obj = cls(**kwargs)\n obj._key = e.key\n\n for name, prop in cls._properties.items(): # set values\n if not prop.is_id:\n obj[name] = e.get(name)\n\n return obj", "def put(self, obj):\n\n if obj is None:\n return\n\n assert isinstance(obj, str), (\n f\"object is not of type string, \"\n f\"but {type(obj)} for fly identifier attribute\")\n\n obj = obj.strip()\n\n return obj", "def slo_update(obj, product_name, slo_id, title, description, slo_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slo = client.slo_list(product, id=slo_id)\n if not slo:\n fatal_error('SLO {} does not exist'.format(slo_id))\n\n slo = slo[0]\n\n with Action('Updating SLO {} for product {}'.format(slo_id, slo['product_name']), nl=True) as act:\n if slo_file:\n slo = json.load(slo_file)\n slo['uri'] = slo['uri']\n else:\n if title:\n slo['title'] = title\n if description:\n slo['description'] = description\n\n validate_slo(slo, act)\n\n if not act.errors:\n slo = client.slo_update(slo)\n\n print(json.dumps(slo, indent=4))", "def write(self, id, data):\n return self._call('%s.update' % self._shopware_model,\n [int(id), data])", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def patch(self, request, slug, **kwargs):\n request.POST._mutable = True\n payload = request.data\n payload.pop('client', None)\n obj = self.get_object()\n # update main image\n updated_main_image = Uploader.upload_image_from_request(request)\n if updated_main_image:\n payload['image_main'] = updated_main_image\n # update image list\n updated_image_list = Uploader.upload_image_batch(\n request, instance=obj)\n if updated_image_list:\n payload.setlist('image_others', updated_image_list)\n # update videos\n video = Uploader.upload_video_from_request(request)\n if video:\n payload['video'] = video\n serializer = self.serializer_class(obj, data=payload, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.update(obj, payload)\n response = {\n \"data\": {\"property\": serializer.data},\n \"message\": \"Successfully updated your property\"\n\n }\n return Response(response)", "def put(self, id):\n adm = Administration()\n 
print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500", "def patch(id):\n\n if not request.json or not 'name' in request.json:\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name is required\"\n }), 400)\n\n data = request.get_json(force=True)\n if isinstance(data['name'], int):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Name should be of type strings\"\n }), 400)\n\n if Party.get_party_by_name(data[\"name\"]):\n return make_response(jsonify({\n \"status\": 409,\n \"error\": \"Party name already taken\"\n }), 409)\n if Validate.validate_empty_string(data_inputed=data[\"name\"]):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name cannot be empty\"\n }), 400)\n update_data = request.get_json(force=True)\n party_to_edit = Party.get_party_by_id(id=id)[0]\n party_to_edit = Party.update_party(update_data=update_data,id=id)\n return make_response(jsonify({\n \"status\": 201,\n \"data\": party_to_edit\n }), 201)", "async def modify(self, pak_id: str, props: Property, source: str) -> ItemVariant:\n vbsp_config: lazy_conf.LazyConf\n if 'config' in props:\n # Item.parse() has resolved this to the actual config.\n vbsp_config = get_config(\n props,\n 'items',\n pak_id,\n )\n else:\n vbsp_config = self.vbsp_config\n\n if 'replace' in props:\n # Replace property values in the config via regex.\n vbsp_config = lazy_conf.replace(vbsp_config, [\n (re.compile(prop.real_name, re.IGNORECASE), prop.value)\n for prop in\n props.find_children('Replace')\n ])\n\n vbsp_config = lazy_conf.concat(vbsp_config, get_config(\n props,\n 'items',\n pak_id,\n prop_name='append',\n ))\n\n if 'description' in props:\n desc = desc_parse(props, source, pak_id)\n else:\n desc = self.desc\n\n if 'appenddesc' in props:\n desc = tkMarkdown.join(\n desc,\n desc_parse(props, source, pak_id, prop_name='appenddesc'),\n )\n\n if 'authors' in props:\n authors = sep_values(props['authors', ''])\n else:\n authors = self.authors\n\n if 'tags' in props:\n tags = sep_values(props['tags', ''])\n else:\n tags = self.tags.copy()\n\n variant = ItemVariant(\n pak_id,\n self.editor,\n vbsp_config,\n self.editor_extra.copy(),\n authors=authors,\n tags=tags,\n desc=desc,\n icons=self.icons.copy(),\n ent_count=props['ent_count', self.ent_count],\n url=props['url', self.url],\n all_name=self.all_name,\n all_icon=self.all_icon,\n source=f'{source} from {self.source}',\n )\n [variant.editor] = variant._modify_editoritems(\n props,\n [variant.editor],\n pak_id,\n source,\n is_extra=False,\n )\n\n if 'extra' in props:\n variant.editor_extra = variant._modify_editoritems(\n props.find_key('extra'),\n variant.editor_extra,\n pak_id,\n source,\n is_extra=True\n )\n\n return variant", "def normalize_idp(idp):\n if idp is None:\n return None\n\n _idp = idp.to_dict()\n _idp['enabled'] = idp['is_enabled']\n _idp['name'] = idp['id']\n return _idp", "def update(cls, dto: dict):\n entity = cls.from_dict(dto)\n try:\n valid_entity = cls.find_by_id(dto[\"id\"])\n except KeyError as e:\n raise AppException(\"Can't find key {}\".format(e))\n\n if not valid_entity:\n return None\n\n # validate creation your creation.\n entity.creation_validation()\n\n # Copy all attributes from entity to valid_entity.\n valid_entity << entity\n\n return valid_entity", "def put(self, obj):\n\n if obj is None:\n return\n\n assert isinstance(obj, str), (\n f\"object is not of type string, \"\n f\"but {type(obj)} for 
phone attribute\")\n\n obj = obj.strip()\n\n return obj", "def put_record(table, id):\n try:\n my_class = load_entity(table)\n except LoaderError as e:\n abort(400, e)\n\n p = my_class[id]\n for k in request.params:\n setattr(p, k, getattr(request.params, k))\n commit()\n return serialize_entity(p)", "def put(self, op_id: str) -> Response:\n data = request.get_json()\n\n authorized: bool = Users.objects.get(id=get_jwt_identity()).roles.organization or \\\n Users.objects.get(id=get_jwt_identity()).roles.admin\n\n if authorized:\n try:\n res = Opportunity.objects.get(id=op_id).update(**data)\n except ValidationError as e:\n return bad_request(e.message)\n return jsonify(res)\n else:\n return forbidden()", "def edit_single_list(current_user, id):\n if request.content_type == 'application/json':\n try:\n int(id)\n except ValueError:\n return response('failed', 'Please provide a valid ShoppingList Id', 400)\n else:\n\n shoplist = ShoppingList.query.filter_by(user_id=current_user.id, id=id).first()\n if shoplist is not None:\n data = request.get_json()\n name = data.get('name')\n description = data.get('description')\n if name:\n if re.match(\"^^([a-zA-Z0-9]+[ \\s])*[a-zA-Z0-9]+$\", name) and name.strip(' ')[0]:\n shoplist.name = name\n shoplist.description = description\n db.session.commit()\n return make_response(jsonify({\n 'name': shoplist.name,\n 'description': shoplist.description,\n 'message': 'Shopping list has been updated'\n\n })), 200\n return response('failed',\n 'Wrong name format. Name cannot contain special characters or start with a space',\n 400)\n return response('failed', 'No name input. Try again', 403)\n return response('failed', 'Shopping list does not exist. Please try again', 404)\n return response('failed', 'Content-type must be json', 202)", "def put(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n else:\n boat.name = None\n if 'type' in boat_data:\n boat.type = boat_data['type']\n else:\n boat.type = None\n if 'length' in boat_data:\n boat.length = boat_data['length']\n else:\n boat.length = None\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def pack_entity(e,excludes=None):\n if not e or not hasattr(e,'properties'):\n logging.warning('models.pack_entity(%s), bad entity'%e)\n return {}\n rs = {'id':'%s'%e.key().id_or_name()}\n for pname,ptype in e.properties().items():\n if pname not in excludes:\n pvalue = getattr(e,pname)\n if pvalue is None:\n rs[pname] = ''\n elif isinstance(ptype,db.BooleanProperty):\n rs[pname] = str(pvalue).lower()\n elif isinstance(ptype,db.DateTimeProperty):\n rs[pname] = '%s'%datetime.strftime(pvalue,'%Y-%m-%d %H:%M:%S')\n elif isinstance(ptype,db.TextProperty):\n if pvalue.startswith('{') or pvalue.startswith('['):\n rs[pname] = eval(pvalue)\n else:\n rs[pname] = pvalue\n else:\n rs[pname] = pvalue\n return rs" ]
[ "0.5986681", "0.5305798", "0.49620193", "0.48357922", "0.47986293", "0.47317642", "0.47040105", "0.47010326", "0.46749356", "0.4651737", "0.46378273", "0.46107113", "0.45864484", "0.45753363", "0.45692047", "0.4544636", "0.4535495", "0.4522036", "0.45060802", "0.44929498", "0.44819164", "0.44818738", "0.4481563", "0.44700506", "0.44640324", "0.44548503", "0.4425318", "0.44195148", "0.44164777", "0.44102696" ]
0.582127
1
Manage a Boat departure. Returns JSON of the affected Slip details, or ignores the request if the departing Boat ID does not match the requested Slip's current_boat ID.
def patch(self, id=None): if id: boat2Depart = test4ValidEntity(id) if boat2Depart == None: self.response.set_status(404) else: requestBody = json.loads(self.request.body) query = Slip.query(Slip.number == requestBody['number']) result = query.fetch(limit = 1) for match in result: if match.current_boat == boat2Depart.id and match.number == requestBody['number']: boat2Depart.at_sea = True boat2Depart.put() match.current_boat = None match.arrival_date = None match.departure_date = requestBody['departure_date'] match.departed_boat = boat2Depart.id match.put() slip_dict = match.to_dict() del slip_dict['departure_history'] self.response.headers['Content-Type'] = 'application/json' self.response.write(json.dumps(slip_dict)) else: self.response.set_status(400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for requested Slip number already in use. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n for match in results:\n if slip_data['number'] == match.number:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n else:\n slip.current_boat = None\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n else:\n slip.arrival_date = None\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n else:\n slip.departed_boat = None\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n else:\n slip.departure_date = None\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. 
\"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def get(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_dict = slip.to_dict()\n slip_dict['departure_history'] = {}\n slip_dict['departure_history']['departure_date'] = slip.departure_date\n slip_dict['departure_history']['departed_boat'] = slip.departed_boat\n del slip_dict['departed_boat'], slip_dict['departure_date']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def put():\n\n logger.debug('Catch PUT request by URL /api/departments.')\n return abort(405)", "def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())", "def place_bid():\n if not request.get_json():\n abort(400)\n data = request.get_json(force=True)\n\n if not data.get('userID'):\n abort(400)\n if not data.get('amount'):\n abort(400)\n if not data.get('petID'):\n abort(400)\n\n #new_uuid = str(uuid.uuid4())\n mod.place_a_bid(data['petID'], data['amount'], data['userID'])\n # HTTP 200 Created\n # return jsonify({\"id\": new_uuid}), 200\n resp = {\"status\": \"OK\"}\n return jsonify(resp)", "def post(self, flight_id):\n data = request.get_json()\n seat = 1\n if data:\n seat = data.get('seat')\n current_user = get_jwt_identity()\n try:\n flight = get_flight(flight_id)\n if not flight:\n return generate_response('Selected flight not available', 400)\n\n if seat == 1 and flight.booked_economy < flight.airplane.economy_seats:\n data = dict(booked_economy=flight.booked_economy+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Economy seat flight reservation successfull', 201)\n\n if seat == 2 and flight.booked_business < flight.airplane.business_seats:\n data = dict(booked_business=flight.booked_business+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Business seat flight reservation successfull', 201)\n\n except Exception as e:\n db.session.rollback()\n return jsonify({'error': str(e)}), 401", "def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))", "def post(id_=None):\n\n logger.debug('Catch POST request 
by URL /api/departments/%i.', id_)\n return abort(405)", "def lead_detail(request, pk):\n try:\n lead = Lead.objects.get(pk=pk)\n except Lead.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = LeadSerializer(lead)\n return JSONResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = LeadSerializer(lead, data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data)\n return JSONResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n lead.delete()\n return HttpResponse(status=204)", "def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201", "def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200", "def patch(self, request: 'Request', pk: int, format=None) -> Response:\n travel = Viagem.get_travel(request.user, pk)\n serializer = TravelSerialization(\n travel, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def get_adventure_detail(request):\n if request.is_ajax():\n user = request.user\n game_saved = user.game_saved\n adventure_id = game_saved.adventure_saved\n task_num = game_saved.task_saved\n adventure = Adventure.objects.get(adventure_id=adventure_id)\n Adventures_info = adventures_info.objects.get(adventure_name=adventure)\n task = Task.objects.get(adventure_name=adventure, task_number=task_num)\n\n\n alist =[\n {\n \"name\" : str(adventure.adventure_name),\n \"items\" : str(Adventures_info.items_needed),\n \"expenses\" : str(Adventures_info.expenses),\n \"locations\" : Adventures_info.locations,\n \"mapaddress\" : str(task.google_map),\n \"theme_character_url\" : str(adventure.theme_character_url)\n }\n\n ]\n\n return JsonResponse(alist, safe=False)\n else:\n raise PermissionDenied()", "def put(self, request):\n\n data = request.data\n career_planning_id = data['career_planning_id']\n data.pop(\"career_planning_id\")\n LOGGER.info(\"career_planning id:%d\", career_planning_id)\n career_planning_data = CareerPlanning.objects.filter(id=career_planning_id)\n\n try:\n career_planning_data.update(**data)\n LOGGER.info(\"CareerPlanning data updated successfully\")\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record updated successfully\"})\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})", "def close_bid(request, listing_id): \n try:\n listing = Listing.objects.get(pk=listing_id) \n except Listing.DoesNotExist:\n return JsonResponse({\"success\":False})\n\n if request.user == listing.seller:\n listing.isActive = False\n listing.save()\n return 
JsonResponse({\"success\":True})\n\n return JsonResponse({\"success\":False})", "def put(self, department_id):\n department = get_department_by_id(department_id)\n department.name = request.json[\"name\"]\n db.session.commit()\n return {}, 200", "def billing_info_detail(request, pk):\n try:\n billing_info = BillingInfo.objects.get(pk=pk)\n except BillingInfo.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = BillingInfoSerializer(billing_info)\n return JSONResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = BillingInfoSerializer(billing_info, data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data)\n return JSONResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n billing_info.delete()\n return HttpResponse(status=204)", "def post(self):\n parent_key = ndb.Key(Boat, \"parent_boat\")\n boat_data = json.loads(self.request.body)\n new_boat = Boat(id=None, name=boat_data['name'], type=boat_data['type'],\n length=boat_data['length'], at_sea=True, parent=parent_key)\n new_boat.put()\n new_boat.id = '/Boat/' + new_boat.key.urlsafe()\n new_boat.put()\n boat_dict = new_boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def detail(request, bid):\n result = {\n 'status': '', # 'success' or 'failure'\n 'msg': '', # msg of the book\n 'error_msg': '', # notes of failure\n }\n # the error method\n if not is_get(request, result):\n return HttpResponse(json.dumps(result))\n\n # filter the list of book\n # actually, the number of book is 0 or 1\n books = models.BookInfo.objects.filter(id=bid).first()\n\n # if the number of book is 0\n if books is None:\n result['status'] = 'failure'\n result['error_msg'] = 'invalid book id'\n return HttpResponse(json.dumps(result))\n\n book_dict = dict()\n # transfer db obj to dict\n book = process_book_obj(books)\n book_dict[str(books.id)] = json.dumps(book)\n\n result['status'] = \"success\"\n result['msg'] = json.dumps(book_dict)\n\n return HttpResponse(json.dumps(result))", "def Addtoinventory(request): \n modes=['manage','add','order']\n departments={}\n booklist=[]\n for league in models.Dept.objects.all(): \n departments[league.pk]=league\n \n message=\"\"\n nonemptyAuthors = [x for x in request.POST.getlist('AuthorName') if x!='']\n nonemptybooknames = [x for x in request.POST.getlist('bookName') if x!='']\n nonemptybookDesc = [x for x in request.POST.getlist('bookdesc') if x!='']\n nonemptyQuantities = [x for x in request.POST.getlist('Quantity') if x!='']\n nonemptyRows = [x for x in request.POST.getlist('RowRack') if x!='']\n nonemptyselectedDeparts = [x for x in request.POST.getlist('depart_select') if x!='NA']\n \n for j,k,h,fa,z,loc in itertools.zip_longest(nonemptyAuthors,nonemptybooknames,nonemptybookDesc,nonemptyselectedDeparts,nonemptyQuantities,nonemptyRows):\n shortname=k[1:5] \n values=k.split(\"-\")\n if len(values)==1:\n ye=dt.today().year\n values.extend(['I',ye,'0'])\n \n if loc is not None:\n c=loc.split(\"-\")\n if len(c)==1:\n c.extend(['0','0'])\n else:\n #setting default value\n c=[\"20\",\"10\",\"1\"]\n if len(values) >0:\n try:\n departmentDetails=models.Dept.objects.get(dpt_id=fa)\n except Exception as e:\n print(e)\n pass\n try:\n i=0\n testa = models.Atr.objects.values('a_id')\n for test in testa:\n if i>int(test['a_id']):\n i=i\n else:\n i=int(test['a_id'])\n \n varas = 
models.Atr.objects.values('name')\n isin=False\n for f in list(varas):\n if str(j).lower() == f['name'].lower():\n isin=True\n break\n if isin:\n pass\n else:\n models.Atr.objects.create(a_id=str(i+1),name=str(j),title=\"Mr.\",email=\"[email protected]\")\n except Exception as e:\n if \"does not\" in str(e):\n models.Atr.objects.create(a_id=str(i+1),name=str(j),title=\"Mr.\",email=\"[email protected]\")\n print(e)\n pass\n varset=None\n try:\n i=0;\n testab = models.Bks.objects.values('b_id')\n for test in testab:\n if i>int(str(test['b_id']).split('_')[2]):\n i=i\n else:\n i=int(str(test['b_id']).split('_')[2])\n if (models.Bks.objects.filter(title=str(values[0])).exists()):\n try: \n if not models.Bks.objects.filter(title=str(values[0]),edition=str(values[1]),p_year=str(values[2]),pub=str(values[3])).exists():\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=str(i+1),dpt_id_id=str(fa))\n else:\n message=\"book with the same name already exists\"\n except Exception as e:\n print(e)\n else:\n if isin:\n atrobj=models.Atr.objects.get(name=str(j))\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=atrobj.a_id,dpt_id_id=str(fa))\n else:\n atrobj=models.Atr.objects.get(name=str(j))\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=atrobj.a_id,dpt_id_id=str(fa))\n\n except Exception as e:\n if \"does not\" in str(e):\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=str(i+1),dpt_id_id=str(fa))\n print(e)\n pass\n \n try:\n g=0\n bookobj =models.Bks.objects.filter(title=str(values[0]),edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]))\n testba = models.Invt.objects.values('id') \n for test in testba:\n if g>int(str(test['id'])):\n g=g\n else:\n g=int(str(test['id']))\n \n Invobj=models.Invt.objects.filter(i_id_id=\"IN_\"+shortname+\"_\"+str(g+1))\n\n if len(bookobj) >= 0:\n if(len(Invobj) == 0):\n for s in bookobj:\n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=s.b_id,shelf=str(c[0]),rack=str(c[1]),row=int(c[2]))\n else:\n for s in bookobj:\n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=s.b_id,shelf=str(c[0]),rack=str(c[1]),row=int(c[2]))\n\n else:\n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=\"IN_\"+shortname+\"_\"+str(g+1),shelf=str(c[0]),rack=str(c[1]),row=int(c[2]))\n except Exception as e:\n try:\n if \"does not\" in str(e): \n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=\"IN_\"+shortname+\"_\"+str(g+1),shelf=str(c[0]),rack=str(c[1]),row=int(c[2]))\n else:\n t=models.Invt.objects.get(i_id_id=\"IN_\"+shortname+\"_\"+str(g+1))\n t.qty= t.qty+int(z)\n t.save()\n except Exception as e:\n print(e)\n \n else:\n message=\"the book details are not given properly\"\n pass\n\n return render(\n request,\n 'app/manageInv.html',\n {\n 'title':'Manage Inventory',\n 'invmodes':modes,\n 'dispmode':'manage',\n 'message':message,\n 'librarian':get_librarians(),\n 'le':list(range(1,2)),\n 
'DepartmentList':departments.keys(),\n 'books':get_valid_Books().values(),\n 'year':datetime.now().year,\n }\n )", "def booking_update(id):\n if request.method == \"POST\":\n month = request.form.get(\"month\")\n day = request.form.get(\"day\")\n year = request.form.get(\"year\")\n\n booking = Booking.query.filter_by(id=id).first()\n booking.first_name = request.form.get(\"first_name\")\n booking.last_name = request.form.get(\"last_name\")\n booking.phone = request.form.get(\"phone\")\n booking.email = request.form.get(\"email\")\n booking.num_participants = request.form.get(\"num_participants\")\n booking.booking_date = f\"{year}-{month}-{day}\"\n booking.collection_time = request.form.get(\"collection_time\")\n booking.return_time = request.form.get(\"return_time\")\n\n if request.form.get(\"rental_complete\") == \"True\":\n booking.rental_complete = True\n else:\n booking.rental_complete = False\n\n payment = Payment.query.filter_by(booking_id=id).first()\n payment.full_amount_due = float(request.form.get(\"full_amount_due\"))\n\n if request.form.get(\"remainder_due_check\") == \"paid\":\n payment.remainder_due = 0\n else:\n payment.remainder_due = request.form.get(\"remainder_due\")\n\n db.session.commit()\n\n return redirect(url_for(\"bookings.booking_index\"))\n\n booking = Booking.query.get(id)\n payment = Payment.query.filter_by(booking_id=id).first()\n booking_date = booking.booking_date.split(\"-\")\n date = {\"month\": booking_date[1], \"day\": booking_date[2],\n \"year\": booking_date[0]}\n return render_template(\"booking_update.html\", booking=booking,\n payment=payment, date=date)", "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "async def _pilot_fleet(self, fleet_id: int) -> None:\n raise NotImplementedError()", "def apiEditar():\n response.view = 'generic.json'\n\n def GET(*args, **vars):\n # if not request.env.request_method == 'GET': raise HTTP(403)\n # proveedores = db().select(db.proveedor.ALL).as_list()\n return dict()\n\n def POST(*args, **vars):\n # raise HTTP(403)\n pieza = vars[\"pieza\"]\n proveedores_list = vars[\"proveedores_list\"]\n respuesta = \"ok\"\n idProveedor = 0\n\n fechaIn = pieza[\"fechaIn\"].split('/')\n fechaString = fechaIn[2] + '-' + fechaIn[1] + '-' + fechaIn[0]\n\n pieza_data = dict(codigo=pieza[\"codigo\"], nombre=pieza[\"nombre\"],\n descripcion=pieza[\"descripcion\"], cantidad=pieza[\"cantidad\"],\n unidad=pieza[\"unidad\"], precio_entrada=pieza[\"precioIn\"],\n precio_salida=pieza[\"precioOut\"], fecha_entrada=fechaString)\n\n # Verificar si aumento la cantidad. 
Para asi dar nueva entrada\n cantidad_antigua = db.pieza(pieza[\"id\"]).cantidad\n cantidad_nueva = int(pieza[\"cantidad\"]) - cantidad_antigua if cantidad_antigua < int(\n pieza[\"cantidad\"]) else int(pieza[\"cantidad\"])\n\n registrar_entrada = cantidad_antigua != int(pieza[\"cantidad\"]) and cantidad_nueva != 0\n\n if registrar_entrada:\n id_pieza_entrada = db.pieza_entrada.insert(codigo=pieza[\"codigo\"], nombre=pieza[\"nombre\"],\n descripcion=pieza[\"descripcion\"], cantidad=cantidad_nueva,\n unidad=pieza[\"unidad\"], precio_entrada=pieza[\"precioIn\"],\n precio_salida=pieza[\"precioOut\"], fecha_entrada=datetime.datetime.now().date())\n\n for prov in proveedores_list:\n db.proveedor_entrada.insert(\n id_pieza_entrada=id_pieza_entrada, nombre=prov[\"nombre\"], descripcion=prov[\"descripcion\"],\n direccion=prov[\"direccion\"], telefono=prov[\"telefono\"])\n\n # Actualizo la pieza\n try:\n db(db.pieza.id == pieza[\"id\"]).update(**pieza_data)\n db(db.pieza_proveedor.id_pieza == pieza[\"id\"]).delete()\n\n for prov in proveedores_list:\n db.pieza_proveedor.insert(\n id_pieza=pieza[\"id\"], id_proveedor=prov[\"id\"])\n\n except Exception:\n respuesta = \"error\"\n\n return dict(respuesta=respuesta)\n # return dict(proveedor=proveedor)\n\n return locals()", "def post(self, request):\n data = request.data\n if not data[\"legal_name\"]:\n if not data[\"legal_name\"] and data[\"lei\"]:\n data[\"legal_name\"] = get_legal_name(data[\"lei\"])\n\n serialiser = BondSerializer(data=data)\n\n if serialiser.is_valid():\n serialiser.save()\n return Response(serialiser.data, status=status.HTTP_201_CREATED)\n return Response(serialiser.errors, status=status.HTTP_400_BAD_REQUEST)", "def delete(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n if boat.at_sea == False:\n query = Slip.query(Slip.current_boat == boat.id)\n result = query.fetch(limit = 1)\n for match in result:\n match.current_boat = None\n match.arrival_date = None\n match.put()\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\") \n else:\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\")", "def post(self, request):\n data = request.data\n try:\n career_planning = CareerPlanning(**data)\n career_planning.save()\n LOGGER.info(\"CareerPlanning created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})", "def details_bouquets(request, bouquet_id):\n if request.method == \"GET\":\n template = \"bouquets/bouquets_details.html\"\n bouquet = get_bouquet(bouquet_id)\n bouquet_flowers = get_relations(bouquet_id)\n context = {\n \"bouquet\": bouquet,\n \"bouquet_flowers\": bouquet_flowers\n }\n return render(request, template, context)\n elif request.method == \"POST\":\n form_data = request.POST\n\n if \"actual_method\" in form_data and form_data[\"actual_method\"] == \"PUT\":\n update_bouquet(form_data, bouquet_id)\n return redirect(\"bouquetapp:bouquet\", bouquet_id=bouquet_id)", "def post(self) -> Response:\n\n data = request.get_json()\n\n authorized: bool = Users.objects.get(id=get_jwt_identity()).roles.organization or \\\n Users.objects.get(id=get_jwt_identity()).roles.admin\n\n if authorized:\n data['organization'] = get_jwt_identity()\n try:\n opportunity = Opportunity(**data).save()\n except ValidationError as e:\n return bad_request(e.to_dict())\n\n 
output = {'id': str(opportunity.id)}\n return jsonify(output)\n else:\n return forbidden()" ]
[ "0.6218137", "0.60290563", "0.53608197", "0.529036", "0.5263001", "0.5204306", "0.5138205", "0.5055655", "0.50181365", "0.4956566", "0.49363345", "0.49132457", "0.49068174", "0.4871912", "0.4846284", "0.48194218", "0.4819195", "0.48055452", "0.47923288", "0.47755215", "0.47520903", "0.47391406", "0.47299448", "0.47170222", "0.47022834", "0.46941087", "0.4686508", "0.46814698", "0.46803495", "0.46767092" ]
0.63698965
0
This function creates and saves the game skeleton demo level.
def create_level(self, name): # Create a level object level = Level() size_y=8 size_x=10 # Separates static and non static parts # This will speed up network games, since only the non static part will be # sent on the network level_static = soya.World(level) # Load 3 materials (= textures) for files ./materials{grass|ground|snow}.data ground = soya.Material.get("block2") # Creates a landscape, from the heighmap "./images/map.png" # The landscape is in the static part (=level_static), because it won't change along the game. land = soya.Land(level_static) land.y =0.0 land.from_image(soya.Image.get("floor.png")) # Sets how high is the landscape land.multiply_height(-0.0) # These values are trade of between quality and speed land.map_size = 8 land.scale_factor = 1.5 land.texture_factor = 1.0 # Set the texture on the landscape, according to the height # (i.e. height 0.0 to 15.0 are textured with grass, ...) land.set_material_layer(ground, 0.0, 25.0) # squares where the player starts # Note that this is stored in physical, not abstract, coordinates. always_clear=[(-1,-1),(-2,-1),(0,-1),(-1,-2),(-1,0)] cube = soya.Shape.get("cube") # r and c represent the cube positions in the grid, # while x and y represent the physical coordinates in the world. # Note the simple formula: r = x + self.size_x , c = y + self.size_y border_row, border_col = 2*size_x - 2, 2*size_y - 2 for r, x in enumerate(range(-size_x,size_x-1)): for c, y in enumerate(range(-size_y,size_y-1)): bx = x +128 by = y +128 if (r % 2 == 0 and c % 2 == 0) or \ (r == 0 or c == 0 or r == border_row or c == border_col ): # This is a wall block block = soya.Volume(level_static, cube) block.scale(1.0, 1.0, 1.0) block.set_xyz(bx, 0.5, by) elif random() < 0.8 and not (x, y) in always_clear: # A soft block block = SoftBox() level.add_mobile(block) block.scale(1.0, 1.0,1.0) block.set_xyz(bx, 0.5, by) # Creates a light in the level, similar to a sun (=a directional light) sun = soya.Light(level_static) sun.directional = 1 sun.diffuse = (1.0, 0.8, 0.4, 1.0) sun.rotate_vertical(-45.0) # Creates a sky atmosphere, with fog atmosphere = soya.SkyAtmosphere() atmosphere.ambient = (0.3, 0.3, 0.4, 1.0) atmosphere.fog = 1 atmosphere.fog_type = 0 atmosphere.fog_start = 40.0 atmosphere.fog_end = 50.0 atmosphere.fog_color = atmosphere.bg_color = (0.2, 0.5, 0.7, 1.0) atmosphere.skyplane = 1 atmosphere.sky_color = (1.5, 1.0, 0.8, 1.0) # Set the atmosphere to the level level.atmosphere = atmosphere # Save the level as "./worlds/level_demo.data" (remember, levels are subclasses of worlds) level_static.filename = level.name = name+"_bbomber_static" level_static.save() level.filename = level.name = name+"_bbomber" level.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_file(self, sub):\n fileout = os.path.join(self.saving_dir, 'output_skeleton_' + str(sub) + '.nii.gz')\n print('writing altered skeleton to', fileout)\n aims.write(self.skel, fileout)", "def setupLevel(self):\n\n self.state = GameState.SETUP\n\n # vado a leggere il dizionario corrispondente\n # al numero di livello corrente facendo in modo\n # che se il numero di livello richiesto non esiste\n # carico quello più vicino a quello richiesto\n if self.levelIndex>= len(levels):\n self.levelIndex = len(levels) -1\n elif self.levelIndex <0:\n self.levelIndex = 0\n\n level = levels[self.levelIndex]\n\n # nome del livello\n self.level_name = level.get(\"name\", \"Livello %s\" % (self.levelIndex+1))\n\n # dimensione del labirinto (numero di righe e di colonne)\n self.nrows = level.get(\"nrows\", 20)\n self.ncols = level.get(\"ncols\", 20)\n\n # l'algoritmo di generazione del labirinto supporta solo un numero di\n # righe e di colonne dispari, quindi approssimiamo le dimensioni ai\n # valori dispari più vicini\n if self.nrows % 2 == 0:\n self.nrows+=1\n if self.ncols % 2 == 0:\n self.ncols+=1\n\n\n # fattore di scala del labirinto\n # attenzione che, fattori di scala molto\n # grandi, rallentano le prestazioni di gioco\n self.scale = level.get(\"scale\", 30)\n\n background_image_filename = level.get(\"background_image\", None)\n if background_image_filename!=None:\n self.background_image = pygame.image.load(background_image_filename).convert()\n else:\n self.background_image = None\n\n # parametri usati dall'algoritmo di generazione del labirinto\n # si veda https://en.wikipedia.org/wiki/Maze_generation_algorithm\n self.maze_density = level.get(\"maze_density\", Game.MAZE_DENSITY)\n self.maze_complexity = level.get(\"maze_complexity\", Game.MAZE_COMPLEXITY)\n\n # colore delle monete\n self.coin_color = level.get(\"coin_color\", Game.YELLOW)\n\n # tempo a disposizione per completare il livello\n self.time = level.get(\"time\", 240)\n self.clockTime = level.get(\"clock\", 80)\n\n # numero di nemici\n self.numEnemies = level.get(\"num_enemies\", 0)\n\n # numero di ricaricatori temporali\n self.numTimeReloaders = level.get(\"time_reloaders\", 0)\n\n # numero di bombe \"distruggi monete\"\n self.bonus_bombs = level.get(\"bombs\", [])\n # numero di bombe \"distruggi muri\"\n self.bonus_wall_bombs = level.get(\"wall_bombs\", [])\n # numero di bombe \"distruggi nemici\"\n self.bonus_enemy_killers = level.get(\"enemy_killers\", [])\n # numero di pizze che rendono i nemici golosi di monete\n self.bonus_greedy_enemies = level.get(\"greedy_enemies\", 0)\n # numero di portali (teletrasporto del giocatore)\n self.bonus_portals = level.get(\"portals\", 0)\n\n # proiettili a disposizione del giocatore per un certo periodo di tempo\n self.bonus_player_bullets = level.get(\"player_bullets\", [])\n\n #numero di bonus che rendono il giocatore invisibile per un certo periodo di tempo\n self.bonus_invisibility_players = level.get(\"invisibility_players\", [])\n\n # numero di shooters (nemici che sparano contro il giocatore)\n self.numShooters = level.get(\"num_shooters\" , [])\n\n\n # suoni di collisione\n self.sound_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n self.sound_bomb_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/bombexplosion.ogg\")\n\n\n # suono della moneta raccolta\n #self.sound_coin = pygame.mixer.Sound(\"Effects/SFX/beep_7.wav\")\n self.sound_coin = pygame.mixer.Sound(\"Effects/jute-dh/gold.wav\")\n\n # suono del timeReloader\n self.sound_time_reloader = 
pygame.mixer.Sound(\"Effects/SFX/echo_5.wav\")\n\n # suono di collisione con enemy killer\n self.sound_enemy_killer = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n\n # suono dell'invisibility player\n self.sound_invisibility_player = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono del teletrasporto\n self.sound_portal = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono dell'arma presa e del proiettile sparato\n self.sound_weapon = pygame.mixer.Sound(\"Effects/jute-dh/hit_2m.wav\")\n\n # suono dei greedy enemies\n self.sound_greedy_enemies = pygame.mixer.Sound(\"Effects/sound_effects/squeak2.wav\")\n\n # suono del levello completato\n self.sound_completed_level = pygame.mixer.Sound(\"Effects/sound_effects/level_completed.wav\")\n\n #\n # IMMAGINI DEGLI SPRITE DI GIOCO: CONFIGURABILE DA FILE DI CONFIGURAZIONE!!\n #\n\n # immagine delle pareti del labirinto\n self.wall_filename = level.get(\"wall\", \"Backgrounds/Dim/Boards.jpg\")\n\n # immagine dei nemici del labirinto\n self.enemies_filename = level.get(\"enemies\", \"Sprites/Animals/duck.png\")\n\n # immagine dei nemici del labirinto che possono anche sparare\n # di default gli shooters hanno lo stesso aspetto dei nemici normali\n self.shooters_filename = level.get(\"shooters\", self.enemies_filename)\n\n # immagine della bomba distruggi monete\n self.bomb_filename = level.get(\"bomb\", \"Sprites/bomb_bonus.png\")\n # immagine della bomba distruggi muri\n self.wall_bomb_filename = level.get(\"wall_bomb\", \"Sprites/bomb_wall_bonus.png\")\n\n self.time_reloaders_filename = level.get(\"time_reloader\", \"Sprites/clessidra.png\")\n self.enemy_killers_filename = level.get(\"enemy_killer\", \"Sprites/skull2.png\")\n self.greedy_enemies_filename = level.get(\"greedy_enemy\", \"Sprites/pizza.png\")\n self.portals_filename = level.get(\"portal\", \"Sprites/CrawlStone/portal.png\")\n self.invisibility_players_filename = level.get(\"invisibility_player\", \"Sprites/CrawlStone/wizard_hat_2.png\")\n\n # lo sprite che fornisce i proiettili ha la stessa immagine dei proiettili\n self.player_bullets_filename = level.get(\"player_bullet\", \"Sprites/CrawlStone/apple.png\")\n self.bonus_player_bullets_filename = self.player_bullets_filename\n\n self.shooters_bullets_filename = level.get(\"shooter_bullet\", \"Sprites/CrawlStone/apple.png\")\n\n #\n # GRUPPI DI SPRITES\n #\n\n # i muri del mio labirinto\n self.walls = pygame.sprite.Group()\n\n # i nemici\n self.enemies = pygame.sprite.Group()\n\n # i nemici che sparano fanno parte dello stesso gruppo dei nemici!\n #self.shooters = pygame.sprite.Group()\n\n # le bombe\n self.bombs = pygame.sprite.Group()\n\n # gli attivatori/disattivatori di nemici golosi\n self.greedyEnemies = pygame.sprite.Group()\n\n # le bombe che spaccano i muri\n self.wallBombs = pygame.sprite.Group()\n\n # i ricaritori temporali\n self.timeReloaders = pygame.sprite.Group()\n\n # le monete da raccogliere\n self.coins = pygame.sprite.Group()\n\n # i killer dei nemici\n self.enemyKillers = pygame.sprite.Group()\n\n # i portali per spostarsi in nuove aree\n self.portals = pygame.sprite.Group()\n\n # i nemici che rendono invisibile il giocatore\n self.invisibilityPlayers = pygame.sprite.Group()\n\n # i proiettili sparati dal giocatore\n self.playerBullets = pygame.sprite.Group()\n\n # i proiettili sparati dagli shooters\n self.shooterBullets = pygame.sprite.Group()\n\n # il bonus che fornisce proiettili sparati dal giocatore\n self.bonusPlayerBullets = pygame.sprite.Group()\n\n\n 
self.free_locations = []\n\n # genero il labirinto che prescinde dai fattori di scala\n self.maze = self.generate_maze()\n #print(self.maze)\n\n # il giocatore e i nemici hanno una dimensione che dipende dal fattore di scala\n self.player = pygame.sprite.GroupSingle(Player(int(self.scale * 0.8), int(self.scale * 0.8),\n self.scale, 1,\n \"Sprites/pac-classic/ghost-red-front.png\",\n )\n )\n self.player.sprite.setWalls(self.walls)\n # imposto le immagini del giocatore sulla base della posizione\n # l'ordine è UP, DOWN , RIGHT, LEFT\n\n self.player.sprite.setImages([\n [\"Sprites/pac-classic/ghost-red-rear.png\",\n \"Sprites/pac-classic/ghost-red-front.png\",\n \"Sprites/pac-classic/ghost-red-right.png\",\n \"Sprites/pac-classic/ghost-red-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-orange-rear.png\",\n \"Sprites/pac-classic/ghost-orange-front.png\",\n \"Sprites/pac-classic/ghost-orange-right.png\",\n \"Sprites/pac-classic/ghost-orange-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-lblue-rear.png\",\n \"Sprites/pac-classic/ghost-lblue-front.png\",\n \"Sprites/pac-classic/ghost-lblue-right.png\",\n \"Sprites/pac-classic/ghost-lblue-left.png\",\n ],\n\n ]\n )\n\n\n\n\n #\n # CREAZIONE DEGLI SPRITES\n #\n\n # CREO I MIEI NEMICI\n self.createEnemies(self.numEnemies,self.enemies_filename,self.enemies)\n\n # CREO I MIEI NEMICI CHE SPARANO che aggiungo allo stesso gruppo dei nemici!\n self.createShooters(self.numShooters, self.shooters_filename, self.shooters_bullets_filename,self.shooterBullets,\n self.sound_weapon, self.enemies)\n\n # CREO LE BOMBE che sono ObjectDestroyer che distruggono le monete\n self.createObjectDestroyers(self.bonus_bombs,self.bomb_filename,self.bombs, self.coins)\n\n\n # CREO LE WALL BOMBS che sono WallDestroyer che consentono di distruggere i muri\n # interni del labirinto\n self.createInnerObjectDestroyers(self.ncols, self.nrows,self.bonus_wall_bombs,\n self.wall_bomb_filename,self.wallBombs,self.walls)\n # CREO GLI ENEMY KILLERS che sono ObjectDestroyer che consentono di eliminare i nemici\n self.createObjectDestroyers(self.bonus_enemy_killers, self.enemy_killers_filename, self.enemyKillers, self.enemies)\n\n # Creo GREEDY_ENEMIES come ENEMY che consentono di rendere, alternativamente, i nemici golosi di monete oppure no\n self.createEnemies(self.bonus_greedy_enemies, self.greedy_enemies_filename, self.greedyEnemies)\n\n # Alternativamente potrei creare GREED ENEMIES come ObjectDestroyer che in realtà non distruggono niente, ma rendono \"golosi\"\n # i nemici che stanno intorno a loro in modo che inizino a mangiare monete. Se stanno già mangiando\n # monete, al contrario, dovrebbero smettere. 
CHIEDERLO COME ESERCIZIO\n\n # CREO I TIME RELOADERS che consentono di ripristinare il tempo\n self.createEnemies(self.numTimeReloaders, self.time_reloaders_filename, self.timeReloaders)\n\n # CREO I PORTALI che consentono di trasferirsi in una nuova locazione random\n self.createEnemies(self.bonus_portals, self.portals_filename, self.portals)\n\n # CREO I TIME LIMITED POWERS, come quello che rende invisibile il giocatore\n self.createTimeLimitedPowers(self.bonus_invisibility_players, self.invisibility_players_filename, self.invisibilityPlayers)\n # e come il ricaricatore di proiettili\n self.createTimeLimitedPowers(self.bonus_player_bullets, self.bonus_player_bullets_filename, self.bonusPlayerBullets)\n\n self.mazeSurf = pygame.Surface((self.ncols * self.scale, self.nrows * self.scale))\n # disegno il labirinto coi suoi muri\n self.drawMaze()\n\n self.scrollSurface = self.mazeSurf.copy()\n #self.scrollSurface.fill((0, 0, 0))\n\n pos = random.choice(self.free_locations)\n print(\"Loc Player:%s\" % str(pos))\n\n self.player.sprite.setPosition(pos)\n\n # imposto posizione e movimento iniziale\n # ai vari gruppi di sprites\n\n self.setInitialPosition(self.enemies.sprites())\n self.setInitialPosition(self.bombs.sprites())\n self.setInitialPosition(self.wallBombs.sprites())\n self.setInitialPosition(self.timeReloaders.sprites())\n self.setInitialPosition(self.enemyKillers.sprites())\n self.setInitialPosition(self.greedyEnemies.sprites())\n self.setInitialPosition(self.portals.sprites())\n self.setInitialPosition(self.invisibilityPlayers.sprites())\n self.setInitialPosition(self.bonusPlayerBullets.sprites())\n\n #self.setInitialPosition(self.shooters.sprites())\n\n # normalmente i nemici non mangiano monete...\n self.enemies_eater = False\n\n\n # a inizio livello si dà tempo di 5 secondi al Giocatore per divincolarsi\n # da eventuali nemici che compaiono negli immediati dintorni\n # della posizione (casuale) in cui si viene a trovare\n # il giocatore a inizio livello\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY, (self.time,5))\n\n # imposto la musica del livello e la mando in esecuzione\n self.music = level.get(\"music\", \"./Music/Soundimage/Techno-Gameplay_Looping.ogg\")\n pygame.mixer.music.load(self.music)\n # mando in esecuzione in modalità loop (valore -1)\n pygame.mixer.music.play(-1)\n\n # barra di stato del gioco con informazioni sul punteggio\n self.setupGamebarSurface()", "def save(self):\n data = \"\"\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n data += self.blocks[y][x]\n data += '\\n'\n print data\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Save level'}\n # filename = tkFileDialog.asksaveasfile(**options)\n filename = asksaveasfilename(**options)\n if filename:\n with open(filename, \"w\") as level:\n level.write(data)", "def create_scene():\n create_floor()\n if config.M != \"\":\n if config.LEVEL == 1:\n create_wall()\n create_enemy()\n create_gap()\n create_platform()\n create_marijuana()\n create_star()\n create_fish()\n elif config.LEVEL == 2:\n create_boss()\n create_platform()\n create_star()", "def makeSkeleton(self):\n model = \"phase_5/models/char/cog\" + string.upper(self.style.body) + \"_robot-zero\"\n anims = self.generateAnimDict()\n\n # remember the current anim\n anim = self.getCurrentAnim()\n\n # grab the drop shadow\n dropShadow = self.dropShadow\n if not dropShadow.isEmpty():\n 
dropShadow.reparentTo(hidden)\n \n # remove the old geometry\n self.removePart(\"modelRoot\")\n\n # load the skeleton geometry\n self.loadModel(model)\n self.loadAnims(anims)\n\n # set the scale on the skeleton actor (plus a little extra to make it look right)\n self.getGeomNode().setScale(self.scale * 1.0173)\n self.generateHealthBar()\n self.generateCorporateMedallion()\n # set the appropriate tie texture\n self.generateCorporateTie()\n self.setHeight(self.height)\n\n \n # some of the geometry needs to be backfaced and billboarded\n parts = self.findAllMatches('**/pPlane*')\n for partNum in range(0, parts.getNumPaths()):\n #print 'found billboarded part!'\n bb = parts.getPath(partNum)\n bb.setTwoSided(1)\n \n # redo the nametag and drop shadow\n self.setName(TTLocalizer.Skeleton)\n nameInfo = TTLocalizer.SuitBaseNameWithLevel % {\"name\": self.name,\n \"dept\": self.getStyleDept(),\n \"level\": self.getActualLevel(),}\n self.setDisplayName( nameInfo )\n\n # re-find the useful nulls\n self.leftHand = self.find(\"**/joint_Lhold\")\n self.rightHand = self.find(\"**/joint_Rhold\")\n self.shadowJoint = self.find(\"**/joint_shadow\")\n self.nametagNull = self.find(\"**/joint_nameTag\")\n \n if not dropShadow.isEmpty():\n dropShadow.setScale(0.75)\n if not self.shadowJoint.isEmpty():\n dropShadow.reparentTo(self.shadowJoint)\n\n # start the animation again\n self.loop(anim)\n\n # set the flag\n self.isSkeleton = 1", "def setupNewGame(self):\r\n self.level = 1\r\n self.num_cows = 2\r\n self.num_farmers = 1\r\n self.levelHeading = Text(self.gameDisplay, 120, 425, 175, self.light_orange, \"Farm 1\")\r\n self.shield_indicator.image = self.greenShield\r\n updatedHeading = self.levelHeading\r\n self.startUX[0] = updatedHeading", "def save(self) -> None:\n path = os.path.join(os.getcwd(), 'mancalaGame.gg')\n with open(path, 'wb') as handle: pickle.dump({\n \"state\": self.currentNode.gameState,\n \"playerType\": 1 if self.currentNode.playerType is MaxMinPlayer.MAX_PLAYER else 0\n },\n handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"game saved\")", "def setup_level_2() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 19, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(12, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 5, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(55, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(55, settings.HEIGHT, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 15, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(24, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(29, 45, 47, level.wall_list)\n create_and_add_vertical_walls_to_list(24, 29, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(44, 54, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 55, 73, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 24, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(20, 24, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 19, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(34, 54, 24, level.wall_list)\n create_and_add_horiontal_walls_to_list(48, 60, 29, 
level.wall_list)\n create_and_add_horiontal_walls_to_list(68, 74, 29, level.wall_list)\n create_and_add_horiontal_walls_to_list(48, 60, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(68, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 73, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list) \n\n #create sword item for \"outfit change\" \n create_and_add_item_to_list(\"pics\\sword_item.png\", 0.05, 75, 100, level.item_list)\n\n #create mysterious figure for level\n create_and_add_character_to_list(\"pics\\mystery_figure.png\", 0.095, 270, 350, level.character_list)\n\n #create dialogue for mysterious figure character\n find_disguise_convo = Dialogue(300, 390, 300, 50, \"Someone will notice you!\\n I've hidden something in the servant's quarters,\\n to make you fit in with the nobility.\")\n level.dialogue_list.append(find_disguise_convo)\n\n #info prompts and text for level\n balcony = RoomInfo(640, 500, \"Balcony. Along with the forest and sea, you can see that a battle is coming.\")\n level.room_info_list.append(balcony)\n kitchen = RoomInfo(270, 90, \"Kitchen. There are plentry of servants around. Your torn clothes are eye-catching, and may sabotage your escape\")\n level.room_info_list.append(kitchen)\n great_hall = RoomInfo(270, 470, \"Great hall. You could have sworn that someone recognized you, but nobody acts to capture you.\")\n level.room_info_list.append(great_hall)\n sitting_room = RoomInfo(650, 230, \"Private sitting room. You find several sketches... sketches that look like a richer, healthier version of you.\")\n level.room_info_list.append(sitting_room)\n\n return level", "def setup_level_1() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 39, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 25, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 44, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 45, 74, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 30, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 9, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(15, 24, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 24, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list)\n\n #create knight character for level\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.2, 270, 470, level.character_list)\n\n #knight asks for bribe\n guard_convo = Dialogue(300, 500, 150, 50, \"I know who you are...\\n if 
you pay me,\\n I'll turn a blind eye.\")\n level.dialogue_list.append(guard_convo)\n\n #create coin item to bribe knight character\n create_and_add_item_to_list(\"pics\\gold_1.png\", 0.5, 400, 250, level.item_list)\n\n #create prompts and info for rooms for object\n cell = RoomInfo(120, 100, \"Dungeon cell. There's a note and key. Someone's waiting for you in the garden.\")\n level.room_info_list.append(cell)\n guard_room = RoomInfo(450, 280, \"Guardroom. There's the unconconsious bodies of the guards. Your saviours must've gone to great lengths...\")\n level.room_info_list.append(guard_room)\n torture_chamber = RoomInfo(120, 280, \"Torture chamber. You've been here before. They were questioning you, but you didn't answer.\")\n level.room_info_list.append(torture_chamber)\n battle_room = RoomInfo(650, 280, \"Battle room. You see that your captors are fighting revolutionaries- those who seek to bring back a lost king.\")\n level.room_info_list.append(battle_room)\n stairwell = RoomInfo(220, 520, \"Stairwell. There's a lone guard who doesn't look surprised to see you\")\n level.room_info_list.append(stairwell)\n\n return level", "def Skeleton(self):\n self.type = \"Skeleton\"\n self.image = pygame.image.load(\"Skeleton.gif\")\n self.bullet = pygame.image.load(\"ArrowLeft.gif\")\n self.cost = 3\n self.health = 30\n self.max_health = self.health\n self.base_damage = 2 \n self.damagedice = (3,2)\n self.base_defense = 1\n self.defensedice = (3,1)\n self.attack_cap = 2\n self.ranged = True\n self.color = GREY1\n self.activate()", "def prepare(self, level):\n self.greeterboard.welcome_player(\n i18n.OUT_MSG_LUCK.format(self.player_name)\n )\n self.scoreboard.set_labels()\n self.scoreboard.set_level(level)\n self.word_view.setText(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()", "def save(self):\n print(\"Clicked S(ave)\")\n saved_tiles = []\n for tile in self.tiles.sprites():\n # Append tiles pos to correct list if tile is occupied\n if not tile.is_available:\n tiles_attr = {\"type\": tile.tile_type, \"pos\": tile.rect.topleft}\n saved_tiles.append(tiles_attr)\n save_tiles(saved_tiles, lvl=\"02\")\n print(saved_tiles)\n # Flash white screen when level is saved\n self.surface.fill(s.WHITE)\n pygame.display.flip()\n pygame.time.wait(100)\n print(\"Saved\")", "def save_sample(self, wad, path, root_path = '', wad_info=None):\n os.makedirs(path, exist_ok=True)\n for level in wad['levels']:\n base_filename=path+wad['wad_name'].split('.')[-2]+'_'+level['name']\n # Path relative to the dataset root that will be stored in the database\n relative_path = base_filename.replace(root_path, '')\n # Adding the features\n for map in level['maps']:\n # Adding the corresponding path as feature for further access\n level['features']['path_{}'.format(map)] = relative_path + '_{}.png'.format(map)\n io.imsave(base_filename + '_{}.png'.format(map), level['maps'][map])\n for wadinfo in wad_info:\n # Adding wad info (author, etc) to the level features.\n if wadinfo not in level['features']: # Computed features have priority over provided features\n level['features'][wadinfo] = wad_info[wadinfo]\n # Completing the features with the level slot\n level['features']['slot'] = level['name']\n # Doing the same for the other features\n level['features']['path_json'] = relative_path + '.json'\n with open(base_filename + '.json', 'w') as jout:\n json.dump(level['features'], jout)\n # Saving the text representation\n with open(base_filename + '.txt', 'wb') as txtout:\n txtout.writelines([bytes(row + [10]) for row in 
level['text']])\n # Saving the graph\n if 'graph' in level:\n with open(base_filename + '.networkx', 'wb') as graphout:\n nx.write_gpickle(level['graph'], graphout)", "def level_creator(self, screen: pygame.Surface) -> None:\n self.name = input()\n f = open(normpath(self.name), 'w')\n work = True\n while work == True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n work = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.x1, self.y1 = event.pos\n self.checker[0] = True\n if event.type == pygame.MOUSEBUTTONUP:\n self.x2, self.y2 = event.pos\n self.checker[1] = True\n if event.type == pygame.KEYDOWN and chr(event.key) == 'o':\n self.level_write(f)\n work = False\n if self.checker[0] == self.checker[1] == True:\n if self.x1 > self.x2 and self.y1 > self.y2:\n self.y1, self.y2, self.x1, self.x2 = self.y2, self.y1, self.x2, self.x1\n elif self.x1 > self.x2 and self.y1 < self.y2:\n self.x1, self.x2 = self.x2, self.x1\n elif self.x1 < self.x2 and self.y1 > self.y2:\n self.y1, self.y2 = self.y2, self.y1\n self.objects.append(pygame.Rect(self.x1, self.y1, abs(\n self.x2-self.x1), abs(self.y2-self.y1)))\n self.checker[0], self.checker[1] = False, False\n self.draw(screen)", "def skeleton_getCreateDict(self, count = None):\n _short = self.mNode\n _str_func = 'skeleton_getCreateDict'\n log.debug(cgmGEN.logString_start(_str_func))\n\n \n mModule = self.moduleTarget \n\n _mod = self.getBlockModule()\n if not _mod:\n log.warning(\"|{0}| >> No module found for: {1}\".format(_str_func,blockType))\n return False \n\n #Validate mode data -------------------------------------------------------------------------\n try:_d_skeletonSetup = _mod.d_skeletonSetup\n except:_d_skeletonSetup = {}\n\n _mode = _d_skeletonSetup.get('mode',False)\n _targetsMode = _d_skeletonSetup.get('targetsMode','msgList')\n _targetsCall = _d_skeletonSetup.get('targets',False)\n _helperUp = _d_skeletonSetup.get('helperUp','y+')\n _countAttr = _d_skeletonSetup.get('countAttr','numberJoints')\n \n _l_targets = []\n\n log.debug(\"|{0}| >> mode: {1} | targetsMode: {2} | targetsCall: {3}\".format(_str_func,_mode,_targetsMode,_targetsCall))\n\n #...get our targets\n if _targetsMode == 'msgList':\n _l_targets = ATTR.msgList_get(_short, _targetsCall)\n elif _targetsMode == 'msg':\n _l_targets = ATTR.get_message(_short, _targetsCall)\n elif _targetsMode == 'self':\n _l_targets = [_short]\n elif _targetsMode == 'prerigHandles':\n _ml_rigHandles = self.msgList_get('prerigHandles',asMeta = True)\n if not _ml_rigHandles:\n raise ValueError, \"No rigHandles. Check your state\" \n \n #_ml_controls = [self] + _ml_rigHandles\n \n for i,mObj in enumerate(_ml_rigHandles):\n log.debug(\"|{0}| >> {1} | {2}\".format(_str_func,i,mObj.mNode))\n if mObj.getMessage('jointHelper'):\n _l_targets.append(mObj.jointHelper.mNode)\n else:\n _l_targets.append(mObj.mNode) \n else:\n raise ValueError,\"targetsMode: {0} is not implemented\".format(_targetsMode)\n \n if not _l_targets:\n log.error(\"|{0}| >> mode: {1} | targetsMode: {2} | targetsCall: {3}\".format(_str_func,_mode,_targetsMode,_targetsCall))\n raise ValueError, \"No targets found. Check your settings\"\n \n log.debug(\"|{0}| >> Targets: {1}\".format(_str_func,_l_targets))\n #pprint.pprint(vars())\n \n \"\"\"\n _helperOrient = ATTR.get_message(_short,'orientHelper')\n if not _helperOrient:\n log.debug(\"|{0}| >> No helper orient. 
Using root.\".format(_str_func)) \n _axisWorldUp = MATH.get_obj_vector(_short,_helperUp) \n else:\n log.debug(\"|{0}| >> Found orientHelper: {1}\".format(_str_func,_helperOrient)) \n _axisWorldUp = MATH.get_obj_vector(_helperOrient[0], _helperUp)\n log.debug(\"|{0}| >> axisWorldUp: {1}\".format(_str_func,_axisWorldUp)) \n \"\"\"\n\n if count:\n _joints = count\n else:\n _joints = ATTR.get(_short,_countAttr)\n\n #...get our positional data\n _d_res = {}\n\n if _mode in ['vectorCast','curveCast']:\n if _mode == 'vectorCast':\n _p_start = POS.get(_l_targets[0])\n _p_top = POS.get(_l_targets[1]) \n _l_pos = get_posList_fromStartEnd(_p_start,_p_top,_joints) \n elif _mode == 'curveCast':\n import cgm.core.lib.curve_Utils as CURVES\n _crv = CURVES.create_fromList(targetList = _l_targets)\n _l_pos = CURVES.returnSplitCurveList(_crv,_joints)\n mc.delete(_crv)\n _d_res['jointCount'] = _joints\n _d_res['helpers'] = {#'orient':_helperOrient,\n 'targets':_l_targets}\n elif _mode == 'handle':\n _l_pos = [POS.get(_l_targets[0])]\n _d_res['targets'] = _l_targets \n else:\n raise ValueError,\"mode: {0} is not implemented\".format(_mode) \n\n _d_res['positions'] = _l_pos\n _d_res['mode'] = _mode\n #_d_res['worldUpAxis'] = _axisWorldUp \n\n #pprint.pprint(_d_res)\n return _d_res", "def setup_level_4() -> object:\n #create level object\n level = Level()\n return level", "def setup_level_3() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, settings.HEIGHT, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 24, 49, level.wall_list)\n create_and_add_vertical_walls_to_list(24, settings.HEIGHT, 74, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 24, 4, level.wall_list) \n create_and_add_horiontal_walls_to_list(30, 49, 4, level.wall_list) \n create_and_add_horiontal_walls_to_list(4, 19, 24, level.wall_list)\n create_and_add_horiontal_walls_to_list(34, 74, 24, level.wall_list)\n \n #create rebels for level\n create_and_add_character_to_list(\"pics\\mystery_figure.png\", 0.12, 300, 490, level.character_list)\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.21, 230, 440, level.character_list)\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.21, 370, 440, level.character_list)\n\n #rebels greet player\n rebel_1_greet = Dialogue(200, 490, 100, 20, \"It's the lost king!\")\n level.dialogue_list.append(rebel_1_greet)\n rebel_2_greet = Dialogue(400, 490, 130, 40, \"We've spent so long\\ntrying to free you.\")\n level.dialogue_list.append(rebel_2_greet)\n rebel_3_greet = Dialogue(300, 540, 150, 40, \"You're our only hope,\\nkeep going.\")\n level.dialogue_list.append(rebel_3_greet)\n\n return level", "def prep_level(self):\r\n\t\tlevel_str=\"Level: \"+format(self.stats.level)\r\n\t\tself.level_image=self.font.render(level_str, True,\r\n\t\t\tself.text_color, self.ai_settings.bg_color)\r\n\r\n\t\t#Position the level below the score.\r\n\t\tself.level_rect=self.level_image.get_rect()\r\n\t\tself.level_rect.centerx=self.screen_rect.centerx*1.5\r\n\t\tself.level_rect.top=self.score_rect.top", "def skeleton_hand(save_path=None):\n edged = auto_hand_img() / 255\n im2 = to_rgba(np.copy(edged) * 255)\n skel = skeletonize(edged)\n im2[skel] = [255, 0, 0, 255]\n if save_path is None:\n return im2\n else:\n save_image(im2, 
(8,8), save_path)\n return im2", "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "def _commit_level(self):\n assert self.current_level is not None, \"Cannot write a level with an empty name\"\n # Create a new level descriptor in the lump directory\n self.wad.add_lump(self.current_level, None)\n # Add the lumps to WAD file\n self.wad.add_lump('THINGS', self.lumps['THINGS'])\n self.wad.add_lump('LINEDEFS', self.lumps['LINEDEFS'])\n self.wad.add_lump('SIDEDEFS', self.lumps['SIDEDEFS'])\n self.wad.add_lump('VERTEXES', self.lumps['VERTEXES'])\n self.wad.add_lump('SECTORS', self.lumps['SECTORS'])\n self.lumps = {'THINGS':Things(), 'LINEDEFS':Linedefs(), 'VERTEXES':Vertexes(),'SIDEDEFS': Sidedefs(), 'SECTORS':Sectors()}", "def level1Representation(self):\n rep = Level.load_rep('level/level1.txt')\n objects = {\n 'lever': {'l', 'm'},\n 'door': {'p', 'q'},\n 'player': {'j'},\n 'enemy': {'e'},\n 'object': {'a'},\n 'robot': {'r'}\n }\n toggle_objects = {\n 'l': {'p'},\n 'm': {'q'}\n }\n tile_map = {\n 'x': pygame.image.load('img/wall.png'),\n ',': pygame.image.load('img/dark_gray_tile.png'),\n '.': pygame.image.load('img/light_gray_tile.png'),\n '-': pygame.image.load('img/dark_red_tile.png')\n }\n level = Level(\n rep,\n objects,\n toggle_objects,\n self.width,\n self.height,\n Dimensions.width,\n Dimensions.height\n )\n class_map = {\n 'lever': Lever,\n 'robot': Robot,\n 'enemy': Enemy,\n 'player': MainCharacter,\n 'door': Door,\n 'object': Treasure\n }\n values = {\n 'l': {'image': 'img/lever_a_0.png', 'screen': self.screen},\n 'm': {'image': 'img/lever_b_0.png', 'screen': self.screen},\n 'p': {'toggled': False},\n 'q': {'toggled': False},\n 'j': {},\n 'e': {},\n 'a': {},\n 'r': {}\n }\n\n coords = level.coordinates(['x'])\n unwalkable = {x for k in coords for x in coords[k]}\n\n level1Dict = {\n 'levelIndex': 1,\n 'rep': rep,\n 'objects': objects,\n 'toggle_objects': toggle_objects,\n 'tile_map': tile_map,\n 'level': level,\n 'class_map': class_map,\n 'values': values,\n 'coords': coords,\n 'unwalkable': unwalkable,\n 'config_ai' : self.level1AI\n }\n return level1Dict", "def main():\n # ===================== Main Set-Up ===================== #\n pygame.init()\n \n FPS = 30\n frame_count = 0\n FPS_CLOCK = pygame.time.Clock()\n \n RESOLUTION = (800, 600)\n SCREEN = pygame.display.set_mode(RESOLUTION)\n \n pygame.display.set_caption(\"Save the Hero\")\n \n pygame.mouse.set_visible(False)\n \n # ===================== Sprite Set-Up ===================== #\n key = Key()\n \n # ===================== Other ===================== #\n level_list = []\n level_list.append(None) # Needed for indexing!\n level_list.append(Level1(SCREEN))\n level_list.append(Level2(SCREEN))\n level_list.append(Level3(SCREEN))\n \n title = Title_Screen(SCREEN)\n \n title.load_music()\n title.draw()\n SCREEN.blit(title.POINTER, (310, 230))\n \n difficulty = Difficulty_Screen(SCREEN)\n score = player_score.Player_Score(SCREEN)\n \n f_glow = 
False\n g_glow = False\n h_glow = False\n j_glow = False\n\n difficulty_screen_flag = False\n gameplay_screen_flag = False\n results_screen_flag = False\n initialization_flag = False\n \n # ===================== Start of Main Game Loop ===================== #\n while True:\n # Reset position of key\n key.rect.x = 0\n \n # ===================== Start of Key Listener ===================== # \n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n # ===================== Menu Controls ===================== #\n if title.selection != 0:\n # Returns to title screen\n if event.key == K_ESCAPE: \n title.load_music()\n title.draw()\n \n # Reset values\n frame_count = 0\n \n score.reset_variables()\n title.reset_variables()\n difficulty.reset_variables()\n \n title.draw_pointer()\n \n gameplay_screen_flag = False\n results_screen_flag = False\n \n if (title.selection == 0 or title.selection == 1 and \n difficulty.selection == 0):\n # FOR MENU SELECTIONS ONLY!\n if event.key == K_DOWN:\n constants.sound_select.play()\n # Title screen\n if title.selection == 0:\n title.option += 1\n title.draw()\n title.draw_pointer()\n # Difficulty screen\n elif title.selection == 1:\n difficulty.option += 1\n difficulty.draw()\n difficulty.draw_pointer()\n if event.key == K_UP:\n constants.sound_select.play() \n # Title screen\n if title.selection == 0:\n title.option -= 1\n title.draw()\n title.draw_pointer()\n # Difficulty screen\n elif title.selection == 1:\n difficulty.option -= 1\n difficulty.draw()\n difficulty.draw_pointer()\n if event.key == K_RSHIFT:\n if difficulty.selection == 0:\n # No level was selected\n constants.sound_choose.play()\n if title.selection == 0:\n # Title screen\n title.selection = title.option\n \n if title.selection == 1:\n difficulty_screen_flag = True\n elif title.selection == 1:\n # Difficulty screen\n difficulty.selection = difficulty.option\n \n gameplay_screen_flag = True\n initialization_flag = True\n \n # ===================== End of Menu Controls ===================== #\n \n # ===================== Game Controls ===================== # \n if difficulty.selection != 0:\n # A track is selected\n if event.key == K_f:\n key.rect.x = 230\n f_glow = True\n if event.key == K_g:\n key.rect.x = 330\n g_glow = True\n if event.key == K_h:\n key.rect.x = 430\n h_glow = True\n if event.key == K_j:\n key.rect.x = 530\n j_glow = True\n \n if event.type == KEYUP:\n if difficulty.selection != 0:\n if event.key == K_f:\n f_glow = False\n if event.key == K_g:\n g_glow = False\n if event.key == K_h:\n h_glow = False\n if event.key == K_j:\n j_glow = False\n # ===================== End of Game Controls ===================== #\n \n # ===================== End of Key Listener ===================== #\n \n # Process selections\n if title.selection == 1: \n if difficulty_screen_flag:\n difficulty.draw()\n SCREEN.blit(difficulty.POINTER, (310, 230))\n \n difficulty_screen_flag = False\n if title.selection == 2:\n SCREEN.blit(pygame.image.load(\"images/other/rules.png\").convert(), \n (0,0))\n elif title.selection == 3:\n SCREEN.blit(pygame.image.load(\"images/other/credits.png\").convert(), \n (0,0))\n elif title.selection == 4:\n pygame.quit()\n sys.exit()\n\n # ===================== Start of Game ===================== #\n if gameplay_screen_flag:\n # One instance of the game\n if initialization_flag:\n # Empty the group of sprites\n level_list[difficulty.selection].note_sprites.empty()\n 
level_list[difficulty.selection].enemy_note_sprites.empty()\n # Generate a new list of notes\n level_list[difficulty.selection].load_notes()\n level_list[difficulty.selection].load_music()\n level_list[difficulty.selection].load_enemy_notes()\n \n hit = (pygame.image.load(\"images/other/hit.png\")\\\n .convert_alpha())\n slash = (pygame.image.load(\"images/other/slash.png\")\n .convert_alpha())\n\n initialization_flag = False\n \n level_list[difficulty.selection].draw()\n \n if f_glow:\n letter_f = constants.CALIBRI_60.render(\"F\", True, \n constants.GREY)\n SCREEN.blit(letter_f, (244, 544))\n if g_glow:\n letter_g = constants.CALIBRI_60.render(\"G\", True, \n constants.GREY)\n SCREEN.blit(letter_g, (338, 544))\n if h_glow:\n letter_h = constants.CALIBRI_60.render(\"H\", True, \n constants.GREY)\n SCREEN.blit(letter_h, (438, 544))\n if j_glow:\n letter_j = constants.CALIBRI_60.render(\"J\", True, \n constants.GREY)\n SCREEN.blit(letter_j, (548, 543))\n \n # Move NORMAL notes\n for note in level_list[difficulty.selection].note_sprites:\n note.fall()\n # Notes that fall out of the screen\n if note.rect.y > 600:\n level_list[difficulty.selection].note_sprites.remove(note)\n score.timing = \"MISS\"\n score.miss += 1\n \n constants.sound_hit.play() \n SCREEN.blit(hit, (400, 100))\n\n score.combo = 0\n \n note_hit_list = (pygame.sprite.spritecollide\n (key, level_list[difficulty.selection].note_sprites, \n True))\n \n # Timing calculation!\n for note in note_hit_list:\n if (note.rect.y == 530):\n score.timing = \"PERFECT\"\n score.points += 3\n score.perfect += 1\n elif (note.rect.y > 530 and note.rect.y <= 550):\n score.timing = \"GREAT\"\n score.points += 2\n score.great += 1\n elif (note.rect.y > 550 and note.rect.y <= 580):\n score.timing = \"COOL\"\n score.points += 1\n score.cool += 1\n \n score.combo += 1\n \n if score.combo > score.highest_combo:\n score.highest_combo = score.combo\n \n constants.sound_slash.play() \n SCREEN.blit(slash, (-100, 200))\n \n # Move ENEMY notes \n for note in level_list[difficulty.selection].enemy_note_sprites:\n note.fall()\n \n if note.rect.y > 550:\n (level_list[difficulty.selection]\n .enemy_note_sprites.remove(note))\n\n enemy_note_hit_list = (pygame.sprite.spritecollide\n (key, level_list[difficulty.selection]\n .enemy_note_sprites, True))\n \n if enemy_note_hit_list:\n gameplay_screen_flag = False\n results_screen_flag = True\n \n # Timer\n total_seconds = (level_list[difficulty.selection].music_length - \n (frame_count // FPS))\n minutes = total_seconds // 60\n seconds = total_seconds % 60\n time_string = \"Time Left: {0:02}:{1:02}\".format(minutes, seconds)\n \n if total_seconds < 0:\n total_seconds = 0\n \n # Draw score and timing\n draw_text(SCREEN, \"High Score: \" + \n str(score.high_scores[difficulty.selection]), 620, 20)\n draw_text(SCREEN, \"Points: \" + str(score.points), 620, 50)\n draw_text(SCREEN, time_string, 20, 20)\n \n timing = constants.CALIBRI_48.render(score.timing, True, \n constants.WHITE)\n SCREEN.blit(timing, (350, 150))\n \n if score.combo != 0 and score.timing != \"\":\n combo = constants.CALIBRI_40.render(str(score.combo) + \"x\", \n True, constants.WHITE)\n SCREEN.blit(combo, (370, 200))\n \n # Draw the notes\n level_list[difficulty.selection].note_sprites.draw(SCREEN)\n level_list[difficulty.selection].enemy_note_sprites.draw(SCREEN)\n \n if results_screen_flag:\n score.draw(level_list[difficulty.selection].total_notes)\n score.write_high_score(difficulty.selection)\n gameplay_screen_flag = False \n \n if (total_seconds == 
0):\n results_screen_flag = True\n \n # Count frames to calculate total seconds\n frame_count += 1\n FPS_CLOCK.tick(FPS)\n \n pygame.display.update()\n \n # ===================== End of Game ===================== #\n \n # ===================== End of Main Game Loop ===================== #", "def create_scene(self):\n \n self.scene=soya.World()", "def prepare():\n # getRelativeArt checks if scene is saved\n skeleton_path = paths.getRelativeArt()\n\n # if scene is modified, ask user if they would like to save, not save, or cancel operation\n if not uiwindow.save():\n pm.error('Scene not saved.')\n\n # perform a bone health check before referencing to emphasize any possible errors\n bone.health()\n\n # create new file, reference the skeleton into the new file, create rig group\n pm.newFile(force=True)\n rig_grp = pipernode.createRig()\n pm.createReference(skeleton_path, namespace=pcfg.skeleton_namespace)\n pm.createReference(skeleton_path, namespace=pcfg.bind_namespace)\n skinned_meshes = pipernode.get('piperSkinnedMesh')\n [node.visibility.set(False) for node in skinned_meshes if node.name().startswith(pcfg.bind_namespace)]\n pm.parent(skinned_meshes, rig_grp)\n lockMeshes()\n\n return rig_grp", "def prep_level(self):\n\t\tself.level_image = self.font.render(\"Level: \" + str(self.stats.level), True, self.text_color,self.ai_settings.bg_color)\n\n\t\t#Display the score 10 pixels below the scoreboard.\n\t\tself.level_rect = self.level_image.get_rect()\n\t\tself.level_rect.right = self.score_rect.right\n\t\tself.level_rect.top = self.score_rect.bottom + 10", "def save(self):\n f = open(os.path.join(self.gui.lnp.init_dir, 'init.txt'), 'w')\n f.write(self.init_text.get('1.0', 'end'))\n f.close()\n f = open(os.path.join(self.gui.lnp.init_dir, 'd_init.txt'), 'w')\n f.write(self.d_init_text.get('1.0', 'end'))\n f.close()\n self.gui.load_params()", "def prep_level(self):\n level_str = f\"Level {self.game.level}\"\n self.level_image = self.font.render(level_str, True, self.text_color)\n\n # Position level below the score.\n self.level_rect = self.level_image.get_rect()\n self.level_rect.right = self.score_rect.right\n self.level_rect.top = self.score_rect.bottom + 10", "def newGame(self) -> None:\n self.save_location = f\"{DEFAULT_SAVE_LOCATION}/{DEFAULT_SAVE_NAME}\"\n\n with open(SAVE_TEMPLATE, \"r\") as f:\n return json.load(f)", "def create_visualization(levelname, leveldirectory, spritesdirectory):\r\n\t#Load sprites\r\n\tsprites = {}\r\n\tfor filename in glob.glob(f\"{spritesdirectory}/**/*.png\", recursive=True):\r\n\t\tim = Image.open(filename)\r\n\t\tname = filename.split(\"/\")[-1][:-4]\r\n\t\tsprites[name] = im.convert(\"RGBA\")\r\n\r\n\tlevel = {}\r\n\twith open(f\"{leveldirectory}/{levelname}.txt\") as fp:\r\n\t\tfor y, line in enumerate(fp):\r\n\t\t\tlevel[y] = line[:-1]\r\n\t\t\tprint(f\"{y}:\")\r\n\t\t\tprint(line)\r\n\r\n\tmaxX = len(level[0])\r\n\tmaxY = y+1\r\n\tprint(f\"Max y is {y}\")\r\n\r\n\r\n\r\n\t#Create backdrop of tiled plains sprites to which to write actual sprites\r\n\tdef createTiledPlainsImage():\r\n\t\timage = Image.new(\"RGB\", (maxX*16, (maxY)*16), color=(91, 153, 254))\r\n\t\tpixels = image.load()\r\n\r\n\t\timageToUse = sprites[Tile.reverse_lookup[\"P\"].filename]\r\n\t\tpixelsToUse = imageToUse.load()\r\n\t\tfor y in range(0, maxY):\r\n\t\t\tfor x in range(0, maxX):\r\n\t\t\t\tfor x2 in range(0, 16):\r\n\t\t\t\t\tfor y2 in range(0, 16):\r\n\t\t\t\t\t\tpixels[x*16+x2,y*16+y2] = pixelsToUse[x2,y2][:-1]\r\n\t\treturn image, pixels\r\n\r\n\timage, pixels = 
createTiledPlainsImage()\r\n\r\n\t#Draw the actual building/terrain sprites to the image\r\n\tfor y in range(0, maxY):\r\n\t\tfor x in range(0, maxX):\r\n\t\t\timageToUse = None\r\n\t\t\tprint(y)\r\n\t\t\tprint(maxY)\r\n\t\t\tprint(levelname)\r\n\t\t\tprint(f\"{x}, {y}\")\r\n\t\t\tif level[y][x] in Tile.reverse_lookup.keys():\r\n\t\t\t\tprint(Tile.reverse_lookup[level[y][x]])\r\n\t\t\t\timageToUse = sprites[Tile.reverse_lookup[level[y][x]].filename]\r\n\t\t\tif not imageToUse == None:\r\n\t\t\t\tpixelsToUse = imageToUse.load()\r\n\t\t\t\tx2max = imageToUse.size[0]\r\n\t\t\t\ty2max = imageToUse.size[1]\r\n\t\t\t\tfor x2 in range(0, x2max):\r\n\t\t\t\t\tfor y2 in range(0, y2max):\r\n\t\t\t\t\t\tif pixelsToUse[x2,y2][3]>0:\r\n\t\t\t\t\t\t\tupwardoffset = y2max-16\r\n\t\t\t\t\t\t\tywritepixel = y*16+y2-upwardoffset if y*16+y2-upwardoffset>=0 else y*16+y2\r\n\t\t\t\t\t\t\t#print(ywritepixel)\r\n\t\t\t\t\t\t\t#ywritepixel=y*16+y2\r\n\t\t\t\t\t\t\tpixels[x*16+x2,ywritepixel] = pixelsToUse[x2,y2][:-1]\r\n\r\n\t#save the resulting level image\r\n\tabsleveldir = os.path.abspath(f\"{leveldirectory}\")\r\n\tprint(leveldirectory)\r\n\tprint(absleveldir)\r\n\timage.save(rf\"{absleveldir}/{levelname}.png\",\"PNG\")" ]
[ "0.6555112", "0.63954264", "0.60957974", "0.59587157", "0.595166", "0.5925911", "0.5789536", "0.5764247", "0.57258284", "0.572253", "0.5666389", "0.56209296", "0.56123316", "0.5584476", "0.5532978", "0.5499502", "0.54859984", "0.5485555", "0.54709125", "0.5452631", "0.5421089", "0.53948385", "0.536186", "0.53597265", "0.5355685", "0.5352639", "0.53481346", "0.53475124", "0.5327205", "0.52980167" ]
0.6491367
1
Route to update project. The function has two operations based on the request method.
def update_project(id): if request.method == "POST": result = update_project_to_db( id, request.form["title"], request.form["link"], request.form["description"] ) flash(result) return redirect(url_for("portfolio")) else: project = get_project(id) return render_template("edit_project.html", **project)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def update_project(project_id):\n\n project = mongo.db.projects\n project.find_one_and_update({'_id': ObjectId(project_id) },\n {'$set':\n {'title': request.form.get('title'),\n 'status': request.form.get('status'),\n 'deadline': datetime.strptime(request.form.get('deadline'), '%d/%m/%Y'),\n 'note': request.form.get('note'),\n 'brief': request.form.get('brief')}})\n return redirect(url_for('projects'))", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def put(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n # collect the json from the request\n project_json = simplejson.loads(self.request.body)\n # update the project record\n project = helpers.apply_json_to_model_instance(project, project_json)\n # save the updated data\n project.put()\n \n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(project_json))\n \n else:\n self.response.set_status(404, \"Project not found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def put(self,request, pk =None):\n return Response({'method': 'PUT'})", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project.objects.get(pk=pk)\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n #project.projectNote = Note.objects.get(pk=request.data['projectNote'])\n\n project.lotId = lot\n project.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def put(self ,request, pk = None):\r\n\r\n return Response({'method ': 'put'})", "def edit_project(request, game_project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n game_project = get_object_or_404(GameProject, pk=game_project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if game_project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n game_project_form = ProjectForm(\n request.POST,\n request.FILES,\n instance=game_project\n )\n if game_project_form.is_valid():\n game_project_form.save(commit=False)\n game_project.owner = profile\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n game_project.save()\n messages.success(request, 'Successfully updated project!')\n return redirect(reverse('project_detail', args=[game_project.id]))\n else:\n messages.error(\n request,\n 'Failed to update project. 
Please ensure the form is valid.'\n )\n else:\n game_project_form = ProjectForm(instance=game_project)\n messages.info(request, f'You are editing {game_project.title}')\n\n template = 'gameproject/edit_project.html'\n context = {\n 'game_project_form': game_project_form,\n 'game_project': game_project,\n }\n\n return render(request, template, context)", "def put(self, request, pk=None):\n\n return Response({'method': 'put'})", "def put(self, request, pk=None):\n return Response({'method': 'patch'})", "def update_project(project_id, action):\n log.info('Started updating project')\n\n import json\n from lib import pycurl\n from StringIO import StringIO\n\n buffer = StringIO()\n project_deleted = None\n\n # Set access variables\n api_key = wf.get_password('10k_api_key')\n url = 'https://api.10000ft.com/api/v1/projects/' + \\\n str(project_id) + '?auth=' + str(api_key)\n\n # Determine other variables based on the action\n if action == 'archive_project':\n data = json.dumps({\"id\": project_id, \"archived\": \"true\"})\n request_method = \"PUT\"\n status = 'Archived: '\n if action == 'delete_project':\n data = json.dumps({})\n request_method = \"DELETE\"\n status = 'Deleted: '\n\n # Do the request\n c = pycurl.Curl()\n c.setopt(c.URL, url)\n c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json'])\n c.setopt(pycurl.CUSTOMREQUEST, request_method)\n c.setopt(pycurl.POSTFIELDS, data)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n c.close()\n\n # Capture the response and store the json in a dictionary\n result = buffer.getvalue()\n log.info('Request is finished. Result from 10.000ft: ' + str(result))\n\n project = ''\n\n try:\n # Test if result is valid JSON\n project = json.loads(result)\n except ValueError, e:\n # When the project is deleted, 10.000ft responds with an empty string\n # (no JSON)\n project_deleted = True\n\n # Finishing up based on response from 10.000ft\n if project_deleted is True:\n # The project is deleted!\n log.info('The project with id ' + project_id +\n ' is succesfully deleted from 10.000ft')\n notify_title = 'Your project is deleted!'\n notify_text = 'The project is succesfully deleted from 10.000ft'\n\n update_data('force')\n\n elif 'id' in project:\n # If we get an object with a project ID this means that the project\n # update of data was succesfull\n log.debug('Processed result to project: ' + str(project))\n # If everything goes well 10.000ft returns all the updated project info\n notify_title = 'Your project is updated!'\n notify_text = status + project['name']\n\n # Initiate force update\n update_data('force')\n\n elif 'message' in project:\n # 10.000ft returns a message if something went wrong\n notify_title = 'Something went wrong :-/'\n notify_text = project['message']\n log.info(\n 'Something went wrong :-/.'\n ' Message from 10.000ft: ' + str(project['message']))\n\n else:\n notify_title = 'An error occured :-/)'\n notify_text = 'Check the log files for more information'\n\n return notify(notify_title, notify_text)", "def edit_project(project_id):\n \n if 'username' in session: \n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n form=ProjectForm()\n form.title.data = project['title']\n form.status.data = project['status']\n form.deadline.data = project['deadline'].strftime('%d/%m/%Y')\n form.brief.data = project['brief']\n form.note.data = project['note']\n return render_template('pages/editproject.html', form=form, project=project, legend='Edit your project')", "def update(self, request, pk=None):\n\n return 
Response({'http_method': 'PUT'})", "def update(self):\n update_url = f'{self._tower.api}/projects/{self.id}/update/'\n response = self._tower.session.post(update_url)\n if not response.ok:\n self._logger.error(f\"Error updating the project '{self.name}'. response was: {response.text})\")\n return response.json() if response.ok else {}", "def patch(self, project_id):\n authenticated_user_id = token_auth.current_user()\n if not ProjectAdminService.is_user_action_permitted_on_project(\n authenticated_user_id, project_id\n ):\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n try:\n project_dto = ProjectDTO(request.get_json())\n project_dto.project_id = project_id\n project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"Error validating request: {str(e)}\")\n return {\"Error\": \"Unable to update project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n ProjectAdminService.update_project(project_dto, authenticated_user_id)\n return {\"Status\": \"Updated\"}, 200\n except InvalidGeoJson as e:\n return {\"Invalid GeoJson\": str(e)}, 400\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def patch(self , request , pk = None ):\r\n return Response({'method':'patch'})", "def patch(self, request , pk=None):\n return Response({'message':'PATCH'})", "def patch(self,request,pk = None):\n return Response({'method': 'PATCH'})", "def update():\n return 'update api in put'", "def put(self, request, pk=None):\n return Response({'method': 'PUT'})", "def put(self, request, pk=None):\n return Response({'method': 'PUT'})", "def put(self, request, pk=None):\n return Response({'method': 'PUT'})", "def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})", "def patch(self, request, pk=None):\n\n return Response({'method': 'patch'})", "def put(self,request,pk=None):\n return Response({'method':'Put'})" ]
[ "0.7155244", "0.6591015", "0.6561205", "0.64658177", "0.64653176", "0.6429136", "0.6314935", "0.62550044", "0.6197096", "0.6190579", "0.6131291", "0.6110216", "0.61091644", "0.61053795", "0.6087666", "0.6058647", "0.602651", "0.602456", "0.60025024", "0.5998193", "0.5983591", "0.59544796", "0.59422547", "0.59081197", "0.5897855", "0.5897855", "0.5897855", "0.5867616", "0.586622", "0.58509624" ]
0.6630064
1
Route to add project. The function has two operations based on the request method.
def add_project(): if request.method == "POST": result = add_project_to_db( request.form["title"], request.form["link"], request.form["description"] ) flash(result) return redirect(url_for("portfolio")) else: return render_template("add_project.html")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_projects_route():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n if post_data is not None:\n add_project(post_data)\n response_object['message'] = 'Project added!'\n else:\n response_object['projects'] = get_projects()\n return jsonify(response_object)", "def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))", "def add_project(request):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to add project.\\\n # Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))", "def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. 
Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)", "def post(self, request, formal=None):\n serializers = ProjectSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data, status=status.HTTP_201_CREATED)\n permission_classes=(IsAdminOrReadOnly)\n return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)", "def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return", "def get_add_project_form():\n\n return render_template(\"project_add.html\")", "def add_task():\n found = False\n project_id = None\n task = request.form['task']\n project = request.form['project']\n \n if not task:\n return redirect('/')\n\n if not project:\n project = 'Tasks'\n\n projects = Projects.query.all()\n\n for proj in projects:\n if proj.project_name == project:\n found = True\n\n # add the project if not in database already\n if not found:\n add_project = Projects(project, True)\n db.session.add(add_project)\n db.session.commit()\n projects = Projects.query.all()\n\n # set the active tab\n for proj in projects:\n if proj.project_name == project:\n project_id = proj.project_id\n proj.active = True\n else:\n proj.active = False\n\n status = bool(int(request.form['status']))\n\n # add the new task\n new_task = Tasks(project_id, task, status)\n db.session.add(new_task)\n db.session.commit()\n return redirect('/')", "def get_project_add_form():\n\n return render_template(\"project_add.html\")", "def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. 
Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())", "def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']", "def post(self, request):\n body = request.body.decode(\"utf-8\")\n print(body)\n print(request.META)\n if not body:\n return HttpResponse(status=HTTPStatus.BAD_REQUEST)\n\n data = json.loads(body)\n project_name = data['name']\n projects = Project.objects.all()\n serializer = ProjectSerializer(projects, many=True)\n existing_projects = [project['name'] for project in serializer.data]\n if project_name in existing_projects:\n return Response(status=HTTPStatus.CONFLICT)\n\n project_location = os.path.join(PROJECTS_FOLDER, project_name+'.aedt')\n project = Project.objects.create(name=project_name, project_location=project_location)\n project.save()\n return HttpResponse(HTTPStatus.OK)", "def test_add_project(self):\n pass", "def create(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save(permission=\"contributor\", role=\"Contributor\")\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def test_create_project_request(self):\n pass", "def on_add(self, project, name, **kwargs):\n pass", "def post_project_create(self, resource_dict):\n pass", "def post(self, data):\n conn = pecan.request.db_conn\n try:\n project = db_models.Project(**data.as_dict())\n return conn.create_project(request.context, project)\n except Exception:\n LOG.exception('Fail to create project: %s' % data.as_dict())\n raise exception.ProjectCreateFailed(project_id=data.project_id,\n user_id=data.user_id)", "def add_project_to_groups(projectname):\n groups = request.get_json().get(\"groups\", [])\n return jsonify(\n admin.add_project_to_groups(\n current_app.scoped_session(), username, groups=groups\n )\n )", "def post(self, *args, **kwargs):\n name = self.get_argument('name', None)\n description = self.get_argument('description', None)\n url = self.get_argument('url', None)\n leader = self.get_argument('leader', None)\n members = self.get_argument('members', None)\n teams = self.get_argument('teams', None)\n repos = self.get_argument('repos', None)\n tags = self.get_argument('tags', None)\n if 'user' not in kwargs:\n self.raise401()\n\n try:\n # todo - better arguments handler\n url = url.strip()\n url = url if url else None\n members_list = []\n repos_list = []\n teams_list = []\n project_leader = kwargs['user']\n if leader:\n project_leader = User.objects(username=leader).first()\n\n if repos:\n for repo in parse_listed_strs(repos):\n r = Repo.objects(name=repo).first()\n if not r:\n 
continue\n repos_list.append(r)\n if members:\n for member in parse_listed_strs(members):\n u = User.objects(username=member).first()\n if not u or u == project_leader:\n continue\n members_list.append(u)\n if teams:\n for team in parse_listed_strs(teams):\n t = Team.objects(name=team).first()\n if not t:\n continue\n teams_list.append(t)\n members_list.append(project_leader)\n tags_list = parse_listed_strs(tags)\n project = Project(\n name=name, description=description,\n url=url, repos=repos_list,\n leader=project_leader, members=members_list,\n teams=teams_list, tags=tags_list)\n project.save()\n project_data = document_to_json(project, filter_set=_FILTER)\n self.set_status(201)\n self.write(project_data)\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response", "def _post_project(prj=None):\n template_path = (os.path.join(\n os.path.split(__file__)[0], \"post_project_template.xml\"))\n with open(template_path, 'r') as file:\n template = Template(file.read())\n response_xml = template.render(\n name=f\"Project_TEST_{datetime.now()}\",\n open_date=str(datetime.today().date()),\n res_uri=f\"{LIMS_API.tools.api.host}researchers/1\")\n\n prj_response = LIMS_API.tools.api.post(\n f\"{LIMS_API.tools.api.host}projects\", response_xml)\n\n prj_response_soup = BeautifulSoup(\n prj_response, \"xml\").find(\"prj:project\")\n prj = api_types.Project(\n prj_response_soup.find(\"name\"),\n DEFAULT_RES,\n datetime.today().date(),\n [],\n prj_response_soup[\"uri\"])\n\n return prj", "def get_project_route(id):\n response_object = {'status': 'success'}\n if request.method == 'GET':\n response_object['project'] = get_project(id)\n return jsonify(response_object)", "def add_project(project, network, id):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.add_project(project, network, id)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def post(self, project_slug):\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n args = PROJECT_EDIT_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n\n if args.get('utility', project.utility):\n ensure_target_registry(False)\n\n set_target_registry(args)\n return self.handle_write(project, data=args)", "def createProject(self, payLoad):\n\n uri = \"/v1/projects/\" \n response = self.client.post(uri, payLoad)\n return response", "def process_project_route(id):\n response_object = {'status': 'success'}\n if request.method == 'POST':\n with database.engine.begin() as connection:\n app = flask.current_app\n params = request.get_json()['params']\n task = current_user.launch_task('morphocut_server.api.process_project',\n 'Processing project...', id, id, params)\n response_object['job_id'] = task.id\n print(\"return process\")\n return jsonify(response_object), 202", "def add(self, name, project):\n self.projects[name] = project", "def add_project(self, project=None):\n is_project = type(project) is Project\n id_exists = project.client_id in [c.client_id for c in self.client_list]\n pid_exists = project.project_id() in [p.project_id() for p in self.project_list]\n\n # cancel if it's no project or the client_id does not exist\n # or the 
project_id already exists\n if not is_project or not id_exists or pid_exists:\n return False\n\n # add the project\n self.project_list.append(project)\n self.save_project_to_file(project=project)\n return True" ]
[ "0.71713793", "0.6893478", "0.6753157", "0.66714686", "0.6621897", "0.64958745", "0.64496964", "0.6436584", "0.63982916", "0.63779765", "0.6225331", "0.61893666", "0.6065161", "0.60603213", "0.60536176", "0.6027334", "0.5993403", "0.59387577", "0.5909622", "0.58471173", "0.5797314", "0.57965314", "0.5774266", "0.5756656", "0.57458633", "0.574571", "0.5737518", "0.57171696", "0.5706389", "0.56907684" ]
0.72942376
0
Basic test to make sure the home directory is returned. Also checks to see if the passed in project_name is appended to the path in the returned value.
def test_find_home_directory(): dir = find.home_direcotry() nt.ok_(os.path.exists(dir)) project_name = 'test' dir = find.home_direcotry(project_name) project_name = ".{0}".format(project_name) nt.ok_(dir.endswith(project_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_projects_dir():\n # assert path.get_projects_dir() == '/home/pfanelli/python-devel'\n pass", "def test_get_home_directory(self, subprocess_mock, _, _2):\n subprocess_mock.return_value = b'HOME_DIRECTORY'\n self.assertEqual('HOME_DIRECTORY',\n iossim_util.get_home_directory('iPhone 11', '13.2.2'))\n self.assertEqual([\n 'xcrun', 'simctl', 'getenv', 'A4E66321-177A-450A-9BA1-488D85B7278E',\n 'HOME'\n ], subprocess_mock.call_args[0][0])", "def test_project_path(self):\n\n # Without arguments\n project_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..'\n ))\n self.assertEqual(project_root_path, paths.project())\n\n # With arguments\n self.assertEqual(\n MY_PATH,\n paths.project('tracksim', 'tests', 'test_tracksim.py')\n )", "def test_project_path(self):\n ep = exposed.ExposedProject()\n project = MagicMock()\n project.source_directory = os.path.realpath(os.path.dirname(__file__))\n ep.load(project)\n result = ep.path('hello.md')\n self.assertTrue(result.endswith('{}hello.md'.format(os.sep)))", "def get_home_dir(self) -> str:\n ret = os.path.expanduser(\"~\")\n if not os.path.exists(ret):\n raise RuntimeError(\"The home directory does not exist.\")\n return ret", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def test_get_working_directory():\n\n working_directory = application_services.get_working_directory()\n assert os.path.abspath('.') == working_directory", "def get_home():\n try:\n return str(Path.home())\n except Exception:\n return None", "def test_get_qiime_project_dir(self):\r\n\r\n # Do an explicit check on whether the file system containing\r\n # the current file is case insensitive. This is in response\r\n # to SF bug #2945548, where this test would fail on certain\r\n # unusual circumstances on case-insensitive file systems\r\n # because the case of abspath(__file__) was inconsistent.\r\n # (If you don't believe this, set case_insensitive_filesystem\r\n # to False, and rename your top-level Qiime directory as\r\n # qiime on OS X. That sould cause this test to fail as\r\n # actual will be path/to/qiime and expected will be\r\n # path/to/Qiime.) 
Note that we don't need to change anything\r\n # in the get_qiime_project_dir() function as if the\r\n # file system is case insenstive, the case of the returned\r\n # string is irrelevant.\r\n case_insensitive_filesystem = \\\r\n exists(__file__.upper()) and exists(__file__.lower())\r\n\r\n actual = get_qiime_project_dir()\r\n # I base the expected here off the imported location of\r\n # qiime/util.py here, to handle cases where either the user has\r\n # Qiime in their PYTHONPATH, or when they've installed it with\r\n # setup.py.\r\n # If util.py moves this test will fail -- that\r\n # is what we want in this case, as the get_qiime_project_dir()\r\n # function would need to be modified.\r\n import qiime.util\r\n util_py_filepath = abspath(abspath(qiime.util.__file__))\r\n expected = dirname(dirname(util_py_filepath))\r\n\r\n if case_insensitive_filesystem:\r\n # make both lowercase if the file system is case insensitive\r\n actual = actual.lower()\r\n expected = expected.lower()\r\n self.assertEqual(actual, expected)", "def __validate_home_dir(self, home, login, system, force):\n\n\t\tif system:\n\t\t\tif home:\n\t\t\t\tif os.path.exists(home) and not force:\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified directory '\n\t\t\t\t\t\t'{0} for system user {1} already exists. If you '\n\t\t\t\t\t\t'really want to use it, please use the --force '\n\t\t\t\t\t\t'argument.').format(stylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME,login)))\n\n\t\t\t\tif not home.startswith(\n\t\t\t\t\tsettings.defaults.home_base_path) \\\n\t\t\t\t\tand not home.startswith('/var') \\\n\t\t\t\t\tor home.startswith(LMC.configuration.groups.base_path) \\\n\t\t\t\t\tor home.find('/tmp') != -1:\n\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified home '\n\t\t\t\t\t\t'directory {0} for system user {1} is outside {2} '\n\t\t\t\t\t\t'and /var, or inside {3} or a temporary '\n\t\t\t\t\t\t'directory (/var/tmp, /tmp). This is unsupported, '\n\t\t\t\t\t\t'Aborting.').format(\n\t\t\t\t\t\tstylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME,login),\n\t\t\t\t\t\tsettings.defaults.home_base_path,\n\t\t\t\t\t\tLMC.configuration.groups.base_path))\n\n\t\t\t\tif home in (user.homeDirectory for user in self):\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified home '\n\t\t\t\t\t\t'directory {0} for system user {1} is already owned '\n\t\t\t\t\t\t'by another user. Please choose another one.').format(\n\t\t\t\t\t\tstylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME, login)))\n\n\t\t\t\treturn home\n\t\telse: # not system\n\t\t\tif home:\n\t\t\t\tlogging.warning(_(u'Specifying an alternative home directory '\n\t\t\t\t\t'is not allowed for standard users. 
Using standard home '\n\t\t\t\t\t'path {0} instead.').format(\n\t\t\t\t\t\tstylize(ST_PATH, '%s/%s' % (\n\t\t\t\t\t\t\tLMC.configuration.users.base_path, login))))\n\n\t\treturn \"%s/%s\" % (LMC.configuration.users.base_path, login)", "def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def path_home_mock():\n raise AttributeError()", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def test_config_home_custom_home_dir():\n cache_folder = os.path.join(temp_folder(), \"custom\")\n with environment_append({\"CONAN_USER_HOME\": cache_folder}):\n client = TestClient(cache_folder=cache_folder)\n client.run(\"config home\")\n assert cache_folder in client.out\n client.run(\"config home --json home.json\")\n _assert_dict_subset({\"home\": cache_folder}, json.loads(client.load(\"home.json\")))", "def test_config_home_short_home_dir():\n cache_folder = os.path.join(temp_folder(), \"custom\")\n with environment_append({\"CONAN_USER_HOME_SHORT\": cache_folder}):\n with pytest.raises(ConanException) as excinfo:\n TestClient(cache_folder=cache_folder)\n assert \"cannot be a subdirectory of the conan cache\" in str(excinfo.value)", "def homeDirectory(self, ignored_value):\n\t\tself.__homeDirectory = self._resolve_home_directory()", "def check_cc_project_dir():\r\n if len(cc_env.proj_dir) > 140:\r\n msg = ('ERROR: Project directory \"' + cc_env.proj_dir +\r\n '\" is too deep. Please choose a shallow directory'\r\n r'(something like \"C:\\PUMA\").')\r\n raise Exception(msg)\r\n\r\n if (\"-\" in cc_env.proj_dir or \" \" in cc_env.proj_dir or\r\n \".\" in cc_env.proj_dir):\r\n msg = ('ERROR: Project directory cannot contain spaces, dashes, or '\r\n 'special characters.')\r\n raise Exception(msg) \r\n return", "def _get_home():\n from os.path import expanduser\n home = expanduser('~')\n return home", "def test_get_application_root_path():\n\n root_path = os.path.abspath(os.path.join(os.path.abspath('.'), 'tests'))\n assert application_services.get_application_root_path() == root_path", "def homedir(options=['/home/jennifer/', '/home/jwalker/',\n 'C:/Users/jenfl/']):\n\n home = None\n for h in options:\n if os.path.isdir(h):\n home = h\n if home is None:\n raise ValueError('Home directory not found in list of options.')\n return home", "def test_root() -> Path:\n return TEST_ROOT", "def test_empty(self):\n self.assertFalse(os.path.exists('/'))", "def get_home_directory():\n \n try:\n directory=os.path.expanduser(\"~\")\n except EnvironmentError:\n directory=None\n \n return directory", "def test_get_project(self):\n pass", "def homedir():\n return os.path.expanduser('~')", "def get_home_dir(self, username):\n return self.user_table[username]['home']", "def test_home_exists(self):\n response = self.app.get('/')\n self.assertEqual(response.status_code, 200)", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def inHome(resname):\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return True\n else:\n return False", "def test_good_projects(self):\n # name path main_lang\n self.do_test_good('bar', 'tmp/bunny', 'py')\n 
self.do_test_good('banana', 'tmp/frog', 'c')\n self.do_test_good('grinch', 'tmp/abc/def')\n self.do_test_good('grinch', 'tmp/pqr')" ]
[ "0.683315", "0.6561728", "0.6349326", "0.62901163", "0.6190666", "0.6129038", "0.6125085", "0.6117744", "0.6084706", "0.60805786", "0.6034056", "0.6018535", "0.5919452", "0.59177035", "0.59023166", "0.58581406", "0.58504355", "0.5825954", "0.5818992", "0.58094126", "0.5795476", "0.57684153", "0.5761381", "0.5759554", "0.5712052", "0.5699473", "0.5695353", "0.5695306", "0.5690777", "0.5676611" ]
0.7624633
0
Test to make sure the system config directory is returned. Also checks to see if the passed in project_name is appended to the path in the returned value.
def test_find_system_config_directory(): dir = find.system_config_directory() nt.ok_(os.path.exists(dir)) project_name = 'test' dir = find.system_config_directory(project_name) nt.ok_(dir.endswith(project_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_find_project_config_directory():\n dir = find.project_config_directory(False)\n nt.ok_(os.path.exists(dir))\n\n dir = find.project_config_directory()\n nt.ok_(dir.endswith('config'))", "def test_config_get(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config get project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_get_projects_dir():\n # assert path.get_projects_dir() == '/home/pfanelli/python-devel'\n pass", "def test_find_no_share(self):\n with pytest.raises(EnvironmentError):\n with patch(\"os.getenv\", Mock(return_value=False)):\n services.find_config_path(FILES)", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def test_build_dir(self):\n build_dir = local.path(str(CFG['build_dir']))\n self.assertTrue(build_dir.exists())", "def test_get_settings_path():\n\n root_path = application_services.get_application_main_package_path()\n settings_path = os.path.abspath(os.path.join(root_path, 'settings'))\n assert application_services.get_settings_path() == settings_path", "def check_configure_scan(project_path):\n for file_name in CONFIGURE_AC_NAMES:\n file_path = os.path.join(project_path, file_name)\n if os.path.exists(file_path):\n return file_path\n return None", "def test_config_set(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('config set project name \"Test project\"')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('Test project',\n self.env.config.get('project', 'name'))", "def test_sysconfdir(self):\n self.chck_triple('sysconfdir')", "def test_get_upload_directory(self, mock_config_file):\n configuration = Configuration()\n assert configuration.upload_dir == os.path.join(configuration.app_workdir, '.labmanager', 'upload')", "def test_get_config_replace_by_os_env():\n value = 'MIMIMIMIMIMIMI'\n os.environ[\"SOURCE_FOLDER\"] = value\n assert get_config('SOURCE_FOLDER') == value", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']", "def _findConfigPath(self, name):\n for path in reversed(self._makeAllConfigPaths(name)):\n if os.path.exists(path):\n return path", "def test_local_filepath_helper():\n expected_local_filepath = TEST_LOCAL_CONFIG_PATH.replace('.cfg', '_local.cfg')\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH) == TEST_LOCAL_CONFIG_PATH\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH, True) == expected_local_filepath", "def test_get_default_settings_path():\n\n root_path = application_services.get_pyrin_main_package_path()\n default_settings_path = os.path.abspath(os.path.join(root_path, 'settings', 
'default'))\n assert application_services.get_default_settings_path() == default_settings_path", "def test_project_path(self):\n\n # Without arguments\n project_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..'\n ))\n self.assertEqual(project_root_path, paths.project())\n\n # With arguments\n self.assertEqual(\n MY_PATH,\n paths.project('tracksim', 'tests', 'test_tracksim.py')\n )", "def _assert_predefined_config_path(\n self,\n framework: str,\n domain: str,\n domain_flavour: str,\n expected_filename: str,\n ) -> None:\n result = get_predefined_config_path(framework, domain, domain_flavour)\n expected = os.path.join(\n os.path.abspath(\n os.path.dirname(\n inspect.getfile(get_predefined_config_path),\n ),\n ),\n \"configs\",\n \"predefined_configs\",\n f\"{framework}\",\n expected_filename,\n )\n self.assertEqual(result, expected)\n self.assertEqual(os.path.isfile(result), True)", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def _get_config_dirs(project=None):\n snap = os.environ.get('SNAP')\n snap_c = os.environ.get('SNAP_COMMON')\n\n cfg_dirs = [\n _fixpath(os.path.join('~', '.' + project)) if project else None,\n _fixpath('~'),\n os.path.join('/etc', project) if project else None,\n '/etc',\n os.path.join(snap_c, \"etc\", project) if snap_c and project else None,\n os.path.join(snap, \"etc\", project) if snap and project else None,\n ]\n return [x for x in cfg_dirs if x]", "def test_find_config_local(tmpdir, config_dir):\n working_dir = tmpdir.mkdir(\"working\")\n working_dir.join('local.cfg').write('')\n\n with cd(working_dir.strpath):\n found_config = find_config(config_dir.strpath, 'local.cfg')\n assert found_config == working_dir.join('local.cfg').strpath\n\n found_config = find_config(config_dir.strpath, 'local')\n assert found_config == working_dir.join('local.cfg').strpath\n\n found_config = find_config(config_dir.strpath, 'config')\n assert found_config == config_dir.join('config.cfg').strpath", "def test_not_github(self):\n project_src_path = 'project-src'\n os.environ['PROJECT_SRC_PATH'] = project_src_path\n generic_ci_env = platform_config.BasePlatformConfig()\n self.assertEqual(generic_ci_env.project_src_path, project_src_path)", "def test_ignore_non_configs_from_current_dir(tmp_path: pathlib.Path) -> None:\n\n cli.startup(tmp_path)\n\n junk_config = tmp_path / \"myconfig.psd\"\n junk_config.touch()\n conf = tmp_path / \"watmyconfig.json\"\n conf.touch()\n configs_found = in_dir(tmp_path)\n assert len(configs_found) == 1", "def test_get_qiime_project_dir(self):\r\n\r\n # Do an explicit check on whether the file system containing\r\n # the current file is case insensitive. This is in response\r\n # to SF bug #2945548, where this test would fail on certain\r\n # unusual circumstances on case-insensitive file systems\r\n # because the case of abspath(__file__) was inconsistent.\r\n # (If you don't believe this, set case_insensitive_filesystem\r\n # to False, and rename your top-level Qiime directory as\r\n # qiime on OS X. That sould cause this test to fail as\r\n # actual will be path/to/qiime and expected will be\r\n # path/to/Qiime.) 
Note that we don't need to change anything\r\n # in the get_qiime_project_dir() function as if the\r\n # file system is case insenstive, the case of the returned\r\n # string is irrelevant.\r\n case_insensitive_filesystem = \\\r\n exists(__file__.upper()) and exists(__file__.lower())\r\n\r\n actual = get_qiime_project_dir()\r\n # I base the expected here off the imported location of\r\n # qiime/util.py here, to handle cases where either the user has\r\n # Qiime in their PYTHONPATH, or when they've installed it with\r\n # setup.py.\r\n # If util.py moves this test will fail -- that\r\n # is what we want in this case, as the get_qiime_project_dir()\r\n # function would need to be modified.\r\n import qiime.util\r\n util_py_filepath = abspath(abspath(qiime.util.__file__))\r\n expected = dirname(dirname(util_py_filepath))\r\n\r\n if case_insensitive_filesystem:\r\n # make both lowercase if the file system is case insensitive\r\n actual = actual.lower()\r\n expected = expected.lower()\r\n self.assertEqual(actual, expected)", "def test_global_config_file_creation():\n GlobalConfig()\n\n # Raw config should now exist\n assert fileio.file_exists(os.path.join(APP_DIR, GLOBAL_CONFIG_NAME))", "def ensure_conf_exist(project_root: str) -> Path:\n\n logme_conf = Path(project_root) / 'logme.ini'\n\n if logme_conf.exists():\n yield logme_conf\n else:\n raise FileNotFoundError(f\"log me config file does not exist in {Path.cwd()}, \"\n f\"if you'd like to initialize logme in this directory, please type 'logme init'.\")", "def test_find_config_default(tmpdir, config_dir):\n working_dir = tmpdir.mkdir(\"working\")\n working_dir.join('local.cfg').write('')\n\n with cd(working_dir.strpath):\n found_config = find_config(config_dir.strpath, 'config')\n assert found_config == config_dir.join('config.cfg').strpath\n\n found_config = find_config(config_dir.strpath, 'config.cfg')\n assert found_config == config_dir.join('config.cfg').strpath" ]
[ "0.71266794", "0.66663754", "0.6475454", "0.6352774", "0.6328156", "0.631857", "0.62289643", "0.61653954", "0.6149517", "0.61000323", "0.6091144", "0.6088514", "0.6035728", "0.6010017", "0.5962612", "0.5958839", "0.595415", "0.5898134", "0.5865054", "0.5847888", "0.5824562", "0.58023185", "0.58006305", "0.5789972", "0.5779218", "0.577545", "0.57692164", "0.5749764", "0.57467294", "0.5729941" ]
0.78222615
0
Test to make sure the project directory is returned. Also checks to be sure that 'config is appended to the returned value.
def test_find_project_config_directory(): dir = find.project_config_directory(False) nt.ok_(os.path.exists(dir)) dir = find.project_config_directory() nt.ok_(dir.endswith('config'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_projects_dir():\n # assert path.get_projects_dir() == '/home/pfanelli/python-devel'\n pass", "def test_find_system_config_directory():\n dir = find.system_config_directory()\n nt.ok_(os.path.exists(dir))\n\n project_name = 'test'\n dir = find.system_config_directory(project_name)\n nt.ok_(dir.endswith(project_name))", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def test_config_get(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config get project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_build_dir(self):\n build_dir = local.path(str(CFG['build_dir']))\n self.assertTrue(build_dir.exists())", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']", "def test_get_settings_path():\n\n root_path = application_services.get_application_main_package_path()\n settings_path = os.path.abspath(os.path.join(root_path, 'settings'))\n assert application_services.get_settings_path() == settings_path", "def test_get_upload_directory(self, mock_config_file):\n configuration = Configuration()\n assert configuration.upload_dir == os.path.join(configuration.app_workdir, '.labmanager', 'upload')", "def test_project_path(self):\n\n # Without arguments\n project_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..'\n ))\n self.assertEqual(project_root_path, paths.project())\n\n # With arguments\n self.assertEqual(\n MY_PATH,\n paths.project('tracksim', 'tests', 'test_tracksim.py')\n )", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def test_get_working_directory():\n\n working_directory = application_services.get_working_directory()\n assert os.path.abspath('.') == working_directory", "def test_get_default_settings_path():\n\n root_path = application_services.get_pyrin_main_package_path()\n default_settings_path = os.path.abspath(os.path.join(root_path, 'settings', 'default'))\n assert application_services.get_default_settings_path() == default_settings_path", "def test_get_config_replace_by_os_env():\n value = 'MIMIMIMIMIMIMI'\n os.environ[\"SOURCE_FOLDER\"] = value\n assert get_config('SOURCE_FOLDER') == value", "def check_cc_project_dir():\r\n if len(cc_env.proj_dir) > 140:\r\n msg = ('ERROR: Project directory \"' + cc_env.proj_dir +\r\n '\" is too deep. 
Please choose a shallow directory'\r\n r'(something like \"C:\\PUMA\").')\r\n raise Exception(msg)\r\n\r\n if (\"-\" in cc_env.proj_dir or \" \" in cc_env.proj_dir or\r\n \".\" in cc_env.proj_dir):\r\n msg = ('ERROR: Project directory cannot contain spaces, dashes, or '\r\n 'special characters.')\r\n raise Exception(msg) \r\n return", "def test_not_github(self):\n project_src_path = 'project-src'\n os.environ['PROJECT_SRC_PATH'] = project_src_path\n generic_ci_env = platform_config.BasePlatformConfig()\n self.assertEqual(generic_ci_env.project_src_path, project_src_path)", "def test_project_path(self):\n ep = exposed.ExposedProject()\n project = MagicMock()\n project.source_directory = os.path.realpath(os.path.dirname(__file__))\n ep.load(project)\n result = ep.path('hello.md')\n self.assertTrue(result.endswith('{}hello.md'.format(os.sep)))", "def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')", "def test_get_pyrin_root_path():\n\n root_path = os.path.abspath('.')\n assert application_services.get_pyrin_root_path() == root_path", "def test_get_project(self):\n pass", "def test_repo_relpath(self):\n from os import path\n repodir = \"~/codes/ci/tests\"\n relpath = \"../pyci/config.py\"\n result = path.expanduser(\"~/codes/ci/pyci/config.py\")\n self.assertEqual(result, get_repo_relpath(repodir, relpath))", "def check_config_file():\n # Locate and init config.\n default_config = \"config.json\"\n if len(sys.argv) == 2:\n # config from command line\n app_config = config_reader(sys.argv[1])\n else:\n # config should be in default\n app_config = config_reader(default_config)\n # fin\n if not app_config:\n print(\"Exiting due to invalid config file.\")\n return False\n # fin\n return app_config", "def get_project_dir():\n\n return Path(settings.PROJECT_DIR)", "def test_base_dir(self):\n self.assertEqual(self.settings.BASE_DIR, TestPredefines.BASE_DIR)", "def test_get_qiime_project_dir(self):\r\n\r\n # Do an explicit check on whether the file system containing\r\n # the current file is case insensitive. This is in response\r\n # to SF bug #2945548, where this test would fail on certain\r\n # unusual circumstances on case-insensitive file systems\r\n # because the case of abspath(__file__) was inconsistent.\r\n # (If you don't believe this, set case_insensitive_filesystem\r\n # to False, and rename your top-level Qiime directory as\r\n # qiime on OS X. That sould cause this test to fail as\r\n # actual will be path/to/qiime and expected will be\r\n # path/to/Qiime.) 
Note that we don't need to change anything\r\n # in the get_qiime_project_dir() function as if the\r\n # file system is case insenstive, the case of the returned\r\n # string is irrelevant.\r\n case_insensitive_filesystem = \\\r\n exists(__file__.upper()) and exists(__file__.lower())\r\n\r\n actual = get_qiime_project_dir()\r\n # I base the expected here off the imported location of\r\n # qiime/util.py here, to handle cases where either the user has\r\n # Qiime in their PYTHONPATH, or when they've installed it with\r\n # setup.py.\r\n # If util.py moves this test will fail -- that\r\n # is what we want in this case, as the get_qiime_project_dir()\r\n # function would need to be modified.\r\n import qiime.util\r\n util_py_filepath = abspath(abspath(qiime.util.__file__))\r\n expected = dirname(dirname(util_py_filepath))\r\n\r\n if case_insensitive_filesystem:\r\n # make both lowercase if the file system is case insensitive\r\n actual = actual.lower()\r\n expected = expected.lower()\r\n self.assertEqual(actual, expected)", "def test_local_filepath_helper():\n expected_local_filepath = TEST_LOCAL_CONFIG_PATH.replace('.cfg', '_local.cfg')\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH) == TEST_LOCAL_CONFIG_PATH\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH, True) == expected_local_filepath", "def test_config_set(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('config set project name \"Test project\"')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('Test project',\n self.env.config.get('project', 'name'))", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def get_project_config():\n if get_project_config.CONFIG is None:\n import json\n with open('project.json') as fp:\n get_project_config.CONFIG = json.load(fp)\n return get_project_config.CONFIG", "def test_global_config_file_creation():\n GlobalConfig()\n\n # Raw config should now exist\n assert fileio.file_exists(os.path.join(APP_DIR, GLOBAL_CONFIG_NAME))", "def configs() -> Path:\n return TEST_ROOT.parent / \"fixtures\" / \"configs\"" ]
[ "0.73030764", "0.7246068", "0.700198", "0.6881496", "0.6849748", "0.67954606", "0.6739453", "0.6699555", "0.6686036", "0.6663459", "0.6623424", "0.6605419", "0.6453499", "0.6422474", "0.6410907", "0.63848305", "0.6329559", "0.6272082", "0.626627", "0.6255891", "0.6252284", "0.6251846", "0.62500936", "0.62162507", "0.6205307", "0.6200152", "0.61968", "0.61854523", "0.61622775", "0.6145861" ]
0.78609085
0
Test to make sure that three config directories are returned.
def test_find_config_directories(): dirs = find.config_directories('test') nt.eq_(len(dirs), 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_result_directories(self):\n pass", "def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')", "def verify_cache_dirs_exists():\n verify_dir_helper(config.cache_dir)\n for dir in config._directories:\n verify_dir_helper(dir)", "def test_in_dir_from_config_dir(tmp_path: pathlib.Path) -> None:\n\n cli.startup(tmp_path)\n yaml_config = tmp_path / \"myconfig.yaml\"\n yaml_config.touch()\n json_config = tmp_path / \"myconfig.json\"\n json_config.touch()\n configs_found = in_dir(tmp_path)\n\n assert len(configs_found) == 2", "def check_configfiles():\n return (all(os.path.isdir(x) for x in CONFIG_DIRS) and\n os.path.isfile(CONFIG_FILE) and os.path.isfile(LOG_CONFIG_FILE))", "def test_find_multiple_config_files():\n config_files = ['~/test__dodai__config.test', '~/test__dodai__config.cfg']\n non_exist_file = '~/test__dodai_config'\n good_files = []\n # touch the files\n for config_file in config_files:\n path = os.path.expanduser(config_file)\n good_files.append(path)\n if os.path.exists(path):\n os.remove(path)\n f = open(path, 'w')\n f.close()\n\n # Add a non existing file\n filenames = config_files + [non_exist_file]\n non_exist_file = os.path.exists(non_exist_file)\n files = find.config_files('test__dodai__config_', filenames, 'utf-8')\n for filename in good_files:\n nt.ok_((filename, 'utf-8') in files)\n nt.ok_((non_exist_file, 'utf-8') not in files)\n for filename in good_files:\n os.remove(filename)", "def test_separated_apps(self):\n self.assertEquals(dirs.get_separated_apps(\"css\"), [])\n self.assertEquals(dirs.get_separated_apps(\"js\"), [])\n \n with self.settings(SEPARATE_CSS=[\"something\"]):\n self.assertIn(\"something\", dirs.get_separated_apps(\"css\"))\n self.assertEquals(len(dirs.get_separated_apps(\"css\")), 1)\n \n with self.settings(SEPARATE_JS=[\"something\"]):\n self.assertIn(\"something\", dirs.get_separated_apps(\"js\"))\n self.assertEquals(len(dirs.get_separated_apps(\"js\")), 1)", "def test_find_project_config_directory():\n dir = find.project_config_directory(False)\n nt.ok_(os.path.exists(dir))\n\n dir = find.project_config_directory()\n nt.ok_(dir.endswith('config'))", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS", "def check_configs(self):\n\n pass", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def test_find_config_way_up(self, in_tmp_path):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n subdirs = [\"foo\", \"bar\", \"snap\", \"crackle\", \"pop\"]\n\n for sd in subdirs:\n os.mkdir(sd)\n 
os.chdir(sd)\n\n # Verify our current working dir\n assert_paths_equal(os.getcwd(), in_tmp_path.joinpath(*subdirs))\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, join(*subdirs))", "def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)", "def check_configs():\n configs = [os.path.join(CUCKOO_ROOT, \"conf\", \"cuckoo.conf\"),\n os.path.join(CUCKOO_ROOT, \"conf\", \"reporting.conf\")]\n\n for config in configs:\n if not os.path.exists(config):\n raise CuckooStartupError(\"Config file does not exist at path: %s\" % config)\n\n return True", "def test_empty_azure_config_dir():\n pass", "def test_list_dir_returns_dirs_only(self):\n with self.settings(MIDDLEWARE_CLASSES=self.fix_middleware(), KML_FILE_DIR=self.kml_file_dir):\n user = StaffUserFactory()\n ldv = self.initiate_view(user)\n base_path = settings.KML_FILE_DIR\n print base_path\n ldv.cache_dir_content(base_path)\n dirs = ldv.list_dirs()\n print dirs\n self.assertGreaterEqual(len(dirs), 1)\n for dir_name in dirs:\n dir_path = os.path.join(base_path, dir_name)\n self.assertTrue(os.path.isdir(dir_path))", "def test_find_system_config_directory():\n dir = find.system_config_directory()\n nt.ok_(os.path.exists(dir))\n\n project_name = 'test'\n dir = find.system_config_directory(project_name)\n nt.ok_(dir.endswith(project_name))", "def test_sysconfdir(self):\n self.chck_triple('sysconfdir')", "def test_get_result_directory(self):\n pass", "def test_set_default_output_directory(self):\n self.assertIsNotNone(self.mop1.settings.fileStore)\n self.assertIsNotNone(self.mop3.settings.fileStore)\n self.assertIsNotNone(self.gauss2.settings.fileStore)\n\n self.assertIsNone(self.mop2.settings.fileStore)\n self.assertIsNone(self.gauss1.settings.fileStore)\n\n self.assertIsNone(self.mop1.settings.scratchDirectory)\n self.assertIsNone(self.mop2.settings.scratchDirectory)\n self.assertIsNone(self.mop3.settings.scratchDirectory)\n self.assertIsNone(self.gauss1.settings.scratchDirectory)\n self.assertIsNone(self.gauss2.settings.scratchDirectory)\n\n # Now set the default directories for those not set\n outputDirectory = os.path.join(self.mop1.settings.fileStore, '..', '..')\n self.mop1.set_default_output_directory(outputDirectory)\n self.mop2.set_default_output_directory(outputDirectory)\n self.mop3.set_default_output_directory(outputDirectory)\n self.gauss1.set_default_output_directory(outputDirectory)\n self.gauss2.set_default_output_directory(outputDirectory)\n\n self.assertIsNotNone(self.mop1.settings.fileStore)\n self.assertIsNotNone(self.mop2.settings.fileStore)\n self.assertIsNotNone(self.mop3.settings.fileStore)\n self.assertIsNotNone(self.gauss1.settings.fileStore)\n self.assertIsNotNone(self.gauss2.settings.fileStore)\n self.assertIsNotNone(self.mop1.settings.scratchDirectory)\n self.assertIsNotNone(self.mop2.settings.scratchDirectory)\n self.assertIsNotNone(self.mop3.settings.scratchDirectory)\n self.assertIsNotNone(self.gauss1.settings.scratchDirectory)\n self.assertIsNotNone(self.gauss2.settings.scratchDirectory)", "def check_config(cfg):", "def test_libdir(self):\n self.chck_triple('libdir')", "def test_ensure_dir_exists(self):\n pass", "def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])", "def test_list_config_roots(self):\n with self.override_role():\n self.config_client.list_config_roots()", "def _get_config_dirs(project=None):\n snap = os.environ.get('SNAP')\n snap_c = 
os.environ.get('SNAP_COMMON')\n\n cfg_dirs = [\n _fixpath(os.path.join('~', '.' + project)) if project else None,\n _fixpath('~'),\n os.path.join('/etc', project) if project else None,\n '/etc',\n os.path.join(snap_c, \"etc\", project) if snap_c and project else None,\n os.path.join(snap, \"etc\", project) if snap and project else None,\n ]\n return [x for x in cfg_dirs if x]", "def test_enumerate_configs(config_dir):\n results = list(enumerate_configs(config_dir.strpath))\n\n assert 'config.cfg' in results\n assert 'module.cfg' in results\n assert 'extra.ini' not in results\n assert 'README' not in results\n assert len(results) == 2", "def test_several_folders(self):\n spider_path = 'tests/sample_spiders/'\n test_data = [\n ('valid_metadata', 1),\n ('no_metadata', 0),\n ('incomplete_metadata', 0),\n ('two_spiders_one_file', 0),\n ('no_basespider_inheritance', 0)\n ]\n\n m = SpiderManager()\n for spidername, valid_spiders in test_data:\n path = spider_path + spidername\n os.environ['SPIDER_PATH'] = path\n\n m.load(path)\n spiders = m.get_spiders()\n\n self.assertEqual(type(spiders), list)\n self.assertEqual(len(spiders), valid_spiders)", "def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']" ]
[ "0.6819444", "0.65486413", "0.6503178", "0.6452762", "0.6326768", "0.62709665", "0.62600607", "0.62281007", "0.62278634", "0.6211113", "0.61404324", "0.611129", "0.61100394", "0.60971874", "0.6079131", "0.6072936", "0.6057406", "0.605582", "0.6047787", "0.6046509", "0.60408914", "0.60408443", "0.6019706", "0.6018976", "0.59960943", "0.59914285", "0.5986274", "0.5956589", "0.5956436", "0.59460324" ]
0.8166695
0
Test to make sure that multiple config files are found.
def test_find_multiple_config_files(): config_files = ['~/test__dodai__config.test', '~/test__dodai__config.cfg'] non_exist_file = '~/test__dodai_config' good_files = [] # touch the files for config_file in config_files: path = os.path.expanduser(config_file) good_files.append(path) if os.path.exists(path): os.remove(path) f = open(path, 'w') f.close() # Add a non existing file filenames = config_files + [non_exist_file] non_exist_file = os.path.exists(non_exist_file) files = find.config_files('test__dodai__config_', filenames, 'utf-8') for filename in good_files: nt.ok_((filename, 'utf-8') in files) nt.ok_((non_exist_file, 'utf-8') not in files) for filename in good_files: os.remove(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')", "def check_configs(self):\n\n pass", "def check_configs():\n configs = [os.path.join(CUCKOO_ROOT, \"conf\", \"cuckoo.conf\"),\n os.path.join(CUCKOO_ROOT, \"conf\", \"reporting.conf\")]\n\n for config in configs:\n if not os.path.exists(config):\n raise CuckooStartupError(\"Config file does not exist at path: %s\" % config)\n\n return True", "def check_configfiles():\n return (all(os.path.isdir(x) for x in CONFIG_DIRS) and\n os.path.isfile(CONFIG_FILE) and os.path.isfile(LOG_CONFIG_FILE))", "def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)", "def test_ifFileExists():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"file\" in testConfig.config and \"file_locations\" in testConfig.config:\n print \"File In Location: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExistsInPossibleLocations, testConfig.config\n elif \"file\" in testConfig.config:\n print \"File: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExists, testConfig.config", "def test_config_must_exist(cls, values):\n configs = [c.config for c in values.get('configs')]\n for test in values.get('tests'):\n if test.config not in configs:\n raise ValueError(\n f\"Test '{test.test}' gave the config '{test.config}', but \"\n \"this config does not exist in the file \"\n f\"'{values.get('yaml')}'. 
Configs detected : {configs} \\n\")\n return values", "def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS", "def _check_config(self):", "def check_config(cfg):", "def checkIfFileExistsInPossibleLocations(testConfig):\n assert \"name\" in testConfig\n assert \"file\" in testConfig\n assert \"file_locations\" in testConfig\n testPass = False\n for filePath in testConfig[\"file_locations\"]:\n if isfile(join(filePath,testConfig[\"file\"])):\n testPass=True\n \n assert testPass,\"Failure for package \"+testConfig[\"name\"]+\"\\n File: \"+\\\n testConfig[\"file\"]+\" does not exist\"+\"\\nSearched in \"+\\\n str(testConfig[\"file_locations\"])", "def test_read_config():\n # for config in config_fname, config_solaris_fname:\n for config in config_fnames:\n cfg = _read_config(config)\n assert all(\n \"unknown\" not in block.lower() and block != \"\"\n for block in cfg[\"user_blocks\"]\n )", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def check_config(self):\n cfgs = self.__get() \n \n for option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n \n if not cfgs.has_key(option):\n self.log.warn(\"Parameter '%s' is missing in '%s', using default('%s')\" % \\\n (option, self.config_file, _default))\n _file = _default\n else:\n _file = cfgs[option]\n Config.FILE_OPTIONS[option] = _file\n\n if not os.path.exists(_file) and not os.path.isfile(_file):\n self.log.error(\"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n\n\n for option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n \n if not cfgs.has_key(option):\n self.log.warn(\"Parameter '%s' is missing in '%s', using default('%s')\" % \\\n (option, self.config_file, _default))\n _dir = _default\n else:\n _dir = cfgs[option]\n Config.PATH_OPTIONS[option] = _dir\n\n if not os.path.exists(_dir) and not os.path.isdir(_dir):\n self.log.error(\"Paramenter '%s' points to non-existing directory '%s')\" % \\\n (option, _dir))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing directory '%s')\" % \\\n (option, _dir))\n\n \n Config.DB_SFT_OPTIONS['sqlalchemy_sft.url'] = cfgs['sqlalchemy_sft.url']\n Config.DB_NAGIOS_OPTIONS['sqlalchemy_nagios.url'] = cfgs['sqlalchemy_nagios.url']\n\n self.log.debug(\"Configuration successfully checked\")", "def testLoadConfigs_loadMultipleLab(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(\n os.path.dirname(config_path), lab_config.IsYaml))\n with self.assertRaisesRegex(\n lab_config.ConfigError, r'There are multiple config files.'):\n pool.LoadConfigs()", "def check_config(config):\n pass", "def check_config():\n\n if not config_instance:\n LOG.error(\"Failed to load the config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"CONFIG_VERSION\"):\n LOG.warning( \"The config file does not specify CONFIG_VERSION! I will \"\n \"try to continue anyway, but this field is recommended to allow \"\n \"some internal tests to work. 
I will assume the value '(1,0)'!\" )\n config_instance.CONFIG_VERSION = (1, 0)\n\n major, minor = config_instance.CONFIG_VERSION\n expected_major, expected_minor = EXPECTED_CONFIG_VERSION\n\n if major < expected_major:\n LOG.critical(\"The config system has undergone a major change! \"\n \"I cannot continue without an upgrade!\")\n sys.exit(9)\n\n if minor < expected_minor:\n LOG.warning(\"The config system has undergone a minor change! \"\n \"It should work, but you still should review the docs!\")\n\n if major == expected_major and minor == expected_minor:\n LOG.debug( \"Config version OK!\" )\n\n if not hasattr(config_instance, \"GENERATORS\"):\n LOG.critical(\"Variable 'GENERATORS' not found in config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"TARGETS\"):\n LOG.critical(\"Variable 'TARGETS' not found in config!\")\n sys.exit(9)", "def test_read_valid_configs(self):\n args = argparse.Namespace(server=None, force=False)\n with open(self._config) as config_f:\n with open(self._auth) as auth_config_f:\n (config_data, auth_tuple) = imageroller.main.read_configs(\n args,\n config_f,\n auth_config_f)\n self.assertEqual(config_data.concurrent_workers,\n CONFIG_DATA[\"ConcurrentWorkers\"])\n self.assertEqual(len(config_data.server_data), 1)\n self.assertTupleEqual(auth_tuple, (AUTH_DATA[\"ApiUser\"],\n AUTH_DATA[\"ApiKey\"]))", "def test_in_dir_from_config_dir(tmp_path: pathlib.Path) -> None:\n\n cli.startup(tmp_path)\n yaml_config = tmp_path / \"myconfig.yaml\"\n yaml_config.touch()\n json_config = tmp_path / \"myconfig.json\"\n json_config.touch()\n configs_found = in_dir(tmp_path)\n\n assert len(configs_found) == 2", "def check_config(outconfig):\n self.log.info(\"Checking if all the necessary files exist.\")\n\n # Perform necessary checks\n\n log.info(\"All necessary files exist for {} configuration.\".format(outconfig[\"Flavor\"]))\n\n return", "def test_validate_config_file(self):\n ingest_mgmr = IngestManager()\n ingest_mgmr.validate_config_file(self.example_config_data)\n assert(ingest_mgmr.config is not None)\n assert (ingest_mgmr.config.config_data is not None)", "def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")", "def testReadConfig(loggingMixin, configType, configTypeString):\n # This could be different than configType if we want to use a string.\n # We use a different object because we still want to use the standard config type later in the test.\n configTypeForReadingConfig = configType\n if configTypeString:\n configTypeForReadingConfig = configType.name\n (parameters, filesRead) = config.readConfig(configTypeForReadingConfig)\n\n filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"configTestFiles\", \"{}ConfigRef.yaml\".format(configType.name))\n\n # We need to treat whether the file exists with a bit of care.\n # NOTE: Since the parametization causes this to run mulitple times, some will pass and\n # and some will fail, even when creating the configuration files. 
This is fine.\n if os.path.exists(filename):\n # Access the expected values\n with open(filename, \"r\") as f:\n expected = yaml.load(f.read(), Loader = yaml.SafeLoader)\n else:\n # For making the reference\n with open(filename, \"w\") as f:\n yaml.dump(parameters, f)\n logger.warning(\"Creating configuration reference for {} module\".format(configType.name))\n # We don't want to go further - we're just creating the reference.\n assert False\n\n # Don't compare the full \"_users\" values because they will always be different due to differences in hashing\n paramUsers = parameters.pop(\"_users\", None)\n expectedUsers = expected.pop(\"_users\", None)\n # However, the beginning should match (same idea as in `testBcrypt`)\n lengthToCheck = 7\n # It won't always exist, so we need to check for it first.\n if paramUsers:\n for k, v in iteritems(paramUsers):\n assert v[:lengthToCheck] == expectedUsers[k][:lengthToCheck]\n\n # Apparently the order of these lists can vary between different systems. We don't care about the order\n # - just the values themselves - so we compare them as sets, which don't depend on order.\n paramTemplates = parameters.pop(\"availableRunPageTemplates\", None)\n expectedTemplates = expected.pop(\"availableRunPageTemplates\", None)\n # It won't always exist, so we need to check for it first.\n if paramTemplates:\n assert set(paramTemplates) == set(expectedTemplates)\n\n # Everything else should be identical.\n assert parameters == expected", "def check():\n # Initialize key variables\n config_directory = os.environ['PATTOO_CONFIGDIR']\n\n # Print Status\n print('??: Checking configuration parameters.')\n\n # Check config (pattoo.yaml)\n config_file = configuration.agent_config_filename('pattoo')\n config = files.read_yaml_file(config_file)\n\n # Check main keys\n keys = ['pattoo', 'pattoo_web_api', 'pattoo_agent_api']\n for key in keys:\n if key not in config:\n log_message = ('''\\\nSection \"{}\" not found in configuration file in directory {}. Please fix.\\\n'''.format(key, config_directory))\n log.log2die_safe(80007, log_message)\n\n # Check secondary keys\n secondaries = [\n 'log_level', 'log_directory', 'cache_directory',\n 'daemon_directory']\n secondary_key_check(config, 'pattoo', secondaries)\n secondaries = ['ip_address', 'ip_bind_port']\n secondary_key_check(config, 'pattoo_agent_api', secondaries)\n secondaries = ['ip_address', 'ip_bind_port']\n secondary_key_check(config, 'pattoo_web_api', secondaries)\n\n # Check config (pattoo_webd.yaml)\n config_file = configuration.agent_config_filename('pattoo_webd')\n config = files.read_yaml_file(config_file)\n\n # Check main keys\n keys = ['pattoo_webd']\n for key in keys:\n if key not in config:\n log_message = ('''\\\nSection \"{}\" not found in configuration file in directory {}. 
Please fix.\\\n'''.format(key, config_directory))\n log.log2die_safe(80020, log_message)\n\n # Check secondary keys\n secondaries = ['ip_listen_address', 'ip_bind_port']\n secondary_key_check(config, 'pattoo_webd', secondaries)\n\n # Print Status\n print('OK: Configuration parameter check passed.')", "def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])", "def test_find_config_files():\n path_base = \"~/test__dodai__config.test\"\n path = os.path.expanduser(path_base)\n if os.path.exists(path):\n os.remove(path)\n f = open(path, 'w')\n f.close()\n files = find.config_files('test__dodai_config', path)\n nt.ok_(len(files) > 0)\n nt.eq_(files[0][0], path)\n if os.path.exists(path):\n os.remove(path)", "def test_all_configs_available():\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in ['TITLE', 'ENCODING', 'FLASK_LOG_LEVEL',\n 'SERVER_NAME', 'SERVER_HOST', 'SERVER_PORT',\n 'ENV', 'DEBUG', 'TESTING', 'UNIT_TESTING'])", "def init_config():\n config_file = create_config_file.CreateConfigFile()\n config_file.check_if_config_file_exists()", "def test_load_config(self):\n config = copyclipper.LoadConfig()\n self.assertTrue(len(config) > 0)", "def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", 
\"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}" ]
[ "0.7504495", "0.74272", "0.7217084", "0.71978724", "0.7190491", "0.71879476", "0.711771", "0.7064929", "0.7038326", "0.7013736", "0.69715494", "0.6963111", "0.69172424", "0.68899876", "0.6855246", "0.6827429", "0.6785877", "0.67457724", "0.67337656", "0.66984266", "0.6672201", "0.664963", "0.66478425", "0.66115594", "0.66090375", "0.659621", "0.65857726", "0.65661186", "0.6555576", "0.6542806" ]
0.7510122
0
This iterator waits until there is potentially new tip information. New tip information means a new peer connected or a new block arrived. Then it yields the peer with the highest total difficulty. It continues indefinitely, until this service is cancelled.
async def wait_tip_info(self) -> AsyncIterator[BaseChainPeer]:
    if self.is_cancelled:
        raise ValidationError("%s is cancelled, new tip info is impossible", self)
    elif not self.is_running:
        await self.events.started.wait()

    with self._subscriber() as new_tip_event:
        while self.is_operational:
            try:
                highest_td_peer = self._peer_pool.highest_td_peer
            except NoConnectedPeers:
                # no peers are available right now, skip the new tip info yield
                pass
            else:
                yield highest_td_peer

            await self.wait(new_tip_event.wait())
            new_tip_event.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _match_predictive_node_requests_to_peers(self) -> None:\n # If self._queen_tracker terminates we need to exit as well, so check that on every\n # iteration.\n while self.manager.is_running and self._queen_tracker.get_manager().is_running:\n try:\n batch_id, hashes = await asyncio.wait_for(\n self._maybe_useful_nodes.get(eth_constants.MAX_STATE_FETCH),\n timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,\n )\n except asyncio.TimeoutError:\n # Reduce the number of predictive peers, we seem to have plenty\n if self._min_predictive_peers > 0:\n self._min_predictive_peers -= 1\n self.logger.debug(\n \"Decremented predictive peers to %d\",\n self._min_predictive_peers,\n )\n # Re-attempt\n continue\n\n # Find any hashes that were discovered through other means, like urgent requests:\n existing_hashes = await asyncio.get_event_loop().run_in_executor(\n None,\n self._get_unique_present_hashes,\n hashes,\n )\n # If any hashes are already found, clear them out and retry\n if existing_hashes:\n # Wake up any paused preview threads\n await self._wakeup_preview_waiters(existing_hashes)\n # Clear out any tasks that are no longer necessary\n await self._maybe_useful_nodes.complete(batch_id, tuple(existing_hashes))\n # Restart from the top\n continue\n\n try:\n peer = await asyncio.wait_for(\n self._queen_tracker.pop_fastest_peasant(),\n timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,\n )\n except asyncio.TimeoutError:\n # Increase the minimum number of predictive peers, we seem to not have enough\n new_predictive_peers = min(\n self._min_predictive_peers + 1,\n # Don't reserve more than half the peers for prediction\n self._num_peers // 2,\n )\n if new_predictive_peers != self._min_predictive_peers:\n self.logger.debug(\n \"Updating predictive peer count from %d to %d\",\n self._min_predictive_peers,\n new_predictive_peers,\n )\n self._min_predictive_peers = new_predictive_peers\n\n cancel_attempt = True\n else:\n if peer.eth_api.get_node_data.is_requesting:\n self.logger.debug(\n \"Want predictive nodes from %s, but it has an active request, skipping...\",\n peer,\n )\n self._queen_tracker.insert_peer(peer, NON_IDEAL_RESPONSE_PENALTY)\n cancel_attempt = True\n else:\n cancel_attempt = False\n\n if cancel_attempt:\n # Prepare to restart\n await self._maybe_useful_nodes.complete(batch_id, ())\n continue\n\n self._num_predictive_requests_by_peer[peer] += 1\n self._predictive_requests += 1\n\n self.manager.run_task(\n self._get_predictive_nodes_from_peer,\n peer,\n hashes,\n batch_id,\n )\n\n if self.manager.is_running and not self._queen_tracker.get_manager().is_running:\n self.logger.info(\n \"Backfill is complete, halting predictive downloads...\"\n )", "def findClosePeers(self, key, count, _rpcPeerID=None):", "def iter_mempool_tips_from_best_index(self) -> Iterator[Transaction]:\n assert self.indexes is not None\n if self.indexes.mempool_tips is not None:\n yield from self.indexes.mempool_tips.iter(self)\n else:\n yield from self.iter_mempool_tips_from_tx_tips()", "def _try_peers(self, peers):\n for peer_entry in peers:\n if peer_entry['id'] == self.peer_id:\n continue\n\n print('Trying peer: {}'.format(peer_entry))\n peer = Peer(peer_entry['id'], peer_entry['ip'], peer_entry['port'], self._torrent)\n try:\n peer.connect(self.peer_id)\n except PeerConnectionError:\n continue\n else:\n self._peers.append(peer)\n peer.subscribe_for_messages_to_client(self.peer_message_receiver(peer))", "def iter_mempool_from_tx_tips(self) -> Iterator[Transaction]:\n from hathor.transaction.storage.traversal import 
BFSTimestampWalk\n\n root = self.iter_mempool_tips_from_tx_tips()\n walk = BFSTimestampWalk(self, is_dag_funds=True, is_dag_verifications=True, is_left_to_right=False)\n for tx in walk.run(root):\n tx_meta = tx.get_metadata()\n # XXX: skip blocks and tx-tips that have already been confirmed\n if tx_meta.first_block is not None or tx.is_block:\n walk.skip_neighbors(tx)\n else:\n assert isinstance(tx, Transaction)\n yield tx", "def iter_mempool_tips_from_tx_tips(self) -> Iterator[Transaction]:\n assert self.indexes is not None\n tx_tips = self.indexes.tx_tips\n\n for interval in tx_tips[self.latest_timestamp + 1]:\n tx = self.get_transaction(interval.data)\n tx_meta = tx.get_metadata()\n assert isinstance(tx, Transaction) # XXX: tx_tips only has transactions\n # XXX: skip txs that have already been confirmed\n if tx_meta.first_block:\n continue\n yield tx", "async def get_peak_loop(self):\n while True:\n try:\n self.blockchain_state = await self.node_rpc_client.get_blockchain_state()\n self.wallet_synced = await self.wallet_rpc_client.get_synced()\n await asyncio.sleep(30)\n except asyncio.CancelledError:\n self.log.info(\"Cancelled get_peak_loop, closing\")\n return\n except Exception as e:\n self.log.error(f\"Unexpected error in get_peak_loop: {e}\")\n await asyncio.sleep(30)", "async def get_peak_loop(self):\n while True:\n try:\n self.blockchain_state = await self.node_rpc_client.get_blockchain_state()\n self.wallet_synced = await self.wallet_rpc_client.get_synced()\n await asyncio.sleep(30)\n except asyncio.CancelledError:\n self.log.info(\"Cancelled get_peak_loop, closing\")\n return\n except Exception as e:\n self.log.error(f\"Unexpected error in get_peak_loop: {e}\")\n await asyncio.sleep(30)", "def getPeers(self, peerType):\r\n raise NotImplementedError()", "def resolve(self):\n # Initialize the winner chain with the local chain\n winner_chain = self.chain\n replace = False\n for node in self.__peer_nodes:\n url = 'http://{}/chain'.format(node)\n try:\n # Send a request and store the response\n response = requests.get(url)\n # Retrieve the JSON data as a dictionary\n node_chain = response.json()\n # Convert the dictionary list to a list of block AND transaction objects\n node_chain = [Block(block['index'], block['previous_hash'], [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']],\n [Chipsaction(tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']],\n [Messsaction(tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']],\n block['proof'], block['timestamp']) for block in node_chain]\n node_chain_length = len(node_chain)\n local_chain_length = len(winner_chain)\n # Store the received chain as the current winner chain if it's longer AND valid\n if node_chain_length > local_chain_length and Verification.verify_chain(node_chain):\n winner_chain = node_chain\n replace = True\n except requests.exceptions.ConnectionError:\n continue\n self.resolve_conflicts = False\n # Replace the local chain with the winner chain\n self.chain = winner_chain\n if replace:\n self.__open_transactions = []\n self.__open_chipsactions = []\n self.__open_messsactions = []\n self.save_data()\n return replace", "def proof_of_work(self):\n last_block = self.__chain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not Verification.valid_proof(self.__open_transfers, last_hash, 
proof):\n proof += 1\n print(proof)\n return proof", "async def _match_urgent_node_requests_to_peers(self) -> None:\n while self.manager.is_running:\n urgent_batch_id, urgent_hashes = await self._node_tasks.get(\n eth_constants.MAX_STATE_FETCH\n )\n\n # Get best peer, by GetNodeData speed\n queen = await self._queen_tracker.get_queen_peer()\n\n queen_is_requesting = queen.eth_api.get_node_data.is_requesting\n\n if queen_is_requesting:\n # Our best peer for node data has an in-flight GetNodeData request\n # Probably, backfill is asking this peer for data\n # This is right in the critical path, so we'd prefer this never happen\n self.logger.debug(\n \"Want to download urgent data, but %s is locked on other request\",\n queen,\n )\n # Don't do anything different, allow the request lock to handle the situation\n\n self._num_urgent_requests_by_peer[queen] += 1\n self._urgent_requests += 1\n\n await self._find_urgent_nodes(\n queen,\n urgent_hashes,\n urgent_batch_id,\n )", "def blocks(self):\n for i_retry in range(RETRY):\n try:\n r = self.session.get(self.url, stream=True)\n r.raise_for_status()\n for block_id, start, end in self.ranges:\n block = iter_content(r, end - start)\n yield block_id, start, end, block\n except:\n print(f'{self.ident} retrying {i_retry + 1} times')\n # Restart from scratch\n else:\n break # Download finished\n else:\n print(f'{self.ident} retry failed')", "def iter_mempool_from_best_index(self) -> Iterator[Transaction]:\n assert self.indexes is not None\n if self.indexes.mempool_tips is not None:\n yield from self.indexes.mempool_tips.iter_all(self)\n else:\n yield from self.iter_mempool_from_tx_tips()", "async def _new_blocks(self) -> AsyncGenerator[Eth1Block, None]:\n while True:\n try:\n block = self._eth1_data_provider.get_block(\"latest\")\n except BlockNotFound:\n raise Eth1MonitorValidationError(\"Fail to get latest block\")\n target_block_number = BlockNumber(block.number - self._num_blocks_confirmed)\n from_block_number = self.highest_processed_block_number\n if target_block_number > from_block_number:\n # From `highest_processed_block_number` to `target_block_number`\n for block_number in range(\n from_block_number + 1, target_block_number + 1\n ):\n try:\n block = self._eth1_data_provider.get_block(\n BlockNumber(block_number)\n )\n except BlockNotFound:\n raise Eth1MonitorValidationError(\n f\"Block does not exist for block number={block_number}\"\n )\n yield block\n await trio.sleep(self._polling_period)", "async def collect_pool_rewards_loop(self):\n\n while True:\n try:\n if not self.blockchain_state[\"sync\"][\"synced\"]:\n await asyncio.sleep(60)\n continue\n\n self.scan_p2_singleton_puzzle_hashes = await self.store.get_pay_to_singleton_phs()\n\n scan_phs: List[bytes32] = list(self.scan_p2_singleton_puzzle_hashes)\n peak_height = self.blockchain_state[\"peak\"].height\n\n # Only get puzzle hashes with a certain number of confirmations or more, to avoid reorg issues\n coin_records: List[CoinRecord] = await self.node_rpc_client.get_coin_records_by_puzzle_hashes(\n scan_phs,\n include_spent_coins=False,\n start_height=self.scan_start_height,\n )\n self.log.info(\n f\"Scanning for block rewards from {self.scan_start_height} to {peak_height}. 
\"\n f\"Found: {len(coin_records)}\"\n )\n ph_to_amounts: Dict[bytes32, int] = {}\n ph_to_coins: Dict[bytes32, List[CoinRecord]] = {}\n not_buried_amounts = 0\n for cr in coin_records:\n self.log.info(f\"coin_record: {cr}\")\n if cr.confirmed_block_index > peak_height - self.confirmation_security_threshold:\n not_buried_amounts += cr.coin.amount\n continue\n if cr.coin.puzzle_hash not in ph_to_amounts:\n ph_to_amounts[cr.coin.puzzle_hash] = 0\n ph_to_coins[cr.coin.puzzle_hash] = []\n ph_to_amounts[cr.coin.puzzle_hash] += cr.coin.amount\n ph_to_coins[cr.coin.puzzle_hash].append(cr)\n\n # For each p2sph, get the FarmerRecords\n farmer_records = await self.store.get_farmer_records_for_p2_singleton_phs(\n set([ph for ph in ph_to_amounts.keys()])\n )\n\n # For each singleton, create, submit, and save a claim transaction\n claimable_amounts = 0\n not_claimable_amounts = 0\n for rec in farmer_records:\n if rec.is_pool_member:\n claimable_amounts += ph_to_amounts[rec.p2_singleton_puzzle_hash]\n else:\n not_claimable_amounts += ph_to_amounts[rec.p2_singleton_puzzle_hash]\n\n if len(coin_records) > 0:\n self.log.info(f\"Claimable amount: {claimable_amounts / (10**12)}\")\n self.log.info(f\"Not claimable amount: {not_claimable_amounts / (10**12)}\")\n self.log.info(f\"Not buried amounts: {not_buried_amounts / (10**12)}\")\n\n for rec in farmer_records:\n if rec.is_pool_member:\n singleton_tip: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(\n rec.singleton_tip\n )\n if singleton_tip is None:\n continue\n\n singleton_coin_record: Optional[\n CoinRecord\n ] = await self.node_rpc_client.get_coin_record_by_name(singleton_tip.name())\n if singleton_coin_record is None:\n continue\n if singleton_coin_record.spent:\n self.log.warning(\n f\"Singleton coin {singleton_coin_record.coin.name()} is spent, will not \"\n f\"claim rewards\"\n )\n continue\n\n spend_bundle = await create_absorb_transaction(\n self.node_rpc_client,\n rec,\n self.blockchain_state[\"peak\"].height,\n ph_to_coins[rec.p2_singleton_puzzle_hash],\n self.constants.GENESIS_CHALLENGE,\n )\n\n if spend_bundle is None:\n self.log.info(f\"spend_bundle is None. 
{spend_bundle}\")\n continue\n\n push_tx_response: Dict = await self.node_rpc_client.push_tx(spend_bundle)\n if push_tx_response[\"status\"] == \"SUCCESS\":\n block_index: List[bytes32] = []\n # TODO(pool): save transaction in records\n for cr in ph_to_coins[rec.p2_singleton_puzzle_hash]:\n if cr.confirmed_block_index not in block_index:\n block_index.append(cr.confirmed_block_index)\n reward = RewardRecord(\n rec.launcher_id,\n cr.coin.amount,\n cr.confirmed_block_index,\n cr.coin.puzzle_hash,\n cr.timestamp\n )\n self.log.info(f\"add reward record: {reward}\")\n await self.store.add_reward_record(reward)\n self.log.info(f\"Submitted transaction successfully: {spend_bundle.name().hex()}\")\n else:\n self.log.error(f\"Error submitting transaction: {push_tx_response}\")\n await asyncio.sleep(self.collect_pool_rewards_interval)\n except asyncio.CancelledError:\n self.log.info(\"Cancelled collect_pool_rewards_loop, closing\")\n return\n except Exception as e:\n error_stack = traceback.format_exc()\n self.log.error(f\"Unexpected error in collect_pool_rewards_loop: {e} {error_stack}\")\n await asyncio.sleep(self.collect_pool_rewards_interval)", "async def peak_post_processing(\n self,\n block: FullBlock,\n state_change_summary: StateChangeSummary,\n peer: Optional[WSChiaConnection],\n ) -> PeakPostProcessingResult:\n\n record = state_change_summary.peak\n difficulty = self.blockchain.get_next_difficulty(record.header_hash, False)\n sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False)\n\n self.log.info(\n f\"🌱 Updated peak to height {record.height}, weight {record.weight}, \"\n f\"hh {record.header_hash}, \"\n f\"forked at {state_change_summary.fork_height}, rh: {record.reward_infusion_new_challenge}, \"\n f\"total iters: {record.total_iters}, \"\n f\"overflow: {record.overflow}, \"\n f\"deficit: {record.deficit}, \"\n f\"difficulty: {difficulty}, \"\n f\"sub slot iters: {sub_slot_iters}, \"\n f\"Generator size: \"\n f\"{len(bytes(block.transactions_generator)) if block.transactions_generator else 'No tx'}, \"\n f\"Generator ref list size: \"\n f\"{len(block.transactions_generator_ref_list) if block.transactions_generator else 'No tx'}\"\n )\n\n if (\n self.full_node_store.previous_generator is not None\n and state_change_summary.fork_height < self.full_node_store.previous_generator.block_height\n ):\n self.full_node_store.previous_generator = None\n\n hints_to_add, lookup_coin_ids = get_hints_and_subscription_coin_ids(\n state_change_summary,\n self.subscriptions.has_coin_subscription,\n self.subscriptions.has_ph_subscription,\n )\n await self.hint_store.add_hints(hints_to_add)\n\n sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash)\n assert sub_slots is not None\n\n if not self.sync_store.get_sync_mode():\n self.blockchain.clean_block_records()\n\n fork_block: Optional[BlockRecord] = None\n if state_change_summary.fork_height != block.height - 1 and block.height != 0:\n # This is a reorg\n fork_hash: Optional[bytes32] = self.blockchain.height_to_hash(state_change_summary.fork_height)\n assert fork_hash is not None\n fork_block = self.blockchain.block_record(fork_hash)\n\n fns_peak_result: FullNodeStorePeakResult = self.full_node_store.new_peak(\n record,\n block,\n sub_slots[0],\n sub_slots[1],\n fork_block,\n self.blockchain,\n sub_slot_iters,\n difficulty,\n )\n\n if fns_peak_result.new_signage_points is not None and peer is not None:\n for index, sp in fns_peak_result.new_signage_points:\n assert (\n sp.cc_vdf is not None\n and 
sp.cc_proof is not None\n and sp.rc_vdf is not None\n and sp.rc_proof is not None\n )\n await self.signage_point_post_processing(\n RespondSignagePoint(index, sp.cc_vdf, sp.cc_proof, sp.rc_vdf, sp.rc_proof), peer, sub_slots[1]\n )\n\n if sub_slots[1] is None:\n assert record.ip_sub_slot_total_iters(self.constants) == 0\n # Ensure the signage point is also in the store, for consistency\n self.full_node_store.new_signage_point(\n record.signage_point_index,\n self.blockchain,\n record,\n record.sub_slot_iters,\n SignagePoint(\n block.reward_chain_block.challenge_chain_sp_vdf,\n block.challenge_chain_sp_proof,\n block.reward_chain_block.reward_chain_sp_vdf,\n block.reward_chain_sp_proof,\n ),\n skip_vdf_validation=True,\n )\n\n # Update the mempool (returns successful pending transactions added to the mempool)\n new_npc_results: List[NPCResult] = state_change_summary.new_npc_results\n mempool_new_peak_result: List[Tuple[SpendBundle, NPCResult, bytes32]] = await self.mempool_manager.new_peak(\n self.blockchain.get_peak(), new_npc_results[-1] if len(new_npc_results) > 0 else None\n )\n\n # Check if we detected a spent transaction, to load up our generator cache\n if block.transactions_generator is not None and self.full_node_store.previous_generator is None:\n generator_arg = detect_potential_template_generator(block.height, block.transactions_generator)\n if generator_arg:\n self.log.info(f\"Saving previous generator for height {block.height}\")\n self.full_node_store.previous_generator = generator_arg\n\n return PeakPostProcessingResult(mempool_new_peak_result, fns_peak_result, hints_to_add, lookup_coin_ids)", "async def new_peak(self, request: full_node_protocol.NewPeak, peer: WSChiaConnection) -> None:\n\n try:\n seen_header_hash = self.sync_store.seen_header_hash(request.header_hash)\n # Updates heights in the UI. 
Sleeps 1.5s before, so other peers have time to update their peaks as well.\n # Limit to 3 refreshes.\n if not seen_header_hash and len(self._ui_tasks) < 3:\n self._ui_tasks.add(asyncio.create_task(self._refresh_ui_connections(1.5)))\n # Prune completed connect tasks\n self._ui_tasks = set(filter(lambda t: not t.done(), self._ui_tasks))\n except Exception as e:\n self.log.warning(f\"Exception UI refresh task: {e}\")\n\n # Store this peak/peer combination in case we want to sync to it, and to keep track of peers\n self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)\n\n if self.blockchain.contains_block(request.header_hash):\n return None\n\n # Not interested in less heavy peaks\n peak: Optional[BlockRecord] = self.blockchain.get_peak()\n curr_peak_height = uint32(0) if peak is None else peak.height\n if peak is not None and peak.weight > request.weight:\n return None\n\n if self.sync_store.get_sync_mode():\n # If peer connects while we are syncing, check if they have the block we are syncing towards\n target_peak = self.sync_store.target_peak\n if target_peak is not None and request.header_hash != target_peak.header_hash:\n peak_peers: Set[bytes32] = self.sync_store.get_peers_that_have_peak([target_peak.header_hash])\n # Don't ask if we already know this peer has the peak\n if peer.peer_node_id not in peak_peers:\n target_peak_response: Optional[RespondBlock] = await peer.call_api(\n FullNodeAPI.request_block,\n full_node_protocol.RequestBlock(target_peak.height, False),\n timeout=10,\n )\n if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):\n self.sync_store.peer_has_block(\n target_peak.header_hash,\n peer.peer_node_id,\n target_peak_response.block.weight,\n target_peak.height,\n False,\n )\n else:\n if request.height <= curr_peak_height + self.config[\"short_sync_blocks_behind_threshold\"]:\n # This is the normal case of receiving the next block\n if await self.short_sync_backtrack(\n peer, curr_peak_height, request.height, request.unfinished_reward_block_hash\n ):\n return None\n\n if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:\n # This is the case of syncing up more than a few blocks, at the start of the chain\n self.log.debug(\"Doing batch sync, no backup\")\n await self.short_sync_batch(peer, uint32(0), request.height)\n return None\n\n if request.height < curr_peak_height + self.config[\"sync_blocks_behind_threshold\"]:\n # This case of being behind but not by so much\n if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):\n return None\n\n # This is the either the case where we were not able to sync successfully (for example, due to the fork\n # point being in the past), or we are very far behind. 
Performs a long sync.\n self._sync_task = asyncio.create_task(self._sync())", "def decide_difficulty(self):\n dt = 0\n n = 0\n\n if self.last_difficulty_calculation + DIFFICULTY_RECALCULATION_PERIOD > self.last_block_height:\n return\n\n nextblock = self.last_blockhash\n for i in range(BLOCK_DIFFULTY_LOOKBACK):\n parent = self.blocks[nextblock].prev_blockhash\n if parent == ROOT_HASH:\n break\n n += 1\n dt += self.blocks[nextblock].timestamp - self.blocks[parent].timestamp\n if n > 0:\n self.last_difficulty_calculation = self.last_block_height\n avg_time_per_block = dt / float(n)\n self.next_difficulty = self.next_difficulty * (BLOCK_PERIOD / avg_time_per_block)\n print(\"New difficulty = {0}\".format(self.next_difficulty))", "def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()", "def mine_blocks(self, count):\n\n # Clear out block announcements from each p2p listener\n [x.clear_block_announcements() for x in self.nodes[0].p2ps]\n self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)\n return int(self.nodes[0].getbestblockhash(), 16)", "def process_results(self):\n\n while not self.results.empty():\n mvt = self.results.get()\n\n for peer in self.peers_list:\n peer.check_mvt(mvt)\n\n self.results.task_done()", "def countPeers(self, peerType):\r\n raise NotImplementedError()", "def iterate_retries(self, state=None):\n for atom in self.iterate_nodes((co.RETRY,)):\n if not state or self.get_state(atom) == state:\n yield atom", "def getConnectedPeers(self, peerType):\r\n raise NotImplementedError()", "def get_peers(self):\n self.peers = []\n retriever_methods = [\n m\n for m in rtorrent9.peer.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # need to leave 2nd arg empty (dunno why)\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"p.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n self.peers.append(Peer(self._rt_obj, self.info_hash, **results_dict))\n\n return self.peers", "def check_bcr_catchup(self):\n logger.debug(f\"Checking if BlockRequests has caught up {len(BC.Default().BlockRequests)}\")\n\n # test, perhaps there's some race condition between slow startup and throttle sync, otherwise blocks will never go down\n for peer in self.Peers: # type: NeoNode\n peer.stop_block_loop(cancel=False)\n peer.stop_peerinfo_loop(cancel=False)\n peer.stop_header_loop(cancel=False)\n\n if len(BC.Default().BlockRequests) > 0:\n for peer in self.Peers:\n peer.keep_alive()\n 
peer.health_check(HEARTBEAT_BLOCKS)\n peer_bcr_len = len(peer.myblockrequests)\n # if a peer has cleared its queue then reset heartbeat status to avoid timing out when resuming from \"check_bcr\" if there's 1 or more really slow peer(s)\n if peer_bcr_len == 0:\n peer.start_outstanding_data_request[HEARTBEAT_BLOCKS] = 0\n\n print(f\"{peer.prefix} request count: {peer_bcr_len}\")\n if peer_bcr_len == 1:\n next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1)\n print(f\"{peer.prefix} {peer.myblockrequests} {next_hash}\")\n else:\n # we're done catching up. Stop own loop and restart peers\n self.stop_check_bcr_loop()\n self.check_bcr_loop = None\n logger.debug(\"BlockRequests have caught up...resuming sync\")\n for peer in self.Peers:\n peer.ProtocolReady() # this starts all loops again\n # give a little bit of time between startup of peers\n time.sleep(2)", "def test_single_chain(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 100 blocks in a row with no transaction but the genesis\n blocks = add_new_blocks(manager, 100, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata(force_reload=True)\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 30, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 50 more blocks in a row with no transactions between them\n blocks = add_new_blocks(manager, 50)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n # Mine 15 more blocks with 10 transactions between each block\n for _ in range(15):\n txs = add_new_transactions(manager, 10, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n blocks = add_new_blocks(manager, 1)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n self.assertConsensusValid(manager)", "def sender_iter(self):\n while 1:\n yield self.send_next()", "async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}" ]
[ "0.5500915", "0.5470931", "0.5319006", "0.5311231", "0.5291946", "0.521966", "0.5215341", "0.5215341", "0.5193207", "0.5080636", "0.50669307", "0.50526375", "0.500329", "0.500004", "0.4986424", "0.4966308", "0.4956242", "0.49543557", "0.49460462", "0.49340892", "0.49252045", "0.4891927", "0.48916927", "0.4863693", "0.4856381", "0.48482618", "0.48396617", "0.4830692", "0.48234415", "0.4817869" ]
0.7476101
0
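A brief consumption sketch for the wait_tip_info document above: it shows that the method is meant to be driven with async for, yielding the current highest-total-difficulty peer each time tip information may have changed. The tip_monitor name and the loop body are illustrative assumptions.

# Sketch only: `tip_monitor` is assumed to be a running service that exposes
# wait_tip_info() exactly as in the document above.
async def follow_best_peer(tip_monitor) -> None:
    async for peer in tip_monitor.wait_tip_info():
        # Each yielded value is the highest-total-difficulty peer at the moment
        # a new peer connected or a new block arrived.
        print(f"highest-TD peer is now: {peer}")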
Inserts tweet into tweets collection
def insert_tweet(status):
    status['replies'] = []
    return db.tweets.insert(status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "def insert_tweet(value):\n execute(query=_query['ins_tweet'],\n value=value,\n single=False)\n\n id_value = [[element[0]]for element in value]\n\n execute(query=_query['ins_sentiment'],\n value=id_value, # Tweet ID value\n single=False\n )", "def insert_into_tweets(self, infos):\n query = \"insert into tweets(tweet_id, insert_date, created_at, hashtag) values(?, ?, ?, ?);\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.executemany(query, infos)", "def insert_tweets(conn: Connection, fetch_data: Iterable[Dict]) -> None:\n\n s = Session(bind=conn)\n meta = MetaData()\n meta.reflect(bind=conn)\n s.add_all([Tweet(**t) for t in fetch_data])\n s.commit()", "def fillTweetInDB(self):\n sqlInsertTweets = \"INSERT INTO tweet content VALUES %s\"\n mycursor.executemany(sqlInsertTweets,self.content)\n mydb.commit()", "def add_tweet():\r\n tweet = models.Tweet(text_content=request.json['content'], username=request.json['username'],\r\n timestamp=datetime.datetime.now())\r\n db.session.add(tweet)\r\n db.session.commit()\r\n\r\n return {'id': tweet.id}", "def process_tweets(collection):\n\n\twith open('positive-tweets.txt') as p:\n\t\tprint \"{0}: Inserting positive tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in p.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 1})\n\tp.close()\n\n\twith open('negative-tweets.txt') as n:\n\t\tprint \"{0}: Inserting negative tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in n.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 0})\n\tn.close()", "def persist_db(database, tweets):\n log.debug(\"{} tweets to db\".format(len(tweets)))\n\n for tweet in tweets:\n tweet['_id'] = tweet['id_str']\n database.update(tweets)", "def save_tweet(self, twitter) -> None:\n if isinstance(twitter, dict):\n json_data = twitter\n else:\n json_data = json.loads(twitter)\n\n try:\n breakpoint()\n self.db.tweets.find_one_and_update(\n {'id_str': json_data['id_str']},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n upsert=True,\n )\n except Exception as e:\n log.error(e)", "def add_tweet(self, tweet):\n if tweet.guid not in self.guids:\n self.guids.append(tweet.guid)\n self.data.append(tweet)", "def exportToDB(self, tweets):\n for t in range(len(tweets)):\n for x in range(len(tweets[t])):\n doc_ref = self.fs_db.collection(u'twitter').document(str(tweets[t][1]))\n doc_ref.set({\n u'created_date': str(tweets[t][0]),\n u'id': str(tweets[t][1]),\n u'tweet': tweets[t][2],\n u'screen_name': tweets[t][3],\n u'name': tweets[t][4],\n u'likes': tweets[t][5],\n u'retweets': tweets[t][6],\n u'location': tweets[t][7]\n })", "def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n 
tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)", "def insert_data(data, collec, many):\n db = client.get_database('tweetstorm')\n collection = db.get_collection(collec)\n if many:\n collection.insert_many(data)\n logger.info(f\"{ymdhms()} inserted {len(data)} tweets to {collec} collection\")\n else:\n collection.insert_one(data)\n logger.info(f\"{ymdhms()} inserted data {data} to {collec} collection\")", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def on_data(self, data):\n\n t = json.loads(data) \n 
tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n }\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def add(self, url):\n record_sql = '''\n INSERT INTO {} (url)\n VALUES (?)\n '''.format(\n self.tablename\n )\n try:\n with self.conn:\n self.conn.execute(record_sql, (url,))\n except sqlite3.IntegrityError:\n logger.exception('Already tweeted %s!', url)", "def postTweet(self, userId, tweetId):\n self.time += 1\n self.tweets[userId] = self.tweets.get(userId, []) + [(-self.time, tweetId)]", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def postTweet(self, userId, tweetId):\n if userId not in self.user_tweets:\n self.user_tweets[userId] = []\n \n self.user_tweets[userId].insert(0, (tweetId, self.buildSortId()))\n self.user_tweets[userId] = self.user_tweets[userId][:10]", "def postTweet(self, userId, tweetId):\r\n self.timestamp += 1\r\n self.tweets_by_user[userId].append((self.timestamp, tweetId))", "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def postTweet(self, userId: int, tweetId: int) -> None:\n if userId not in self.users.keys():\n self.users[userId] = user()\n self.users[userId].tweets.append(tweetId)\n self.tweetTime[tweetId] = self.time\n self.time += 1", "def store_tweet(tweet, topic):\n try:\n tweet = tweet.replace(\"'\", \"\\\\'\" )\n query = f\"insert into {db_schema}.{db_table_tweet} set tweet='{tweet}', topic='{topic}'\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweet_id = int(cur.lastrowid)\n logger.info(f'ID_TWEET: {tweet_id}') \n return tweet_id\n except Exception as ex:\n logger.exception(ex)", "def on_success(self, data):\n if 'text' not in data:\n logging.warning(\"Recieved tweet without text\")\n return\n\n # Save the name of the collection task alongside the tweet data\n data['collection'] = self.name\n\n # Calculate a timestamp object from the data\n ts_float = float(data['timestamp_ms'])\n data['timestamp_obj'] = datetime.utcfromtimestamp(ts_float/1000)\n\n # Insert 
the tweet into the database\n insertid = None\n if self.db is not None:\n insertid = self.db.insert_one(data).inserted_id\n\n # Call the callback functions if exists\n if self.callbacks is not None:\n for f in self.callbacks:\n f(self.name, data, insertid)", "def postTweet(self, userId: int, tweetId: int) -> None:\n # Time Complexity: O(1)\n if userId not in self.tweets:\n self.tweets[userId] = []\n\n self.tweets[userId].append((-self.timestamp, tweetId))\n self.timestamp += 1", "def postTweet(self, userId: int, tweetId: int) -> None:\n self.users.add(userId)\n self.user_post[userId].append((tweetId, self.time))\n self.time += 1", "def add_tweet():\n if not request.json or 'author_id' not in request.json or 'text' not in request.json:\n abort(400)\n\n db = get_db()\n\n author_id = request.json.get('author_id')\n text = request.json.get('text')\n pub_date = int(time.time())\n\n db.execute('''insert into message (author_id, text, pub_date) values (?, ?, ?)''', (author_id, text, pub_date))\n db.commit()\n flash('Message recorded succesfully')\n message = {\"author_id\": author_id, \"text\": text, \"pub_date\": pub_date}\n return jsonify({'message': message}), 201", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def postTweet(self, userId, tweetId):\n if userId in self.tweets:\n self.tweets[userId].append([-self.time, tweetId])\n else:\n self.tweets[userId] = [[-self.time, tweetId]]\n self.time += 1", "def postTweet(self, userId: int, tweetId: int) -> None:\n ts = time.time()\n self.posts[userId].append((ts, tweetId))" ]
[ "0.747348", "0.7430668", "0.7240626", "0.717541", "0.7118906", "0.7073895", "0.6995458", "0.6932403", "0.6901207", "0.6721981", "0.65963966", "0.65462726", "0.65457416", "0.652735", "0.6409189", "0.640749", "0.6347634", "0.6325132", "0.62893474", "0.62866", "0.6234218", "0.6224336", "0.6213114", "0.6203269", "0.6186357", "0.61748135", "0.6171999", "0.61540365", "0.61532223", "0.6129429" ]
0.74408054
1
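The insert_tweet document above seeds each stored tweet with an empty replies list so later updates can append to it. A hedged sketch of the same pattern with a current PyMongo client follows; the connection URI, the database name, and the use of insert_one instead of the legacy insert are assumptions for illustration.

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed connection details
db = client.twitter_demo                           # assumed database name

def insert_tweet(status):
    # Mirror the document above: start with an empty replies array so replies
    # can later be appended with $push, then store the tweet.
    status['replies'] = []
    return db.tweets.insert_one(status).inserted_id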
Adds a reply to the saved tweet
def add_tweet_reply(tweet_id, user, text):
    reply = {'user': user, 'text': text}
    return db.tweets.update(
        {'id_str': tweet_id},
        {'$push': {'replies': reply}},
        True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_reply(self, comment):\n\t\tpass", "def reply_to_tweet():\n\n print('retrieving and replying to tweets...')\n all_mentions = api.mentions_timeline()\n\n # The content of the reply that the bot will send.\n rap_message = ' yo yo yo yo'\n\n for mention in reversed(all_mentions):\n\n # print(str(mention.id) + '-' + mention.text)\n\n if 'rap for me' in mention.text.lower():\n # checks if the bot received a request to deliver a rap\n print('received a request')\n print('dropping a new single...')\n # Checks if the latest mention came from the same person.\n if mention.id == mention.id[0]:\n # Posts a tweet saying the bot is 'too tired' and won't generate a new rap.\n api.update_status('@' + mention.user.screen_name + ' yo sorry I am too tired right now')\n else:\n # Posts a tweet with the rap to the user.\n api.update_status('@' + mention.user.screen_name + rap_message, mention.id)\n print('single dropped.')", "async def reply_tweet(reply: str,\n in_reply_to: str = Query(None, alias=\"link of tweet\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"),\n status_id: int = Query(None, alias=\"id of tweet\"),\n user: User = Depends(get_current_user),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to make tweets\")\n if not (in_reply_to or status_id):\n raise HTTPException(400, detail=\"Please enter either a Tweet ID or link to reply to\")\n\n if in_reply_to:\n regex = re.match(\"https://twitter.com/(?P<username>[\\w]+)/status/(?P<id>[\\d]+)\", in_reply_to)\n status_id = regex.group(\"id\")\n\n url = \"https://api.twitter.com/1.1/statuses/update.json\"\n params = dict(status=reply,\n in_reply_to_status_id=status_id,\n auto_populate_reply_metadata=True\n )\n auth = user.get_oauth1_token()\n\n r = requests.post(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n tweet = r.json()\n\n new_tweet = Tweet(**tweet)\n user.tweets.append(new_tweet)\n user.requests_made += 1\n\n session.commit()\n return tweet", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def post_reply(request, pk=None):\n user = request.user\n reply_original = Post.objects.get(pk=pk)\n\n check = check_reply(user)\n if check == 'not_auth':\n return redirect('posts:post', pk=reply_original.pk)\n\n reply = Post(content=request.POST['content_reply'],\n author=user,\n reply_original=reply_original)\n\n reply.save()\n reply_original.reply.add(reply)\n\n return redirect('posts:post', pk=reply_original.pk)", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "async def append(self, reply: Reply) \\\n -> None:\n result = reply.result\n identifier = result.get(f.IDENTIFIER.nm)\n txnId = 
result.get(TXN_ID)\n logger.debug(\"Reply being sent {}\".format(reply))\n if self._isNewTxn(identifier, reply, txnId):\n self.addToProcessedTxns(identifier, txnId, reply)\n if identifier not in self.responses:\n self.responses[identifier] = asyncio.Queue()\n await self.responses[identifier].put(reply)", "def add_answer(self, text, responder_id):\n answer = text.split('[Answer]')[1].strip()\n m = re.search('\\[(qid):([0-9]*)\\]', answer)\n if m is not None:\n question_id = m.group(2)\n answer_text = answer.split('[qid:{0}]'.format(question_id))[1].strip()\n # stores present answer\n self.cur.execute(\n \"INSERT INTO answer (answer, responder_id, question_id) VALUES (%s, %s, %s);\",\n (answer_text, responder_id, question_id))\n self.cur.execute(\n \"INSERT INTO users (user_id) SELECT (%s) WHERE NOT EXISTS (SELECT * FROM users WHERE user_id=%s);\",\n (str(responder_id), str(responder_id)))\n self.event_handler.new_answer(question_id, answer, responder_id)\n else:\n self.stored_answer = False", "def careful_reply(api,reply):\r\n\r\n debug_print('Preparing to reply to #%d' % (reply.id,))\r\n normalized_tweet = reply.text.lower().strip()\r\n\r\n # Don't reply to a retweet\r\n if hasattr(reply, 'retweeted_status'):\r\n return\r\n\r\n debug_print('Replying to #%d' % (reply.id,))\r\n update = \"@%s We'd estimate about a %d percent chance, actually.\" % (reply.user.screen_name, random.randint(0,100),)\r\n return api.update_status(update, reply.id)", "def add_retweet(id):\r\n # if original tweet does not exist -> 404\r\n models.Tweet.query.get_or_404(id)\r\n\r\n retweet = models.Retweet(post_id=id, username=request.json['username'],\r\n timestamp=datetime.datetime.now())\r\n\r\n db.session.add(retweet)\r\n db.session.commit()\r\n\r\n return {'retweet_id': retweet.retweet_id}", "def insert_tweet(status):\n status['replies'] = []\n return db.tweets.insert(status)", "def answerMention(r, comment, answeredDB, helper):\n\n cards, answer = helper.parseText(comment.body)\n\n if cards and answer:\n if not answeredDB.exists(comment.parent_id, cards):\n # reply to comment\n log.info(\"replying to comment: %s %s with %s\",\n comment.id, comment.author.name, cards)\n comment.reply(answer)\n else:\n log.debug(\"forwarded mention with id: %s\", comment.id)\n # forward mentions without cards to admin\n subject = '${} /u/{} in /r/{}/ \"{}\"'.format(comment.id, comment.author,\n comment.subreddit, comment.submission.title)\n r.redditor(credentials.admin_username).message(subject, comment.body)", "def newreply(request, post_id):\n if not request.user.is_authenticated():\n return redirect('/login/?next=%s' % request.path)\n else:\n\n reply = Reply.objects.create(\n creator = request.user,\n created = datetime.datetime.now(),\n body = request.POST.get('mensaje'),)\n post = Post.objects.get(id = post_id)\n post.reply.add(reply) \n return redirect('/home/')", "def test_reply(self):\n tweet_object = self.load_tweet('reply')\n tweet_text = self.api.html_for_tweet(tweet_object)\n self.assertEqual(tweet_text,\n u'<span class=\"twython-tweet-prefix\"><a href=\"https://twitter.com/philgyford\" class=\"twython-mention\">@philgyford</a> </span>Here’s a test tweet that goes on as much as possible and includes an image. 
Hi to my fans in testland!<span class=\"twython-tweet-suffix\"> https://t.co/tzhyk2QWSr</span>')", "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def reply_this(self, user, text):\n parent = self.get_parent()\n reply_news = News.objects.create(\n user=user, content=text, reply=True, parent=parent\n )\n notification_handler(\n user,\n parent.user,\n Notification.REPLY,\n action_object=reply_news,\n id_value=str(parent.uuid_id),\n key=\"social_update\",\n )", "def new_reply(cls, thread, user, content):\n msg = cls.objects.create(thread=thread, sender=user, content=content)\n thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)\n thread.userthread_set.filter(user=user).update(deleted=False, unread=False)\n message_sent.send(sender=cls, message=msg, thread=thread, reply=True)\n #for recip in thread.userthread_set.exclude(user=user):\n # send_newmessage_mail(msg, recip.user)\n return msg", "def reply(cls, user, context, message, reply_message):\r\n pass", "def add_tweet():\n if not request.json or 'author_id' not in request.json or 'text' not in request.json:\n abort(400)\n\n db = get_db()\n\n author_id = request.json.get('author_id')\n text = request.json.get('text')\n pub_date = int(time.time())\n\n db.execute('''insert into message (author_id, text, pub_date) values (?, ?, ?)''', (author_id, text, pub_date))\n db.commit()\n flash('Message recorded succesfully')\n message = {\"author_id\": author_id, \"text\": text, \"pub_date\": pub_date}\n return jsonify({'message': message}), 201", "def reply(cls, user, context, message, reply_message):\n pass", "def forwardMentionAnswer(r, answer_msg):\n comment_id = getIdFromSubject(answer_msg.subject)\n\n if comment_id:\n src_comment = r.comment(comment_id)\n\n if src_comment:\n log.debug(\"forwarded answer to comment id: %s\", src_comment.id)\n src_comment.reply(answer_msg.body)\n answer_msg.reply(\"answer forwarded\")", "def reply(self, irc, msg, args, user, id, text):\n try:\n note = self.db.get(id)\n except dbi.NoRecordError:\n irc.error('That\\'s 
not a note in my database.', Raise=True)\n if note.to != user.id:\n irc.error('You may only reply to notes '\n 'that have been sent to you.', Raise=True)\n self.db.setRead(id)\n text += ' (in reply to #%s)' % id\n public = irc.isChannel(msg.args[0])\n try:\n target = ircdb.users.getUser(note.frm)\n except KeyError:\n irc.error('The user who sent you that note '\n 'is no longer in my user database.', Raise=True)\n id = self.db.send(user.id, note.frm, public, text)\n irc.reply(format('Note #%i sent to %s.', id, target.name))", "def say_to_user(self, user, reply):\n self.line_queue.put(user + \": \" + reply)", "async def quote_tweet(quoted_reply:str,\n attachment_url: str = Query(..., alias=\"link of tweet\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"),\n user: User = Depends(get_current_user),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to make tweets\")\n\n # regex = re.match(\"https://twitter.com/(?P<username>[\\w]+)/status/(?P<id>[\\d]+)\", in_reply_to)\n # status_id = regex.group(\"id\")\n\n url = \"https://api.twitter.com/1.1/statuses/update.json\"\n params = dict(status=quoted_reply,\n attachment_url=attachment_url,\n # auto_populate_reply_metadata=True\n )\n auth = user.get_oauth1_token()\n\n r = requests.post(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n tweet = r.json()\n\n new_tweet = Tweet(**tweet)\n user.tweets.append(new_tweet)\n user.requests_made += 1\n\n session.commit()\n return tweet", "def postTweet(self, userId, tweetId):\n if userId in self.tweets:\n self.tweets[userId].append([-self.time, tweetId])\n else:\n self.tweets[userId] = [[-self.time, tweetId]]\n self.time += 1", "def postTweet(self, userId, tweetId):\r\n self.timestamp += 1\r\n self.tweets_by_user[userId].append((self.timestamp, tweetId))", "def cmd_comment_reply(client, args):\n comment_reply = client.post_comment_reply(args.comment_id, args.image_id,\n args.comment)\n generate_output({'comment_reply': comment_reply})", "def reply_to(self, reply_to):\n\n self._reply_to = reply_to" ]
[ "0.6981421", "0.6746584", "0.6701164", "0.65988404", "0.6539125", "0.6533228", "0.6533228", "0.65100837", "0.64546037", "0.63149333", "0.61954737", "0.61765563", "0.6176331", "0.6119121", "0.6108011", "0.61018324", "0.6095213", "0.60768723", "0.6070904", "0.60695225", "0.6035818", "0.6025983", "0.5967324", "0.59391075", "0.59191436", "0.58782756", "0.5876731", "0.58624923", "0.5860373", "0.58586633" ]
0.7729473
0
Creates a components dict for the google geocoder
def create_google_components(status): components = None if status['place']: split = status['place']['full_name'].split(',') components = { 'locality': split[0].strip(), 'administrative_area': split[1].strip(), 'country': status['place']['country'] } elif status['user'].has_key('location') and status['user']['location']: split = status['user']['location'].split(',') components = { 'locality': split[0].strip(), 'administrative_area': split[1].strip() if len(split) > 1 else '' } return components
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def components_map(self):\r\n raise NotImplementedError", "def fetch(self, radius: int) -> dict:\n # convert radius integer to string\n radius: str = f\"{radius}mi\" \n # set empty dict\n geocodes: dict = {}\n # iterate through instantiated locations list\n # set search parameters to pass to callGoogle method\n for location in self.locations:\n\n params: dict = {\n\n 'address': location,\n 'sensor': 'false',\n 'key': self.__api_key['google_key']\n\n }\n # define key value pairs | city - geocode\n geocodes[location]: str = f\"{callGoogle(endpoint=self.__api_endpoint, params=params)},{radius}\"\n\n return geocodes", "def get_provider_properties_dict(self):\n pass", "def __init__(self, locality=False):\n self.google = googlemaps.Client(key=os.environ['GOOGLE_API_KEY'])\n\n if locality:\n self.bounds = self.bound(locality)\n self.northeast, self.southwest = self.nad83(self.bounds.northeast), self.nad83(self.bounds.southwest)\n self.geography = self.grid(self.northeast, self.southwest)\n\n return None", "def list_components(self) -> Dict[str, Any]:\n return {c.name: c for c in self._components}", "def geocode(location):\n\n\ttxt = fetch_mapzen_response(location)\n\tmydict = parse_mapzen_response(txt)\n\tmydict['query_text'] = location\n\treturn mydict", "def geocode(self, geocoder):\n for term in self.terms:\n # No need to geocode regions\n if not term.get('region'):\n geo = geocoder.geocode(term['string'])\n if geo:\n term['geo'] = geo\n if not self.region:\n # TODO: descobrir regiao do ponto\n self.region = \"???\"\n else:\n self.region = term['region']", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def __get_coords_from(self, name):\n geolocator = Nominatim(user_agent=\"spanish\")\n geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)\n location = geocode(name)\n return {\n \"name\": name,\n \"latitude\": location.latitude,\n \"longitude\": location.longitude,\n }", "def test_build_map_dict_by_name():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n # Get pygal country code map\n pygal_countries = {'KEN':'Kenya', 'IDN':'Indonesia'}\n\n # 1960\n res = build_map_dict_by_name(gdpinfo, pygal_countries, \"1960\")\n print(res)", "def name(self):\r\n return \"pdok-reverse-geocoder\"", "def roadToCoor(rn):\n # sleep(2)\n g = gmaps.geocode(rn)\n\n zipCode = None\n coor_Lat, coor_Lng, bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng = None, None, None, None, None, None\n if len(g) > 0:\n if len(g) > 0:\n for ac in g[0]['address_components']:\n try:\n if ac['types'][0] == 'postal_code':\n zipCode = ac['long_name']\n except:\n zipCode = None\n\n if 'location' in g[0]['geometry'].keys():\n try:\n coor = g[0]['geometry']['location'] # APPROXIMATE location\n coor_Lat = coor['lat']\n coor_Lng = coor['lng']\n except:\n coor_Lat, coor_Lng = None, None\n\n if 'bounds' in g[0]['geometry'].keys(): # bounding box\n try:\n bbox = g[0]['geometry']['bounds']\n bbox_NE_Lat = bbox['northeast']['lat']\n bbox_NE_Lng = bbox['northeast']['lng']\n bbox_SW_Lat = bbox['southwest']['lat']\n bbox_SW_Lng = bbox['southwest']['lng']\n except:\n bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng = None, None, None, None\n\n # g = geocoder.google(loc)\n # print(loc, g.latlng)\n coors = (coor_Lat, coor_Lng, bbox_NE_Lat, 
bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng)\n return zipCode, coors", "def _packaged_dict_for_entity(rt):\n entity = rt.entity\n return {u'entity_id': entity.id,\\\n u'name': entity.aggregation_paths['_geo'][-1]}", "def __init__(self, scheme=DEFAULT_SCHEME, timeout=DEFAULT_TIMEOUT, proxies=None, user_agent=None):\n super(DataBC, self).__init__(\n scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent\n )\n self.api = '%s://apps.gov.bc.ca/pub/geocoder/addresses.geojson' % self.scheme", "def _GetComponents(\n self,\n ) -> Dict[str, Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]]]:\n self._CreateSchemas()\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n # The `Components Object` `components` field of the root `OpenAPI Object`.\n return {\n \"schemas\":\n cast(Dict[str, Union[PrimitiveSchema, EnumSchema, MessageSchema]],\n self.schema_objs),\n }", "def build_I_map(self):\n raise NotImplementedError", "def computeGC2(self, lon, lat, depth):\n # This just returns defaults of zero, which will hopefully behave\n # gracefully as used in GMPEs.\n dict = {\"rx\": np.zeros_like(lon),\n \"ry\": np.zeros_like(lon),\n \"ry0\": np.zeros_like(lon),\n \"U\": np.zeros_like(lon),\n \"T\": np.zeros_like(lon)\n }\n return dict", "def get_dict(self):\r\n return self.cmap", "def name_places(self):\n self.city_names = {}\n self.region_names = {}\n for city in self.cities:\n self.city_names[city] = self.lang.name(\"city\")\n for region in np.unique(self.territories):\n self.region_names[region] = self.lang.name(\"region\")", "def retrieveManualGeocodes():\n\n\tshp_2013 = join(project_dir, '2013', 'shp')\n\tw_lid = join(shp_2013, 'west_lid_qcew13_zip_regeocoded.shp')\n\te_lid = join(shp_2013, 'east_lid_qcew13_zip_regeocoded.shp')\n\n\tbin_dict = {}\n\tfor lid in (w_lid, e_lid):\n\t\twith da.SearchCursor(lid, '*') as cursor:\n\t\t\tfor row in cursor:\n\t\t\t\td = OrderedDict(zip(cursor.fields, row))\n\t\t\t\t# if the geometry wasn't matched in the geocoding it has\n\t\t\t\t# a value of (None, None) in the 'Shape' field\n\t\t\t\tif d['Status'] != 'U':\n\t\t\t\t\tgeo_fields = (\n\t\t\t\t\t\t'Shape', 'Loc_name', 'Score', 'Match_type')\n\t\t\t\t\tgeo_dict = {k: d[k] for k in geo_fields}\n\t\t\t\t\tbin_dict[d['BIN']] = geo_dict\n\t\n\treturn bin_dict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict", "def get_location_gecode_address_str(address):\n location = {\n 'Latitude': {\n 'Value': None\n },\n 'Longitude': {\n 'Value': None\n }\n }\n geo_res = []\n if bool(address): # Check if address is non-falsey \n geo_res = gmaps.geocode(address)\n if len(geo_res) != 0:\n latitude = geo_res[0]['geometry']['location']['lat']\n longitude = geo_res[0]['geometry']['location']['lng']\n location['Latitude']['Value'] = latitude\n location['Longitude']['Value'] = longitude\n return location", "def default(self, o): \n if isinstance(o, GEOSGeometry):\n dictval = json.loads(o.geojson)\n #raise Exception(o.ewkt)\n dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification\n return dictval\n else:\n super(DjangoGEOJSONEncoder, self).default(o)", "def createSurfaceGeo(self):\n self.surfGeo = 
dict()\n r = self.geoParam['CylinderLightGuideRadius']\n self.surfGeo[r] = 'LightGuide'\n #self.material = {'Moderator':None,'Detector':None,'LightGuide':None}\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n r += self.geoParam['DetectorThickness']\n self.surfGeo[r] = 'Detector'\n r += self.geoParam['DetectorSpacing']\n if (r < self.geoParam['CylinderRadius']):\n self.surfGeo[r] = 'LightGuide'\n return self.surfGeo", "def main():\n # start_time = time.time()\n\n # city: (latitude, longtitude) +/- 0.5 degree\n # geocenter for EU: 50 9\n cities = {\n 'Vienna': (48, 16),\n 'Brussels': (51, 4),\n 'Sofia': (43, 23),\n 'Zagreb': (46, 16),\n 'Nicosia': (35, 33),\n 'Prague': (50, 14),\n 'Copenhagen': (55, 13),\n 'Tallinn': (59, 25),\n 'Helsinki': (60, 25),\n 'Paris': (49, 2),\n 'Berlin': (53, 13),\n 'Athens': (38, 24),\n 'Budapest': (48, 19),\n 'Dublin': (53, -6),\n 'Rome': (42, 13),\n 'Riga': (57, 24),\n 'Vilnius': (55, 25),\n 'Luxembourg': (50, 6),\n 'Valletta': (36, 15),\n 'Amsterdam': (52, 5),\n 'Warsaw': (52, 21),\n 'Lisbon': (39, -9),\n 'Bucharest': (44, 26),\n 'Bratislava': (48, 17),\n 'Ljubljana': (46, 15),\n 'Madrid': (40, -4),\n 'Stockholm': (59, 18),\n 'London': (52, 0)\n }\n\n cities = OrderedDict(sorted(cities.items(), key=lambda t: t[0]))\n cities_indices = [x for x in range(len(cities))]\n cities_names = [key for key in cities.keys()]\n\n for key, value in cities.items():\n nu_v = hf.equirectangular_projection(\n value[0], value[1], phi_0=50, l_0=9)\n cities[key] = nu_v\n\n decoder = {value: key for (key, value) in cities.items()}\n\n ga.cities = cities\n # ga.cities_names = cities_names\n # ga.cities_indices = cities_indices\n param_names = ['v1', 'v2', 't', 'n', 'pm', 'pc', 'tournsize', 'size']\n f = open('params.txt', 'r')\n param_values = [float(l) if '.' 
in l else int(l) for l in f]\n f.close()\n params = dict(zip(param_names, param_values))\n\n ga.Salesman.diploid = True\n starters = ga.mfp(params['size'])\n v1 = params['v1'] # velocity 1 in Poland\n v2 = params['v2'] # velocity 2 in Poland\n t = params['t'] # period of change of velocity in Poland\n n = params['n'] # number of generations\n pm = params['pm'] # probabilty of mutation (per gene)\n pc = params['pc'] # probability of crossover\n tournsize = params['tournsize']\n\n start_time = time.time()\n salesmen = starters\n ga.Salesman.velocity_pol = v1\n path_s = ga.findbest(salesmen).fitness\n print('first population best: ' + str(round(1 / path_s, 2)) + ' hours')\n\n results = [[0, path_s]]\n counter = 0\n for i in range(n):\n if counter == t // 2 - 1:\n ga.Salesman.velocity_pol = v1 if ga.Salesman.velocity_pol == v2 \\\n else v2\n counter = 0\n counter += 1\n salesmen = ga.evolution(salesmen, pm, pc, tournsize)\n path = ga.findbest(salesmen).fitness\n results.append([i + 1, path])\n\n path_d = ga.findbest(salesmen).fitness\n path_d_seq = ga.findbest(salesmen).best_seq\n print(str(n) + '-th population best (diploidal): ' +\n str(round(1 / path_d, 2)) + ' hours')\n print([decoder[x] for x in path_d_seq])\n print(\"Time elapsed: \" + str(time.time() - start_time) + 's')\n\n start_time = time.time()\n salesmen = starters\n ga.Salesman.diploid = False\n ga.Salesman.velocity_pol = v1\n\n results2 = [[0, path_s]]\n counter = 0\n for i in range(n):\n if counter == t // 2 - 1:\n ga.Salesman.velocity_pol = v1 if ga.Salesman.velocity_pol == v2 \\\n else v2\n counter = 0\n counter += 1\n salesmen = ga.evolution(salesmen, pm, pc, tournsize)\n path = ga.findbest(salesmen).fitness\n results2.append([i + 1, path])\n\n path_h = ga.findbest(salesmen).fitness\n path_h_seq = ga.findbest(salesmen).city_seq\n print(str(n) + '-th population best (haploidal): ' +\n str(round(1 / path_h, 2)) + ' hours')\n print([decoder[x] for x in path_h_seq])\n print(\"Time elapsed: \" + str(time.time() - start_time) + 's')\n\n # plot fitnesses:\n results = np.asarray(results)\n results2 = np.asarray(results2)\n plt.plot(results[:, 0], results[:, 1], 'b-', label='diploidal')\n plt.plot(results2[:, 0], results2[:, 1], 'g-', label='haploidal')\n plt.legend(loc=4)\n plt.show()\n\n # plot paths:\n fig, ax = plt.subplots(1)\n\n starters_best_seq = ga.findbest(starters).city_seq\n starters_best_seq += [starters_best_seq[0]] # close the loop\n starters_best_seq = np.asarray(starters_best_seq)\n plt.plot(starters_best_seq[:, 0], starters_best_seq[:, 1], 'r-', alpha=0.2)\n\n labels = cities_indices\n cities = np.asarray(list(ga.cities.values()))\n\n plt.scatter(cities[:, 0], cities[:, 1], color='r')\n for label, x, y in zip(labels, cities[:, 0], cities[:, 1]):\n plt.annotate(label, xy=(x, y), xytext=(-6, -12),\n textcoords='offset points')\n poland_c = hf.equirectangular_projection(52, 19, 50, 9)\n poland = plt.Circle(poland_c, .047, color='r', alpha=0.3)\n ax.add_artist(poland)\n\n path_d_seq = path_d_seq + [path_d_seq[0]]\n path_d_seq = np.asarray(path_d_seq)\n\n path_h_seq = path_h_seq + [path_h_seq[0]]\n path_h_seq = np.asarray(path_h_seq)\n\n plt.plot(path_h_seq[:, 0],\n path_h_seq[:, 1], 'g-', label='haploidal')\n plt.plot(path_d_seq[:, 0],\n path_d_seq[:, 1], 'b-', label='diploidal')\n\n legend = \"Legend:\\n\"\n legend += \"\\n\".join([str(ii) + ': ' + name\n for ii, name in enumerate(cities_names)])\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(-0.15, 0.95, legend,\n transform=ax.transAxes,\n 
fontsize=14, verticalalignment='top', bbox=props)\n\n plt.axis('off')\n plt.legend(loc=4)\n plt.show()", "def get_info_dict(self):\n return {\n 'bidi': self.bidi,\n 'code': self.code,\n 'name': self.name,\n 'name_local': self.name_local\n }", "def get_geo_data(request):\n\n # Note that geoip2 (from maximind) doesn't work on GAE because there is a\n # C lib in there apparently.\n # We can use Appengine's added headers to do that work though thankfully.\n geo = dict()\n geo['region'] = request.headers.get(\"X-AppEngine-Region\", \"unknown\")\n geo['city'] = request.headers.get(\"X-AppEngine-City\", \"unknown\")\n geo['country'] = request.headers.get(\"X-AppEngine-Country\", \"unknown\")\n geo['city_lat_long'] = request.headers.get(\"X-AppEngine-CityLatLong\", \"unknown\")\n\n return geo", "def get_chromosome_object(agp):\n\n chr = {}\n\n agp = agp.split('\\n')\n\n for i, line in enumerate(agp):\n if len(line) == 0 or line[0] == '#':\n continue\n tabs = line.split(\"\\t\")\n acc = tabs[0]\n start = int(tabs[1])\n stop = int(tabs[2])\n comp_type = tabs[6]\n if 'acc' not in chr:\n chr['accession'] = acc\n chr['type'] = 'nuclear'\n if comp_type == 'centromere':\n chr['centromere'] = {\n 'start': start,\n 'length': stop - start\n }\n if i == len(agp) - 2:\n chr['length'] = stop\n return chr", "def valid_address_dict():\n return dict(\n first_name=\"Test\",\n last_name=\"User\",\n street_address_1=\"1 Main St\",\n state_or_territory=\"US-MA\",\n city=\"Cambridge\",\n country=\"US\",\n postal_code=\"02139\",\n )" ]
[ "0.59961295", "0.58848774", "0.5472175", "0.52825177", "0.52088803", "0.5155671", "0.5066724", "0.5063108", "0.50595534", "0.50516945", "0.5017744", "0.5011075", "0.49679703", "0.49611756", "0.49348325", "0.4923301", "0.49214232", "0.49026912", "0.4897452", "0.4863266", "0.4852003", "0.4852003", "0.4851725", "0.48114902", "0.48057002", "0.4795321", "0.47947434", "0.4773176", "0.47706097", "0.47690696" ]
0.64234304
0
Callback fired on data from the Twitter streaming API. Filters out tweets with RTs or urls in them, geocodes them if they have location information, and pushes the geocoded tweets out to connected clients
def tweet_callback(status): if status[-3].endswith('}'): status = json.loads(status) if tweet_is_valid(status): if CLIENTS: status = geocode_status(status) broadcast(status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_tweet(tweet):\n global start_date\n global end_date\n global geo_enabled_tweets\n global retweets\n\n # Check for filters before processing any further\n if args.filter and tweet.source:\n if not args.filter.lower() in tweet.source.lower():\n return\n\n tw_date = tweet.created_at\n\n # Updating most recent tweet\n end_date = end_date or tw_date\n start_date = tw_date\n\n # Handling retweets\n try:\n # We use id to get unique accounts (screen_name can be changed)\n rt_id_user = tweet.retweeted_status.user.id_str\n retweeted_users[rt_id_user] += 1\n\n if tweet.retweeted_status.user.screen_name not in id_screen_names:\n id_screen_names[rt_id_user] = \"@%s\" % tweet.retweeted_status.user.screen_name\n\n retweets += 1\n except:\n pass\n\n # Adding timezone from profile offset to set to local hours\n if tweet.user.utc_offset and not args.no_timezone:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=tweet.user.utc_offset))\n\n if args.utc_offset:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=args.utc_offset))\n\n # Updating our activity datasets (distribution maps)\n activity_hourly[\"%s:00\" % str(tw_date.hour).zfill(2)] += 1\n activity_weekly[str(tw_date.weekday())] += 1\n\n # Updating langs\n detected_langs[tweet.lang] += 1\n\n # Updating sources\n detected_sources[tweet.source] += 1\n\n # Detecting geolocation\n if tweet.place:\n geo_enabled_tweets += 1\n tweet.place.name = tweet.place.name\n detected_places[tweet.place.name] += 1\n\n # Updating hashtags list\n if tweet.entities['hashtags']:\n for ht in tweet.entities['hashtags']:\n ht['text'] = \"#%s\" % ht['text']\n detected_hashtags[ht['text']] += 1\n\n # Updating domains list\n if tweet.entities['urls']:\n for url in tweet.entities['urls']:\n domain = urlparse(url['expanded_url']).netloc\n if domain != \"twitter.com\": # removing twitter.com from domains (not very relevant)\n detected_domains[domain] += 1\n\n # Updating mentioned users list\n if tweet.entities['user_mentions']:\n for ht in tweet.entities['user_mentions']:\n mentioned_users[ht['id_str']] += 1\n if not ht['screen_name'] in id_screen_names:\n id_screen_names[ht['id_str']] = \"@%s\" % ht['screen_name']", "def on_success(self, data):\n\n # only want to collect English-language tweets\n if data['lang'] == 'en':\n tweet = data['text']\n processed_tweets.append(preprocess_tweet(tweet))\n\n # stop when we've collected enough\n if len(processed_tweets) >= 5:\n self.disconnect()", "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetweets)\n auth = api.auth\n stream = tweepy.Stream(auth, listener, tweet_mode='extended')\n else:\n stream.listener.setRemoveRetweets(removeRetweets)\n stream.listener.resetTweets()\n\n words = mapToValid(words)\n authors = mapToValid(authors)\n if not words and not authors:\n words=[\"the\", \"i\", \"to\", \"a\", \"and\", \"'s\", \"is\", \"in\", \"it\", \"you\", \"of\", \"for\", \"on\", \"my\", \"that\", \"e\", \"with\", \"me\", \"do\", \"have\", \"ciao\", \"o\", \"u\", \"cool\", \"good\", \"nice\", \"#\", \"*\", \":\", \";\", \",\", \".\", \"?\", \"-\", \"%\", \"$\", \"€\", \"!\", \"(\", \")\", \"=\", \"'\"]\n\n #myQuery = ' OR '.join(kwargs[\"words\"])\n if authors:\n kwargs[\"follow\"]=[user.id_str for user in list(map(api.get_user,authors))]\n else:\n kwargs[\"track\"]=words\n #if 
removeRetweets:\n # myQuery += \" -filter:retweets\"\n\n #myQuery += ' from:'\n #myQuery += ' OR from:'.join(kwargs[\"authors\"])\n #print(myQuery)\n import signal\n # Register the signal function handler\n signal.signal(signal.SIGALRM, __streamHandler__)\n # Define a timeout for your function\n signal.alarm(timeLimit)\n try:\n __stream__(stream,**kwargs)\n except Exception:\n print(\"Streaming over after time period of\", timeLimit, \"seconds... Retrieved\", len(stream.listener.getTweets()), \"tweets.\")\n stream.disconnect()\n if authors and words:\n print(\"Filtering out tweets that don't contain the specified words...\")\n myTweets=[]\n for tweet in stream.listener.getTweets():\n if 'full_text' in tweet:\n tweet['text'] = tweet['full_text']\n del (tweet['full_text'])\n if any(containsWord(tweet['text'],word) for word in words):\n myTweets.append(tweet)\n print(\"Done. Retrieved\", len(myTweets), \"tweets written by the authors specified and containing (any of) the words specified.\")\n return myTweets\n return stream.listener.getTweets()", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n output_tweet[k] = v\n else:\n if debug: print(' skipping this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = 
StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def TwitterListener():\n l = StdOutListener()\n auth = OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n stream = Stream(auth, l)\n api = API(auth_handler=auth)\n config.HASHTAGS = [x['name'] for x in api.trends_place(id=44418)[0]['trends']]\n\n print(\"Stream listener is up and running\")\n stream.filter(track=config.HASHTAGS)", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def geo_collect_tweets(search_term,latitude,longitude,radius):\n i = None\n tweets = []\n rep = 1\n for n in range(2): #can only search 100 tweets at a time, so run search multiple times\n \tresults = api.GetSearch(term = search_term, \n \t\tcount = 100, \n \t\tresult_type = 'recent', \n \t\tmax_id = i, #start a search from the most recent tweet id, working backwards\n \t\tgeocode =(latitude, longitude, radius))\n for tweet in results:\n tweets.append(tweet.text)\n i = tweet.id - 1 #want it to start at the tweet after the last tweet\n rep += 1\n return list(set(tweets)) #set gets rid of repititve tweets, but need to return a list", "def callback(message):\n try:\n # some unicode cleanup\n line = message.data.replace('\\\\u0000'.encode(),''.encode())\n temp = json.loads(line)\n if not 'user' in temp.keys():\n # means this message doesn't contain a bonafide tweet\n message.ack()\n return\n userdict = temp['user']\n if userdict['url'] is None:\n # so no null URL values get loaded\n userdict['url'] = '-'\n if userdict['location'] is None:\n userdict['location']=''\n if temp['geo'] is not None:\n lat = temp['geo']['coordinates'][0]\n lng = temp['geo']['coordinates'][1]\n else:\n lat=0\n lng=0\n\n placedict = temp['place']\n # some tweets have place information, others don't, we need a different\n # set of SQL statements for each case.\n if placedict is not None:\n placeid = temp['place']['id']\n insertsqlplace = \"\"\"\n INSERT INTO users \n (id, name, screen_name, location,\n url, description, verified, followers_count, friends_count,\n statuses_count, geo_enabled, lang, created_at,\n profile_background_image_url) \n VALUES\n (%(id)s, %(name)s, %(screen_name)s, %(location)s, %(url)s,\n %(description)s, %(verified)s, %(followers_count)s, %(friends_count)s,\n %(statuses_count)s, %(geo_enabled)s, %(lang)s, %(created_at)s,\n %(profile_background_image_url)s) \n ON CONFLICT DO NOTHING;\n \n INSERT INTO place\n (id, url, place_type, name, full_name, bounding_box_json)\n VALUES\n (%(idp)s, %(urlp)s, %(place_typep)s, %(namep)s,\n %(full_namep)s, 
%(bounding_box_jsonp)s) \n ON CONFLICT DO NOTHING;\n \n INSERT INTO tweets \n (tweet_id,\n tweet_text, tweet_source, in_reply_to_status_id, in_reply_to_user_id,\n tweet_date, place_id, user_id, geo_lat, geo_lng) \n VALUES \n (%(tweet_idt)s,\n %(tweet_textt)s, %(tweet_sourcet)s, %(in_reply_to_status_idt)s,\n %(in_reply_to_user_idt)s, %(tweet_datet)s, %(place_idt)s, %(user_idt)s,\n %(geo_latt)s, %(geo_lngt)s) \n ON CONFLICT DO NOTHING;\n \"\"\"\n parametersplace = {'id': userdict['id'],\n 'name': userdict['name'],\n 'screen_name': userdict['screen_name'],\n 'location': userdict['location'],\n 'url': userdict['url'],\n 'description': userdict['description'],\n 'verified': userdict['verified'],\n 'followers_count': userdict['followers_count'],\n 'friends_count': userdict['friends_count'],\n 'statuses_count': userdict['statuses_count'],\n 'geo_enabled': userdict['geo_enabled'],\n 'lang': userdict['lang'],\n 'created_at': parser.parse(userdict['created_at']),\n 'profile_background_image_url': userdict['profile_image_url'],\n 'idp': placedict['id'],\n 'urlp': placedict['url'],\n 'place_typep': placedict['place_type'],\n 'namep': placedict['name'],\n 'full_namep': placedict['full_name'],\n 'bounding_box_jsonp': json.dumps(placedict['bounding_box']),\n 'tweet_idt': temp['id'],\n 'tweet_textt': temp['text'],\n 'tweet_sourcet': temp['source'],\n 'in_reply_to_status_idt': temp['in_reply_to_status_id'],\n 'in_reply_to_user_idt': temp['in_reply_to_user_id'],\n 'tweet_datet': parser.parse(temp['created_at']),\n 'place_idt': placeid,\n 'user_idt': temp['user']['id'],\n 'geo_latt': lat, 'geo_lngt': lng}\n else:\n placeid = None\n insertsqlplace = \"\"\"\n INSERT INTO users \n (id, name, screen_name, location,\n url, description, verified, followers_count, friends_count,\n statuses_count, geo_enabled, lang, created_at,\n profile_background_image_url) \n VALUES\n (%(id)s, %(name)s, %(screen_name)s, %(location)s, %(url)s,\n %(description)s, %(verified)s, %(followers_count)s, %(friends_count)s,\n %(statuses_count)s, %(geo_enabled)s, %(lang)s, %(created_at)s,\n %(profile_background_image_url)s) \n ON CONFLICT DO NOTHING;\n\n INSERT INTO tweets \n (tweet_id,\n tweet_text, tweet_source, in_reply_to_status_id, in_reply_to_user_id,\n tweet_date, place_id, user_id, geo_lat, geo_lng) \n VALUES \n (%(tweet_idt)s,\n %(tweet_textt)s, %(tweet_sourcet)s, %(in_reply_to_status_idt)s,\n %(in_reply_to_user_idt)s, %(tweet_datet)s, %(place_idt)s, %(user_idt)s,\n %(geo_latt)s, %(geo_lngt)s) \n ON CONFLICT DO NOTHING;\n \"\"\"\n parametersplace = {'id': userdict['id'],\n 'name': userdict['name'],\n 'screen_name': userdict['screen_name'],\n 'location': userdict['location'],\n 'url': userdict['url'],\n 'description': userdict['description'],\n 'verified': userdict['verified'],\n 'followers_count': userdict['followers_count'],\n 'friends_count': userdict['friends_count'],\n 'statuses_count': userdict['statuses_count'],\n 'geo_enabled': userdict['geo_enabled'],\n 'lang': userdict['lang'],\n 'created_at': parser.parse(userdict['created_at']),\n 'profile_background_image_url': userdict['profile_image_url'],\n 'tweet_idt': temp['id'],\n 'tweet_textt': temp['text'],\n 'tweet_sourcet': temp['source'],\n 'in_reply_to_status_idt': temp['in_reply_to_status_id'],\n 'in_reply_to_user_idt': temp['in_reply_to_user_id'],\n 'tweet_datet': parser.parse(temp['created_at']),\n 'place_idt': placeid,\n 'user_idt': temp['user']['id'],\n 'geo_latt': lat, 'geo_lngt': lng}\n curr.execute(insertsqlplace,parametersplace)\n # commit the insert statements to 
the data base and then acknowledge\n # parsing of the message if successful.\n conn.commit()\n message.ack()\n if round(randint(0, 50000)/100)==250:\n print('Added ~'+str(250)+' tweets! '+temp['created_at'])\n except:\n print('Error loading tweet.')", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def get_tweets(candidate):\n statuses = api.GetUserTimeline(screen_name=candidate[\"twitter\"])\n # Use NLP to try to figure out where the tweets are\n for tweet in statuses:\n if tweet.user.screen_name == candidate[\"twitter\"]:\n parsed_location = nlp(tweet.text)\n for ent in parsed_location.ents:\n if ent.label_ == \"GPE\":\n location = geocode(ent.text)\n if \"results\" in location.keys():\n if len(location[\"results\"]) >= 1:\n # Look for a country\n for address_component in location[\"results\"][0][\"address_components\"]:\n if \"country\" in address_component[\"types\"] and address_component[\"short_name\"] == \"US\":\n print(tweet)\n return {\n \"tweet\": f\"https://twitter.com/{candidate['twitter']}/status/{tweet.id}\",\n \"user_image\": tweet.user.profile_image_url_https,\n \"lat\": location[\"results\"][0][\"geometry\"][\"location\"][\"lat\"],\n \"lon\": location[\"results\"][0][\"geometry\"][\"location\"][\"lng\"],\n \"location\": ent.text\n }\n return None", "def stream_tweets(bearer_token):\n print(\"Streaming tweets...\")\n\n oauth2 = osometweet.OAuth2(\n bearer_token=bearer_token,\n manage_rate_limits=False\n )\n ot = osometweet.OsomeTweet(oauth2)\n\n # Add all tweet fields\n all_tweet_fields = osometweet.TweetFields(everything=True)\n\n # Add streaming rules\n rules = [{\"value\": \"coronavirus\", \"tag\": \"all coronavirus tweets\"},\n {\"value\": \"indiana\", \"tag\": \"all indiana tweets\"}]\n add_rules = {\"add\": rules}\n response = ot.set_filtered_stream_rule(rules=add_rules)\n print(f\"API response from adding two rules:\\n{response}\\n\")\n\n # Retrieve active streaming rules\n current_rules = ot.get_filtered_stream_rule()\n print(f'The current filtered stream rules are:\\n{current_rules}\\n')\n\n # Remove a streaming rule by using it's tag\n indiana_rule = [\n rule[\"id\"] for rule in current_rules[\"data\"]\n if 'all indiana tweets' in rule[\"tag\"]\n ]\n delete_rule = {'delete': {'ids': indiana_rule}}\n response = ot.set_filtered_stream_rule(rules=delete_rule)\n print(f\"API response from deleting one rule:\\n{response}\\n\")\n\n # Get today's date\n today = dt.strftime(dt.today(), \"%Y-%m-%d_%H-%M\")\n\n # Open two files. 
One for good data, the other for tweet errors.\n with open(f\"tweet_stream--{today}.json\", \"a\") as data_file:\n # stream is a Generator\n stream = ot.filtered_stream(fields=all_tweet_fields)\n # We have to iterate over the stream to fetch streamed tweets\n for tweet in stream.iter_lines():\n # Get data and errors\n try:\n data = json.loads(tweet).get(\"data\")\n\n # When data is found, we write it to the open file\n if data:\n json.dump(data, data_file)\n data_file.write(\"\\n\")\n except json.JSONDecodeError:\n pass", "def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')", "def track(twitter, keywords=[], user_ids=[]):\n\n # Prepare for GET request\n streaming_url = \"https://stream.twitter.com/1.1/statuses/filter.json\"\n\n # Documentation for filter params:\n # https://dev.twitter.com/docs/streaming-apis/parameters\n params = {\"replies\": \"all\"}\n if keywords:\n params[\"track\"] = keywords\n if user_ids:\n params[\"follow\"] = user_ids\n\n # Create Request.get object\n r = twitter.get(url=streaming_url, params=params, stream = True)\n\n # Iterate over the request\n for line in r.iter_lines():\n if line :\n try:\n # TODO \n # Sometimes it returns a \"disconnect\" obj \n # before closing the stream\n tweet = json.loads(line)\n yield tweet\n except ValueError:\n # Couldn't construct a valid tweet\n pass", "def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')", "def on_data(self, data):\n status = json.loads(data)\n # increase the counter\n self.counter += 1\n\n retweet, rt_user, tweet_text, created_time = organize_tweet(status) \n\n if status['user']['id_str'] in infos.twitterids:\n\n who = status['user']['id_str']\n\n try:\n replied_to = status['in_reply_to_screen_name']\n except:\n replied_to = 'NULL'\n \n else:\n \n who = status['user']['screen_name']\n \n try:\n replied_to = infos.twitterids[status['in_reply_to_user_id_str']]\n except:\n replied_to = 'NULL'\n \n tweet = {\n \n 'id': status['user']['id_str'], #status.user.id_str,\n 'who': who,\n 'replied_to': replied_to,\n 'retweeted': retweet, #status['retweeted'], #status.retweeted,\n 'retweeted_from': rt_user,\n 'text': tweet_text,\n 'timestamp' : created_time\n }\n\n #write to mongoDB here\n collection.insert_one(tweet)\n print(f'New tweet arrived: {tweet[\"text\"]}')\n\n\n # check if we have enough tweets collected\n if self.max_tweets == self.counter:\n # reset the 
counter\n self.counter=0\n # return False to stop the listener\n return False", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n tweet = None\n includes = {}\n errors = []\n matching_rules = []\n\n if \"data\" in data:\n tweet = Tweet(data[\"data\"])\n self.on_tweet(tweet)\n if \"includes\" in data:\n includes = self._process_includes(data[\"includes\"])\n self.on_includes(includes)\n if \"errors\" in data:\n errors = data[\"errors\"]\n self.on_errors(errors)\n if \"matching_rules\" in data:\n matching_rules = [\n StreamRule(id=rule[\"id\"], tag=rule[\"tag\"])\n for rule in data[\"matching_rules\"]\n ]\n self.on_matching_rules(matching_rules)\n\n self.on_response(\n StreamResponse(tweet, includes, errors, matching_rules)\n )", "def Stream():\r\n \r\n config = config_create()\r\n CONSUMER_KEY = config.get('Auth', 'CONSUMER_KEY') \r\n CONSUMER_SECRET = config.get('Auth', 'CONSUMER_SECRET')\r\n ACCESS_KEY = config.get('Auth', 'ACCESS_KEY')\r\n ACCESS_SECRET = config.get('Auth', 'ACCESS_SECRET')\r\n searchterm = config.get('Filter','search')\r\n name = multiprocessing.current_process().name\r\n \"\"\"Function that will manage doing the 
twitter stream\"\"\"\r\n stream = MyStreamer(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)\r\n stream.statuses.filter(track= searchterm)", "def process_tweet(data):\n decoded = json.loads(data)\n\n twt_text = decoded['text']\n # Do some stuff with the json data if tweet is a closure for specific bridge\n if bridgeName in twt_text and \"closed\" in twt_text:\n \t# Send and email or turn on a light or something\n \tprint \"BRIDGE CLOSED!!! Eat another piece of toast, pet the cats\"\n \tsendAlert(data=decoded, email=to_emailAddress)\n\n \t# temporarily limit the number of calls to one\n \tsys.exit('had a bridge event, shutting down')\n return True", "def stream_tweets(api_token: str, api_secret: str, access_token: str, access_secret: str, saver,\n keywords: list = None, users: list = None, locations: list = None, stall_warning: bool = False):\n\n auth = OAuthHandler(api_token, api_secret)\n auth.set_access_token(access_token, access_secret)\n api = API(auth)\n listener = TwitterListener(manager=saver, api=api)\n stream = Stream(auth=auth, listener=listener)\n log.write_log('Streaming started', 'execution')\n stream.filter(track=keywords, follow=users, locations=locations, stall_warnings=stall_warning)", "def on_data(self, _data):\n # There are times where the twitter stream fails and will send a NoneType object\n if not _data:\n print('**************************************************')\n print('**** Stream error, received empty data object ****')\n print('**************************************************')\n return\n\n data = json.loads(_data)\n\n # Only follow the created at tweets, ignoring replies, etc when trolling\n if CREATED_AT in data:\n if data[USER][ID_STRING] not in self.trolling_ids:\n return\n\n # Only troll raw tweets, not replies\n if data[REPLY_TO_STATUS_ID]:\n return\n\n print('New tweet, current count:', self.received_tweet_count)\n\n # If received enough than retweet it\n if self.received_tweet_count == TWEET_INTERVAL:\n raw_tweet_url = TWITTER_URL + data[USER][SCREEN_NAME] + STATUS_PATH + data[ID_STRING]\n print(\n '\\n== New Tweet Received @', datetime.now(),\n 'Tweet URL:', raw_tweet_url, '=='\n )\n\n time.sleep(RETWEET_WAIT_PERIOD)\n self.twitter_api.retweet(data[ID])\n\n print('\\n== Successfully retweeted! 
==')\n\n self.received_tweet_count = 0\n\n else:\n self.received_tweet_count += 1", "def on_tweet(self, tweet):\n pass", "def filter(self):\n\t\tparameters = {}\n\n\t\tif self.keywords:\n\t\t\tparameters['track'] = ','.join(self.keywords)\n\n\t\tif self.locations:\n\t\t\tparameters['locations'] = ','.join([','.join([str(latlong) for latlong in loc]) for loc in self.locations])\n\n\t\tif self.usernames:\n\t\t\tparameters['follow'] = ','.join([str(u) for u in self.usernames])\n\n\t\tself.launch('statuses/filter.json', parameters)", "def on_data(self,data):\n\n try:\n raw_data = json.loads(data)\n\n if 'text' in raw_data:\n\n created_at = raw_data['created_at']\n username = raw_data['user']['screen_name']\n location = raw_data['user']['location']\n followers_count = raw_data['user']['followers_count']\n tweet_id = raw_data['id']\n\n if 'extended_tweet' in raw_data:\n tweet = raw_data['extended_tweet']['full_text']\n else:\n tweet = raw_data['text']\n\n connect(created_at, username, tweet, location, followers_count, tweet_id)\n print(f'Tweet collected at: {str(created_at)}')\n\n except Error as e:\n print(e)", "def tweet_processor(self, tweets):\n with Timer() as timer:\n detection_count = self.tweet_processor_fct(tweets) or 0\n # Increment the total number of detections.\n self.redis.hincrby(self.metadata_cache_key, 'detection',\n detection_count)\n\n log.debug(\"Processed {} tweets in {:2.3f} secs.\".format(\n len(tweets), timer.interval))", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def post_to_tweets(data, url):\n\n print(\"here,\", url)\n\n albums = find_all_images(data['content'])\n text = strip_text(data['content'])\n\n \"\"\"Where applicable, the images are associated with the text. This means, that to make an appropriate thread the\n conversion from a post to tweets should take into account how words relate to images in a spacial way. 
For this\n reason, we convert to tweets in batches.\"\"\"\n\n cfg = get_keys() # grab keys\n api = get_api(cfg) # setup API\n in_reply_to = None\n twitter_url = 'https://twitter.com'\n\n # for idx, caption in enumerate(text):\n # if idx > 0:\n # url_img = None\n # caption = re.findall(r\"[\\w']+|[.!?;]\\ \", caption)\n # text[idx] = text_to_tweets(caption, url_img)\n\n try:\n if data['in_reply_to'] is not None: # if post is reply ...\n for reply in data['in_reply_to']:\n if reply[:len(twitter_url)] == twitter_url: # if the URL points to twitter ...\n in_reply_to = reply.split('/')[-1:] # ... get the status id\n except KeyError:\n pass\n\n url = 'https://' + DOMAIN_NAME + url\n\n tweets = text_to_tweets(text, url=url) # process string into tweet thread\n\n # try and parse a lat lng.\n try:\n lat, lng = data['geo'][4:].split(\",\")\n except KeyError:\n lat, lng = None, None\n\n # post the first tweet so that we have a status id to start the thread\n status = api.update_status(status=tweets[0].pop(0), in_reply_to_status_id=in_reply_to)\n first_id = status.id # the id which points to origin of thread\n\n for album_group in text:\n try:\n media = album_group.pop(0) # get the corresponding album\n for tweet in album_group:\n status = api.update_with_media(filename=media, status=tweet, in_reply_to_status_id=status.id, lat=lat, long=lng)\n media = None\n except IndexError: # if we're out of albums...\n pass\n return 'http://twitter.com/{name}/status/{id}'.format(name=status.user.screen_name, id=first_id, lat=lat, lng=lng)", "def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... 
Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)" ]
[ "0.6893244", "0.6432158", "0.64183885", "0.62337077", "0.6224815", "0.61122423", "0.61118174", "0.6083181", "0.606989", "0.60397", "0.6024835", "0.60062283", "0.5928235", "0.5867464", "0.5866802", "0.58547837", "0.5847184", "0.5831476", "0.5820497", "0.58133286", "0.57719487", "0.57683694", "0.5734892", "0.5700745", "0.56749845", "0.56739795", "0.56616354", "0.5647031", "0.56462514", "0.5630321" ]
0.6440632
1
Get all player properties
def player_properties(self): return self.properties.GetAll(self.player_interface)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getProperties():", "def getPropertiesAll():", "def _get_player_info(self):\n return [player._player_info() for player in self.players.values()]", "def get_properties():", "def get_players(self):\r\n return self.players.values()", "def get_properties(self):\n return self.properties", "def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...", "def get_properties(self):\n return self.properties", "def getProperties(self):\n return self.properties", "def get_player_attributes(self):\n return Player_Attributes.read_by_player_fifa_api_id(self.player_fifa_api_id)", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def properties(self):\n return self._props", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def players(self):\n return self._get(\"players\")", "def get_players(self, all=False):\n if all:\n return self.all_players\n else:\n return self.players", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def get_player_items(self):\n return self.player.items", "def properties_get(self):\n return self._get('properties')", "def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def returnPlayerStats(self):\n\t\tplayerStats = [self.name, \n\t\t\t\t\t self.agility, \n\t\t\t\t\t self.personality, \n\t\t\t\t\t self.sanity, \n\t\t\t\t\t self.strength, \n\t\t\t\t\t self.progress]\n\t\treturn playerStats", "def getProperties(self, prop_colour):\n props = database_creator.db.query(\n \"SELECT name FROM main_property_deck WHERE property_colour = :prop_colour\", prop_colour=prop_colour)\n properties = []\n for i in props:\n properties.append(i[\"name\"])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def properties(self) -> Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def get_properties(self):\n\n properties = {}\n for iface_name in self.all_interfaces:\n iface = getattr(self, iface_name, None)\n if iface:\n properties.update(iface.get_properties())\n return properties" ]
[ "0.70561594", "0.7029557", "0.7014769", "0.69219154", "0.68711734", "0.6756985", "0.6735967", "0.6679412", "0.66471547", "0.6595262", "0.65539634", "0.65072423", "0.65060073", "0.65060073", "0.64347965", "0.6346386", "0.6327121", "0.62983334", "0.62983334", "0.6272479", "0.62618864", "0.62599677", "0.6254324", "0.6254185", "0.62124276", "0.6194011", "0.617899", "0.61671776", "0.61671776", "0.61663014" ]
0.898955
0
open media from URI and start playback
def open(self, uri): try: self.player.OpenUri(uri) except AttributeError as ex: raise UnsupportedOperation( f'{self.name} does not support opening URIs') from ex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, uri):\n if self.p:\n self.p.stop()\n self.p = self.vlc.media_player_new(uri)\n Player._finished = False\n e = self.p.event_manager()\n e.event_attach(vlc.EventType.MediaPlayerEndReached, self.__end_reached, None)\n if (not '://' in uri or uri.startswith('file://')) and os.stat(uri).st_size < 100:\n self._finished = True\n else:\n self.p.play()", "def play_media(self, item):\n self.play_media_event.clear()\n\n def app_launched_callback():\n try:\n self._send_start_play(item)\n finally:\n self.play_media_event.set()\n\n self.launch(app_launched_callback)", "def play_media(self, media=None, **kwargs):\n self.play_media_event.clear()\n\n def app_launched_callback(): # pylint: disable=missing-docstring\n try:\n self._send_start_play(media, **kwargs)\n finally:\n self.play_media_event.set()\n\n self.launch(app_launched_callback)", "def start_stream(self):\n self.handle = lt.add_magnet_uri(self.lt_ses, self.queue[0].magnet_link, # pylint: disable=no-member\n self.params)\n self.handle.set_sequential_download(True)\n\n self.stream_thread = threading.Thread(target=self._stream,\n name='stream')\n self.stream_thread.start()", "def load(self, uri):\n\n self.stop()\n self.uri = uri\n if os.path.exists(self.uri):\n uri = \"%s%s%s\" % (\"\\\"\", self.uri, \"\\\"\")\n\n cache_pantalla = \"%s -cache %i -wid %i\" % (\n MPLAYER, 1024, self.ventana_id)\n\n estructura = \"%s -slave -idle -nolirc\" % (cache_pantalla)\n estructura = \"%s -rtc -nomouseinput\" % estructura\n estructura = \"%s -noconsolecontrols -nojoystick\" % estructura\n\n self.mplayer = subprocess.Popen(\n estructura, shell=True, stdin=subprocess.PIPE,\n stdout=open(STDOUT, \"w+b\"), stderr=open(STDOUT, \"r+b\"),\n universal_newlines=True)\n\n self.entrada = self.mplayer.stdin\n self.salida = open(STDOUT, \"r\")\n self.entrada.write(\"loadfile %s 0\\n\" % uri)\n self.entrada.flush()\n self.video_in_stream = False\n\n self.__new_handle(True)", "def play(self, stream_url):\n print(\"Ready to play \" + stream_url)\n self.close()\n\n opts = [\"mplayer\", \"-quiet\", \n \"-slave\",\n \"-softvol\",\n \"-cache\", str(self.config.get('cache-kb')),\n \"-cache-min\", str(self.config.get('cache-min')),\n \"-volume\", str(self.config.get('volume'))]\n\n if stream_url.split(\"?\")[0][-3:] in ['m3u', 'pls']:\n opts.extend([\"-playlist\", stream_url])\n else:\n opts.extend([stream_url])\n\n self.process = subprocess.Popen(opts, shell=False,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n thread.start_new_thread(self.updateStatus, ())", "async def async_media_play(self) -> None:\n await self._projector.send_command(PLAY)", "def play(filename):\n if sys.platform == \"win32\":\n os.startfile(filename)\n else:\n opener =\"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, filename])", "async def async_media_play(self) -> None:\n await self._volumio.play()", "async def async_media_play(self):\n if not self._slave_mode:\n if self._state == STATE_PAUSED:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:resume\", None)\n\n elif self._prev_source != None:\n temp_source = next((k for k in self._source_list if self._source_list[k] == self._prev_source), None)\n if temp_source == None:\n return\n\n if temp_source.startswith('http') or temp_source == 'udisk' or temp_source == 'TFcard':\n await self.async_select_source(self._prev_source)\n if self._source != None:\n self._source = None\n value = \"OK\"\n else:\n value = await 
self.async_call_linkplay_httpapi(\"setPlayerCmd:play\", None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:play\", None)\n\n if value == \"OK\":\n self._state = STATE_PLAYING\n self._unav_throttle = False\n #self._playing_tts = False\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if self._slave_list is not None:\n for slave in self._slave_list:\n await slave.async_set_state(self._state)\n await slave.async_set_position_updated_at(self.media_position_updated_at)\n else:\n _LOGGER.warning(\"Failed to start or resume playback. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_play()", "def play(self) -> None:\n self.system.notify(\"Jarvis::Play\")\n self.media.play()", "def load(self, media):\n path = self.check_media(media)\n if path is False:\n self._log(\"warning\", \"Unknown media {0} => aborting\".format(media))\n #return False prevent continue to play or play last media, force send unknow file to load to vlc\n # self.stdin_queue.put_nowait()\n self._direct_stdin_writer(\"load {0}\".format(path))", "def play_film(self, file):\n directory_name = os.path.dirname(file)\n file_name = os.path.basename(file)\n self.Media = self.vlc_instance.media_new(\n str(os.path.join(directory_name, file_name))\n )\n #self.Media.get_meta()\n self.vlc_media_player_instance.set_media(self.Media)\n self.vlc_media_player_instance.set_xwindow(self.get_handle())\n self.play()", "def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())", "def play(videoid):\n common.debug('Playing {}'.format(videoid))\n metadata = api.metadata(videoid)\n common.debug('Metadata is {}'.format(metadata))\n\n if not _verify_pin(metadata[0].get('requiresPin', False)):\n ui.show_notification(common.get_local_string(30106))\n xbmcplugin.endOfDirectory(g.PLUGIN_HANDLE, succeeded=False)\n return\n\n list_item = get_inputstream_listitem(videoid)\n infos, art = infolabels.add_info_for_playback(videoid, list_item)\n common.debug('Sending initialization signal')\n common.send_signal(common.Signals.PLAYBACK_INITIATED, {\n 'videoid': videoid.to_dict(),\n 'infos': infos,\n 'art': art,\n 'timeline_markers': get_timeline_markers(metadata[0]),\n 'upnext_info': get_upnext_info(videoid, (infos, art), metadata)})\n xbmcplugin.setResolvedUrl(\n handle=g.PLUGIN_HANDLE,\n succeeded=True,\n listitem=list_item)", "def play_media(self, media=None, **kwargs):\n args = {\"version\": self.pms.version}\n args.update(kwargs)\n super().play_media(media, **args)", "def media_play(self):\n self._state = STATE_PLAYING", "def media_play(self):\n self._state = STATE_PLAYING", "def OnPlay(self):\r\n # check if there is a file to play, otherwise open a\r\n # Tk.FileDialog to select a file\r\n print(\"1-1\")\r\n\r\n\r\n self.Media = self.Instance.media_new(self.youtube_url)\r\n self.player.set_media(self.Media)\r\n\r\n # set the window id where to render VLC's video output\r\n if platform.system() == 'Windows':\r\n print(\"1-3\")\r\n self.player.set_hwnd(self.GetHandle())\r\n else:\r\n print(\"1-4\")\r\n self.player.set_xwindow(self.GetHandle()) # this line messes up windows\r\n # FIXME: this should be made cross-platform\r\n\r\n # Try to launch the media, if this fails display an error message\r\n if self.player.play() == -1:\r\n print(\"1-6\")\r\n self.errorDialog(\"Unable to play.\")", "def _send_start_play(self, media=None, **kwargs):\n msg = 
media_to_chromecast_command(\n media, requestiId=self._inc_request(), **kwargs\n )\n self.logger.debug(\"Create command: \\n%r\\n\", json.dumps(msg, indent=4))\n self._last_play_msg = msg\n self._send_cmd(\n msg,\n namespace=\"urn:x-cast:com.google.cast.media\",\n inc_session_id=True,\n inc=False,\n )", "async def play_url(self, url: str):\n await self._pytheos.api.browse.play_url(self.id, url)", "def play(self, filename, callback) :\n raise NotImplementedError(\"play not implemented\")", "def play_movie(self, url):\n raise NotImplementedError", "async def async_play_media(self, media_type, media_id, **kwargs):\n _LOGGER.debug(\"Playback request for %s / %s\", media_type, media_id)\n await self.coordinator.data.play_media(self.zone_id, media_id)\n await self.coordinator.async_refresh()", "async def async_play_media(self, media_type, media_id, **kwargs):\n if media_id.isdigit():\n currentgallery_id = self._gallery_status[\"current_gallery\"]\n currentitems = await self.local_meural.send_get_items_by_gallery(currentgallery_id)\n in_playlist = next((g[\"title\"] for g in currentitems if g[\"id\"] == media_id), None)\n if in_playlist is None:\n# _LOGGER.warning(\"Item %s is not in current playlist, trying to play via remote API.\", media_id)\n await self.meural.device_load_item(self.meural_device_id, media_id)\n else:\n# _LOGGER.warning(\"Item %s in current playlist %s, loading locally.\", media_id, self._gallery_status[\"current_gallery_name\"])\n await self.local_meural.send_change_item(media_id)\n else:\n _LOGGER.warning(\"Can't play media: %s is not an item ID\", media_id)", "def launch_player(stream_uri):\n cmd = [\n config.get('player', 'launch_cmd'),\n '\"%s\"' % stream_uri,\n ] + ['--{}'.format(p) for p in config.get('player', 'parameters').split(',')]\n subprocess.call(' '.join(cmd), shell=True)", "def _play_audio(self, path_or_location):\n url = path_or_location.replace('https', 'http')\n audi_commd = self._vlc_audio_command + [url]\n logger.info('VLC command: {}'.format(audi_commd))\n process = subprocess.Popen(audi_commd)\n self._player_pid = process.pid\n logger.info(\"vlc pid \" + str(process.pid))\n\n # add pid to child_pids\n self._child_pids[process.pid] = True", "async def async_play_media(self, media_type, media_id, **kwargs):\n _LOGGER.debug(\"Trying to play media. Device: %s, Media_type: %s, Media_id: %s\", self.entity_id, media_type, media_id)\n if not self._slave_mode:\n\n if not (media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_URL, MEDIA_TYPE_TRACK] or media_source.is_media_source_id(media_id)):\n _LOGGER.warning(\"For: %s Invalid media type %s. 
Only %s and %s is supported\", self._name, media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_URL)\n await self.async_media_stop()\n return False\n \n if not self._snapshot_active:\n self._playing_mediabrowser = False\n self._nometa = False\n\n if kwargs.get(ATTR_MEDIA_ANNOUNCE):\n _LOGGER.debug(\"For: %s, Announce parameter set, Media_id: %s\", self._name, media_id)\n self._announce = True\n #self._playing_tts = True\n await self.async_snapshot(False)\n self._playing_mediabrowser = False\n self._playing_mass = False\n else:\n self._playing_tts = False\n self._announce = False\n\n if media_source.is_media_source_id(media_id):\n play_item = await media_source.async_resolve_media(self.hass, media_id, self.entity_id)\n if media_id.find('radio_browser') != -1: # radios are an exception, be treated by server redirect checker and icecast metadata parser\n self._playing_mediabrowser = False\n else:\n self._playing_mediabrowser = True\n\n if media_id.find('media_source/local') != -1:\n self._media_source_uri = media_id\n else:\n self._media_source_uri = None\n\n media_id = play_item.url\n if not play_item.mime_type in ['audio/basic',\n 'audio/mpeg', \n 'audio/mp3', \n 'audio/mpeg3', \n 'audio/x-mpeg-3',\n 'audio/x-mpegurl', \n 'audio/mp4', \n 'audio/aac', \n 'audio/x-aac',\n 'audio/x-hx-aac-adts', \n 'audio/x-aiff', \n 'audio/ogg', \n 'audio/vorbis', \n 'application/ogg', \n 'audio/opus', \n 'audio/webm', \n 'audio/wav', \n 'audio/x-wav', \n 'audio/vnd.wav', \n 'audio/flac',\n 'audio/x-flac', \n 'audio/x-ms-wma']:\n _LOGGER.warning(\"For: %s Invalid media type, %s is not supported\", self._name, play_item.mime_type)\n self._playing_mediabrowser = False\n return False\n \n media_id = async_process_play_media_url(self.hass, media_id)\n _LOGGER.debug(\"Trying to play HA media. Device: %s, Play_Item: %s, Media_id: %s\", self._name, play_item, media_id)\n\n media_id_check = media_id.lower()\n\n if media_id_check.startswith('http'):\n media_type = MEDIA_TYPE_URL\n\n if media_id_check.find('8095/media_player') != -1: # Music Assistant exception, not to be treated by server redirect checker and other metadata parsers\n self._playing_mass = True\n else:\n self._playing_mass = False\n\n if media_id_check.endswith('.m3u') or media_id_check.endswith('.m3u8'):\n _LOGGER.debug(\"For: %s, Detected M3U list: %s\", self._name, media_id)\n media_id = await self.async_parse_m3u_url(media_id)\n\n if media_id_check.endswith('.pls'):\n _LOGGER.debug(\"For: %s, Detected PLS list: %s\", self._name, media_id)\n media_id = await self.async_parse_pls_url(media_id)\n\n if media_type == MEDIA_TYPE_URL:\n if self._playing_mediabrowser or self._playing_mass:\n media_id_final = media_id\n else:\n media_id_final = await self.async_detect_stream_url_redirection(media_id)\n\n if self._fwvercheck(self._fw_ver) >= self._fwvercheck(FW_SLOW_STREAMS) and self._state == STATE_PLAYING:\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n \n if self._playing_spotify: # disconnect from Spotify before playing new http source\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:switchmode:wifi\", None)\n\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:play:{0}\".format(media_id_final), None)\n if value != \"OK\":\n _LOGGER.warning(\"Failed to play media type URL. 
Device: %s, Got response: %s, Media_Id: %s\", self.entity_id, value, media_id)\n return False\n\n elif media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_TRACK]:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:playLocalList:{0}\".format(media_id), None)\n if value != \"OK\":\n _LOGGER.warning(\"Failed to play media type music. Device: %s, Got response: %s, Media_Id: %s\", self.entity_id, value, media_id)\n return False\n\n self._state = STATE_PLAYING\n self._media_title = None\n self._media_artist = None\n self._media_album = None\n self._icecast_name = None\n self._playhead_position = 0\n self._duration = 0\n self._trackc = None\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n self._media_image_url = None\n self._ice_skip_throt = True\n self._unav_throttle = False\n if media_type == MEDIA_TYPE_URL:\n self._media_uri = media_id\n self._media_uri_final = media_id_final\n elif media_type == MEDIA_TYPE_MUSIC:\n self._media_uri = None\n self._media_uri_final = None\n if self._announce:\n self.async_schedule_update_ha_state(True)\n _LOGGER.debug(\"For: %s, Announce started: %s\", self._name, media_id)\n return True\n else:\n if not self._snapshot_active:\n await self._master.async_play_media(media_type, media_id)", "async def async_play_media(self, media_type, media_id, **kwargs):\n if self._raumfeld.rooms_are_valid(self._rooms):\n if media_type in SUPPORTED_MEDIA_TYPES:\n if media_type == MEDIA_TYPE_MUSIC:\n if media_id.startswith(\"http\"):\n play_uri = media_id\n else:\n log_error(\"Unexpected URI for media type: %s\" % media_type)\n elif media_type in [\n UPNP_CLASS_ALBUM,\n UPNP_CLASS_LINE_IN,\n UPNP_CLASS_PLAYLIST_CONTAINER,\n UPNP_CLASS_PODCAST_EPISODE,\n UPNP_CLASS_RADIO,\n UPNP_CLASS_TRACK,\n ]:\n if MEDIA_CONTENT_ID_SEP in media_id:\n play_uri = media_id.split(MEDIA_CONTENT_ID_SEP)[1]\n else:\n play_uri = media_id\n else:\n log_error(\"Unhandled media type: %s\" % media_type)\n if self.state == STATE_OFF:\n await self.async_turn_on()\n log_debug(\"self._rooms=%s, play_uri=%s\" % (self._rooms, play_uri))\n await self._raumfeld.async_set_av_transport_uri(self._rooms, play_uri)\n else:\n log_error(\"Playing of media type '%s' not supported\" % media_type)\n else:\n log_debug(\n \"Method was called although speaker group '%s' is invalid\" % self._rooms\n )", "def open(self):\n file = askopenfilename(\n initialdir=self.initial_directory,\n filetypes=(\n (\"Audio Video Interleave\", \"*.avi\"),\n (\"Matroska\", \"*.mkv\"),(\"MPEG-4 AVC\",\"*.mp4\"),\n )\n )\n if isinstance(file, tuple):\n return\n if os.path.isfile(file):\n self.play_film(file)" ]
[ "0.69650835", "0.6877499", "0.6683258", "0.6447326", "0.64142007", "0.63149434", "0.63062537", "0.62950337", "0.6292141", "0.6209253", "0.60831624", "0.6058879", "0.60428107", "0.6024753", "0.6005289", "0.59741634", "0.59693086", "0.59693086", "0.59669644", "0.5965879", "0.5961714", "0.5953808", "0.5952601", "0.59336275", "0.5922426", "0.5920979", "0.5906543", "0.5872192", "0.5871622", "0.58691996" ]
0.74241686
0
Get the list of available MPRIS2 services
def get_services(): services = [] bus = pydbus.SessionBus() for s in bus.get('.DBus').ListNames(): if s.startswith(MprisService.mpris_base): services.append(s) return services
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_services(self):\r\n return get_service_list()", "def available_services(self) -> list[str]:\r\n return self.services", "def getServices(self):\n pass", "def list_services(ctx):\n pass", "def available_services(cls) -> List[str]:\n ret = []\n for (_, name, _) in pkgutil.iter_modules([str(SERVICES_PATH)]):\n ret.append(name)\n return ret", "def get_services(self):\n\t\t#Entrega el dict sin miramientos\n\t\treturn self._services", "def services(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/services')", "def getAllServices(self) -> List[ghidra.framework.plugintool.ServiceInterfaceImplementationPair]:\n ...", "def cmd_SERVICES(self):\r\n return self._ros.get_services()", "def list_magnum_services(self):\n return list(self.container_infrastructure_management.services())", "def available_services():\n all_datas = ()\n data = ()\n\n for class_path in settings.TH_SERVICES:\n class_name = class_path.rsplit('.', 1)[1]\n # 2nd array position contains the name of the service\n data = (class_name, class_name.rsplit('Service', 1)[1])\n all_datas = (data,) + all_datas\n return all_datas", "def list_services(self, **kwargs: Optional[Any]) -> list:\n\n self.logger.debug(\"list_services: %s\", kwargs)\n\n namespace = kwargs.get(\"namespace\", \"global\")\n\n return self.AD.services.list_services(namespace) # retrieve services", "def get_services(self):\n\n return list(self.services.values())", "def get_service_list():\n service_dict = requests.get('http://consul:8500/v1/catalog/services').json()\n service_list = []\n for s in service_dict:\n service_list.append(s)\n return service_list", "def get_offline_services(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetOfflineServices', self.handle))", "def list_services(self,honeypotids):\n req = {\"type\":\"get_all_services\",\n \"to\":honeypotids,\n \"from\":self.network.mc_id}\n expect_dict = {\"type\":\"send_all_services\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"services\"]\n return answer", "def get_services(self):\n try:\n response = requests.get(\n Untiny.SERVICES_URL,\n params=dict(format=\"text\")\n )\n except requests.RequestException:\n return set()\n\n return set([s.strip() for s in response.text.split(',')])", "def list_services(self, **params):\n url = 'os-services'\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n schema = self.get_schema(self.schema_versions_info)\n self.validate_response(schema.list_services, resp, body)\n return rest_client.ResponseBody(resp, body)", "def _get_services(self, services):\n\n services_info = []\n\n for service in services[1]:\n services_info.append(self._make_dict(service))\n \n return services_info", "def get_services(self): \n if self._access_token is None:\n raise RequiresAccessTokenError()\n\n response = self.__make_oauth_request(ADD_URLS_FOR_SERVICES_URL, token=self._access_token, signed=True)\n return simplejson.loads(response.read()).keys()", "def test_ipam_services_list(self):\n pass", "def services(self):\r\n return services.Services(self)", "def get_services_list(self, services):\n if not services:\n return []\n\n return [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]", "def getNodeServiceList(self,node):\n data = self.connect('get','nodes/%s/services' % (node),None)\n return data", "def get_services(self):\n ret = self.v1_service_list.get()\n services = 
{each.metadata.namespace: each.metadata.name for each in ret.items}\n\n return services", "def get_services(**options):\r\n return {}", "def network_service_providers(self):\n path = '/v2.0/service-providers'\n res = self.network.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack network service providers: %s' % \n truncate(res))\n return res[0]['service_providers']", "def list_services(self):\n response = self._get()\n\n services = []\n for s in response[\"services\"]:\n services.append(_create_service_from_json(s, self._session, self._url_base, s[\"folderName\"]))\n\n return services", "def add_services(self):\n # first get the names\n names = str(self.client.console_execute('services -c name {0}\\n'.format(self.ip))[b'data'])\n while not 'name' in names:\n sleep(10)\n names = self.client.console_read()\n names = names.split('\\n')\n for row in names:\n if self.ip in row:\n row = strip_whitespaces(row)\n self.services.append({'name': row.split(' ')[1]})\n\n # get the ports by service name\n ports = str(self.client.console_execute('services -c port {0}\\n'.format(self.ip))[b'data'])\n while not 'port' in ports:\n sleep(10)\n ports = self.client.console_read()\n ports = ports.split('\\n')\n for row in ports:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['port'] = row.split(' ')[1]\n\n # get some information by service name (only useful if a report shall be generated)\n info = str(self.client.console_execute('services -c info {0}\\n'.format(self.ip))[b'data'])\n while not 'info' in info:\n sleep(10)\n info = self.client.console_read()\n info = info.split('\\n')\n for row in info:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['info'] = row.split(' ')[1]", "def services(self) -> dict:\n return self.data[\"services\"]" ]
[ "0.7494182", "0.7366018", "0.7262642", "0.72475517", "0.7116699", "0.7058195", "0.698019", "0.694159", "0.69262844", "0.6915469", "0.68327886", "0.666419", "0.6658179", "0.66437644", "0.6617939", "0.65888256", "0.65527135", "0.64693516", "0.645205", "0.64206463", "0.64191586", "0.64144695", "0.6399693", "0.6379422", "0.63702786", "0.63647026", "0.63451034", "0.6306425", "0.62860733", "0.6277339" ]
0.7681787
0
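A minimal usage sketch for the get_services document above. It assumes pydbus is installed and that MprisService.mpris_base equals the standard MPRIS2 bus-name prefix "org.mpris.MediaPlayer2." (the constant's value is an assumption, it is not shown in the record):

import pydbus

# Assumed value of MprisService.mpris_base: the well-known MPRIS2 bus-name prefix.
MPRIS_BASE = "org.mpris.MediaPlayer2."

def get_services():
    # List every name registered on the session bus and keep the MPRIS2 players.
    bus = pydbus.SessionBus()
    return [name for name in bus.get(".DBus").ListNames() if name.startswith(MPRIS_BASE)]

# Prints one entry per running player, e.g. org.mpris.MediaPlayer2.vlc
for service in get_services():
    print(service)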
Convert track length in microseconds into human readable format
def track_length_string(length): return str(timedelta(microseconds=length))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_to_string(value):\n if value == gst.CLOCK_TIME_NONE:\n return \"--:--:--.---\"\n ms = value / gst.MSECOND\n sec = ms / 1000\n ms = ms % 1000\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n return \"%02d:%02d:%02d.%03d\" % (hours, mins, sec, ms)", "def beautify_length(length):\n sec = length / gst.SECOND\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n\n parts = []\n if hours:\n parts.append(ngettext(\"%d hour\", \"%d hours\", hours) % hours)\n\n if mins:\n parts.append(ngettext(\"%d minute\", \"%d minutes\", mins) % mins)\n\n if not hours and sec:\n parts.append(ngettext(\"%d second\", \"%d seconds\", sec) % sec)\n\n return \", \".join(parts)", "def _convert_sfx_timestamp(ts: int) -> float:\n return float(ts) / 1000", "def human_hz(v):\n if v < 1e3:\n return (v, 'Hz')\n if v < 1e6:\n return (v/1.0e3, 'kHz')\n if v < 1e9:\n return (v/1.0e6, 'MHz')\n return (v/1.0e9, 'GHz')", "def millis_to_human_readable(millis):\n s=millis/1000000\n m,s=divmod(s,60)\n\n return str(m) + \":\" + str(s)", "def frames_to_ms(num_frames: int) -> int:\n return int(16.67 * num_frames)", "def frames_to_ms(num_frames: int) -> int:\n return int(16.67 * num_frames)", "def perfcounter_to_str(val):\n return f\"{math.floor(val / 60)}m {math.floor(val % 60)}s {math.floor((val % 1) * 1000)}ms\"", "def units_to_msec(units, resolution):\n time_ms = units * float(resolution) / 1000\n return time_ms", "def format_time(value: float) -> str:\n if value <= 0.01:\n return f\"{value * 1000000:.0f}us\"\n elif value <= 0.1:\n return f\"{value * 1000:.1f}ms\"\n elif value > 172800:\n return f\"{value / 86400:.2f}days\"\n elif value > 86400:\n return f\"{value / 86400:.2f}day\"\n elif value > 1800:\n return f\"{value / 3600:.2f}hr\"\n elif value > 60:\n return f\"{value / 60:.2f}min\"\n return f\"{value:.2f}s\"", "def format_seconds(duration):\n\treturn stats_utils.format_seconds(duration)", "def time_string(time_f: float) -> str:\n m, s = divmod(time_f, 60)\n h, m = divmod(m, 60)\n\n if h < 1:\n if m < 1 and s < 1:\n msec = int(s * 1000)\n return '{:=03d}msec'.format(msec)\n\n if m < 1:\n return '{:=02.0f}sec'.format(s)\n\n return '{:=02.0f}min:{:=02.0f}sec'.format(m, s)\n else:\n return '{:=01.0f}h:{:=02.0f}min:{:=02.0f}sec'.format(h, m, s)", "def us(self):\n return 1000 * 1000 * self.read()", "def to_length_secs(self):\n return (self.bpm / 60.0) / self.period", "def truncate_microsec(curr_time=None):\n time_str = curr_time.strftime(\"%H %M %S %f\")\n return time_str[0:-3]", "def PreciseTime(self):\n return '%2.2d:%2.2d:%06.3f' % (self._hour, self._minute, self._second)", "def get_formatted_duration(self, prev_time):\n duration = time() - prev_time\n if duration < 60:\n unit = 's'\n elif duration < 3600:\n duration /= 60\n unit = 'm'\n else:\n duration /= 3600\n unit = 'h'\n return self.format_num(duration) + unit", "def human_seconds(seconds, fmt=\"%.3g %s\"):\n t = 1e6 * seconds # start with µsec\n for suff in \"usec msec\".split():\n if t < 1000:\n return fmt % (t, suff)\n t /= 1000\n return fmt % (t, \" sec\")", "def find_track_length(msg, msg_type, seconds):\n if msg_type == \"TINFO\" and msg[1] == \"9\":\n len_hms = msg[3].replace('\"', '').strip()\n hour, mins, secs = len_hms.split(':')\n seconds = int(hour) * 3600 + int(mins) * 60 + int(secs)\n return seconds", "def format_time(value: int) -> str:\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d%H%M%S')", "def timestr(msec):\n sec = float(msec) / 1000\n\n hours = int(sec / 
3600)\n sec -= hours * 3600\n\n minutes = int(sec / 60)\n sec -= minutes * 60\n\n return f\"{hours:02d}:{minutes:02d}:{sec:06.3f}\".replace(\".\", \",\")", "def get_elapsed_timestamp(self) -> str:\n t = self.elapsed_time\n minutes = int(t / 60)\n seconds = int(t - (60 * minutes))\n millis = int(100 * (t - int(t)))\n return '{:>02d}:{:>02d}.{:<02d}'.format(minutes, seconds, millis)", "def duration_in_seconds(self):\n \"Should not set track length\"\n return self.duration / float(self.samplerate)", "def get_track_length(track_path):\n track_extension = os.path.splitext(track_path)[1]\n if track_extension:\n try:\n mutagen_track = File(track_path)\n track_total_length = mutagen_track.info.length\n except:\n track_total_length = 0\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=f\"Audio file incorrect : {track_path}\")\n finally:\n track_length_formated = strftime(\n '%M:%S', gmtime(track_total_length))\n track_length_label.configure(text=track_length_formated)\n track_pos_slider.configure(to=track_total_length)\n return track_total_length", "def get_track_length(duration):\n try:\n length = time.strptime(duration, '%M:%S')\n except ValueError:\n return None\n return length.tm_min * 60 + length.tm_sec", "async def humanize_time(self, value):\n if value is None:\n return \"None\"\n return str(datetime.timedelta(seconds=value))", "def get_duration_track(artist, track):\n track_infos = get_infos(artist, track)\n if track_infos == None :\n return None\n return int(track_infos['track']['duration']) / 60000", "def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)", "def humantime(seconds: float) -> str:\n return redivmod(seconds, [(60, \"seconds\"),\n (60, \"minutes\"),\n (24, \"hours\"),\n (7, \"days\"),\n (52, \"weeks\"),\n (0, \"years\")])", "def get_time(self):\n return \"%02u:%02u:%02u (%d)\" % self.rtc.datetime()[4:8]" ]
[ "0.64983374", "0.6474109", "0.6469716", "0.6338871", "0.6206818", "0.61407804", "0.61407804", "0.61238563", "0.603152", "0.60117567", "0.59859645", "0.5964505", "0.59626114", "0.5934192", "0.5932881", "0.59158444", "0.59021026", "0.5892526", "0.58827716", "0.5862022", "0.58576185", "0.5829539", "0.58196974", "0.5803696", "0.5796993", "0.57924324", "0.5785612", "0.57658195", "0.57605547", "0.57493216" ]
0.72623426
0
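A quick sketch of how the timedelta-based formatter above behaves; the sample values are chosen for illustration only:

from datetime import timedelta

def track_length_string(length):
    # length is a track duration in microseconds (the unit MPRIS2 uses for mpris:length).
    return str(timedelta(microseconds=length))

print(track_length_string(225_000_000))    # 225 s  -> "0:03:45"
print(track_length_string(3_723_000_000))  # 3723 s -> "1:02:03"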
Remove Roman numerals from a quote.
def clean_num(quote): for char in ROMAN: quote = quote.replace(*char) return quote
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def removeNumbers(self, words):\n\t\treturn re.sub(r'\\d', '', words)", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_numbers(text):\n result = re.sub(r'\\d+', '', text)\n return result", "def remove_numbers(text):\n return ''.join([i for i in text if not i.isdigit()])", "def strip_numbers(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"-?\\d+\")\n return re.sub(regex, \"\", text)", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def remove_numbers(self, doc):\n regex = re.compile('[%s]' % re.escape(self.numbers))\n return regex.sub('', doc)", "def _remove_digits(text: str) -> str:\n table = str.maketrans('', '', digits)\n\n return text.translate(table)", "def sanitize_text(text):\n return re.sub(r\"\\d+\", \"\", text)", "def clean_isbn(isbn):\n digits = set(\"0123456789\")\n return [int(x if x in digits else 10) for x in isbn.translate(None, \" -\")]", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def remove_flight_numbers(text):\n return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))", "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def clean_rent(self, rent):\n # assume rent is either int/float or str\n if isinstance(rent, str):\n return int(rent.replace('$', '').replace(',', ''))\n else:\n return rent", "def strip_non_digits(x: str) -> str:\n exp = re.compile(\"[^\\d]+\")\n return re.sub(exp, \"\", x)", "def compact(number):\n return clean(number, ' -./,').strip()", "def keep_digits(x: str) -> str:\n return \"\".join([c for c in x if c.isdigit()]).strip()", "def replace_numbers(words):\n p = inflect.engine()\n remove_numbers = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n remove_numbers.append(new_word)\n else:\n remove_numbers.append(word)\n return remove_numbers", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def stripname(name, stripnums = True):\n\tfor pattern in removestuffregex:\n\t\tname = re.sub(pattern, \"\", name)\n\tif stripnums:\n\t\tname = re.sub(numberregex, \"\", name)\n\tfor pattern in removestuff:\n\t\tname = name.replace(pattern, \"\")\n\treturn name", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = 
re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x" ]
[ "0.6802162", "0.67661715", "0.67661715", "0.67661715", "0.65662754", "0.65186864", "0.65051675", "0.6495161", "0.6374237", "0.6357037", "0.63561016", "0.6348042", "0.6322571", "0.6312734", "0.6128238", "0.6105336", "0.60863405", "0.6003035", "0.58949673", "0.58674973", "0.58010054", "0.5769308", "0.5763987", "0.575551", "0.5728231", "0.57155824", "0.56519765", "0.56489575", "0.5629401", "0.55619097" ]
0.8168946
0
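The clean_num document above reads a ROMAN constant that is not included in the record; a sketch with an assumed pair list (the real pairs may differ):

# Assumed shape of ROMAN: (old, new) pairs unpacked into str.replace(*char).
ROMAN = [("III.", ""), ("II.", ""), ("I.", "")]

def clean_num(quote):
    for char in ROMAN:
        quote = quote.replace(*char)
    return quote

print(clean_num("I. All the world's a stage."))  # -> " All the world's a stage."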
Convert a text to ASCII format.
def to_ascii(text): return re.sub(r'[^\x00-\x7F]+', ' ', text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __unicode_to_ascii(text):\n line = unicodedata.normalize('NFKD', text)\n return ''.join(c for c in line if not unicodedata.combining(c))", "def _flatten_to_ascii(txt):\r\n if isinstance(txt, str):\r\n txt = txt.decode('utf-8')\r\n return unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore')\r\n else:\r\n return unicode(unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore'))", "def to_ascii(self):\n code = self.build()\n for i, line in enumerate(code):\n code[i] = line.replace('1', '|').replace('0', '_')\n return '\\n'.join(code)", "def to_ascii(s):\n return s.encode('ascii', 'ignore')", "def force_ascii(text):\n return \"\".join([c for c in text if ord(c) < 128])", "def normalize(self, text):\n\n return binascii.hexlify(text)", "def asciitize(text):\n if sys.version_info[0] < 3:\n # python 2\n if isinstance(text, unicode):\n text = text.encode('ascii', 'ignore')\n if isinstance(text, str):\n text = text.decode('ascii', 'ignore')\n else:\n # python 3\n if isinstance(text, str):\n text = text.encode('ascii', 'ignore')\n if isinstance(text, bytes):\n text = text.decode('ascii', 'ignore')\n return text", "def to_ascii(string):\n\tfor key in UNICODE_TO_ASCII:\n\t\tstring = string.replace(key, UNICODE_TO_ASCII[key])\n\n\treturn string", "def _UnicodeToAscii(string):\n return unicode(string).encode('utf-8')", "def to_ascii(input):\n converted = []\n for i in range(len(input)):\n converted.append(ord(input[i]))\n return converted", "async def ascii(self, ctx, *, text):\n text = text.replace(' ', '\\n')\n \n if not text:\n await ctx.send(f\"{ctx.tick(False)} You need to specify the text you want to convert!\")\n \n _fig = figlet_format(text.replace(' ', '\\n'))\n \n if len(_fig) > 1300:\n await ctx.send(f\"{ctx.tick(False)} That message is too long!\")\n await ctx.send(f\"{ctx.tick(True)} Done!\")\n await ctx.send(f\"```{_fig}```\")", "def to_ascii(a):\n\treturn str(\n\t\ta.tostring(),\n\t\t\"UTF-8\",\n\t\terrors='ignore'\n\t)", "def to_ascii(line):\n data = [ord(c) for c in line]\n data.append(10)\n return data", "def toASCII(self, *args, **kwargs):\n return _image.image_toASCII(self, *args, **kwargs)", "def convert( self, text ):\n if self.input_codec != self.output_codec:\n return unicode( text, self.input_codec, 'ignore' ).encode( self.output_codec, 'ignore' )\n else:\n return text", "def ascii_convert_str(the_str: str):\n return ANSI_ESCAPE.sub(rb\"\", the_str)", "def string_to_ascii(string):\n if string is None:\n return\n\n if isinstance(string, str):\n string = string.decode('utf-8')\n\n return normalize('NFKD', string).encode('ASCII', 'ignore')", "def _convert_to_text(self):\n if type(self.data) is not list:\n return -1\n out = str()\n for element in self.data:\n out += chr(int(element))\n return (out)", "def encrypt(self, text):\n text = text.upper()\n output = []\n text_list = list(text)\n for letter in text_list:\n output.append(self.atbash_dict.get(letter, letter))\n return ''.join(output)", "def encoding(text: str) -> str:\n text = [text[i:i + 3] for i in range(0, len(text), 3)]\n encoded_text = []\n for letter in text:\n completed = False\n for coding in Encoder.__ALPHABET:\n if coding.encode == letter:\n completed = True\n encoded_text.append(coding.code)\n if completed:\n break\n if not completed:\n encoded_text.append(letter)\n encoded_string = \"\".join(encoded_text)\n return encoded_string.lower()", "def str_to_ascii(a_string):\n try:\n return unicode(a_string).encode(\"ascii\", \"ignore\")\n except UnicodeDecodeError:\n return a_string.decode(\"ascii\", 
\"ignore\")", "def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])", "def encode(self, text):", "def ascii_convert(the_bytes: bytes):\n return ANSI_ESCAPE_B.sub(rb\"\", the_bytes).decode(\"utf-8\")", "def toAscii(s):\n return ''.join(\n char for char in unicodedata.normalize('NFD', s)\n if unicodedata.category(char) != 'Mn'\n and char in letters\n )", "def character_to_ascii():\n c = input(\"Enter a character: \")\n print(\"The ASCII value of '\" + c+\"' is\", ord(c))", "def to_ascii(ustr):\n return ustr.encode('utf8')", "def get_encoded_text(self, text):\n\t\tencoded_text = \"\"\n\t\tfor character in text:\n\t\t\tencoded_text += self.codes[character]\n\t\treturn encoded_text", "def replaceNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else ' ' for i in text])", "def translateString(self, text):\n result = ()\n length = len(text)\n for i in range(0, length):\n charCode = ord(text[i])\n if (charCode < 32):\n charCode = 32\n elif (charCode > 127):\n charCode = 127\n result = (result + (charCode,))\n \n return result" ]
[ "0.6927363", "0.687013", "0.64326686", "0.6421335", "0.640143", "0.6400809", "0.6353484", "0.6336575", "0.62252617", "0.6219304", "0.6185122", "0.61767006", "0.6172326", "0.6120976", "0.6010914", "0.5968668", "0.5958699", "0.5954476", "0.593581", "0.59327143", "0.59102607", "0.5879108", "0.58690476", "0.5864053", "0.586107", "0.5860022", "0.5823424", "0.5822647", "0.5796927", "0.5782578" ]
0.7760155
0
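The regex in the to_ascii document above collapses each run of non-ASCII characters into a single space, for example:

import re

def to_ascii(text):
    return re.sub(r'[^\x00-\x7F]+', ' ', text)

print(to_ascii("naïve café"))  # -> "na ve caf "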
Clean up the text from a ```` quote element.
def process_quote_text(quote_text): quote_text = quote_text.replace('―', '').replace('\n\n', '\n') quote_text = quote_text[:-1] if quote_text[-1] == '\n' else quote_text for char in HTML: quote_text = quote_text.replace(*char) return quote_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_text(text):\n return text.replace('\\n', '').replace('\"', '')", "def initial_quotes(self, text):\n\n quote_finder = re.compile(r\"\"\"\n ( # Start group capture\n (\"|&ldquo;|&\\#8220;) # A double quote\n | # Or\n ('|&lsquo;|&\\#8216;) # A single quote\n ) # End group capture\n \"\"\", re.VERBOSE)\n\n replace_function = lambda match: \"\"\"<span class=\"%s\">%s</span>\"\"\"\\\n % ('dquo' if match.group(2) else 'quo', match.group(1))\n text = quote_finder.sub(replace_function, text, 1) \n \n return text", "def strip_quotes(text) -> str:\n if text is None:\n return \"\"\n\n l = len(text)\n\n if l == 0:\n return text\n\n start = 0 if text[0] != \"'\" and text[0] != '\"' else 1\n end = l if text[l-1] != \"'\" and text[l-1] != '\"' else l-1\n\n return text[start:end]", "def _remove_extra_quotation(text: str) -> str:\n text = re.sub(r'\\\"{2,}', '\"', text)\n\n return re.sub(r'\\'{2,}', \"'\", text)", "def strip_quotes(text: str) -> str:\n\t\tpieces = [\n\t\t\t\t('`', '`'),\n\t\t\t\t(Chars.lsq, Chars.rsq), (Chars.ldq, Chars.rdq), (\"'\", \"'\"), ('\"', '\"')\n\t\t\t]\n\t\treturn StringTools.strip_paired(text, pieces)", "def refined_text(text):\n import re\n text = text.replace('<e1>','')\n text = text.replace('</e1>','')\n text = text.replace('<e2>','')\n text = text.replace('</e2>','')\n\n text = text[1:-1] # trim quotes\n # text = text.replace('\"','')\n # text = text.replace(',','')\n # text = text.replace('.','')\n # text = text.replace(';','')\n # text = text.replace('`','')\n # text = text.replace('\\'','')\n # text = text.replace('(','')\n # text = text.replace(')','')\n # text = text.replace('/','')\n\n return text", "def removeQuotes(text):\n lines = []\n for l in io.StringIO(text):\n l = l.strip()\n if l and l[0] != '>':\n lines.append(l)\n return ' '.join(lines)", "def add_smart_quotes(article: Article) -> Article:\n text_tag: bs4.NavigableString\n for text_tag in article.content.find_all(text=True):\n #\\1 will sub in the first matched group\n new_tag = re.sub(r'\"([^\"]*)\"', r'“\\1”', text_tag)\n text_tag.replace_with(new_tag)\n return article", "def fix_end_quote(text: str):\n return (text + '\"') if text.count('\"') % 2 != 0 else text", "def __clean_string(cls, text):\n if text.startswith(\"(\"):\n text = text[1:]\n if text.endswith(\")\"):\n text = text[:-1]\n if text.endswith(\",\"):\n text = text[:-1]\n if len(text) > 2 and cls.__is_quote(text[0]) and \\\n cls.__is_quote(text[-1]):\n text = text[1:-1]\n return text", "def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text", "def clean_description(text):\n text = text.replace(\"\\u00c2\\u00bf\", \"'\")\n text = re.sub(r\"<\\s*br\\s*/?>\", \"\\n\", text, flags=re.IGNORECASE)\n text = re.sub(r\"</\\s*br>\", \"\", text, flags=re.IGNORECASE)\n return text", "def clean_text(text: Any) -> str:\n return textwrap.dedent(str(text)).strip()", "def remove_special_tags(text):\n clean = re.compile('{.*?}')\n return re.sub(clean, '', text)", "def process_text(text):\n text = re.sub(r'<@>\\s+|<s>\\s+|</s>\\s+|<p>\\s+|</p>\\s+|\\s+\\,|\\'s|\\'|\\;|\\(|\\)|\\-\\-\\s+|\\s+\\.', '', text)\n text = re.sub(r'\\.\\,', '. 
,', text)\n text = re.sub(r'\\,', '', text)\n text = re.sub(r'\\$', '$ ', text)\n text = re.sub(r'\\%', ' %', text)\n text = re.sub(r'\\s\\\"\\s', ' ', text)\n text = re.sub(r'\\.\\s+', '. ', text)\n text = text.lower()\n return text", "def defang_text(text):\n text = text.replace(\"'\", \"''\")\n text = text.replace('\"', '\"\"')\n return text", "def clean_tag(data):\n # TODO: make this a method of Tag?\n return escape_html(data).replace('\"', '&quot;').replace(\"'\", '&#39')", "def remove_apostrophes(text: str) -> str:\n apostrophes_re = re.compile(\"'\")\n return apostrophes_re.sub(' ', text)", "def strip_markup(text):\n html_tag_regex = re.compile(\n r'<'\n r'[(--)\\?\\!\\%\\/]?'\n r'[a-zA-Z0-9#\\\"\\=\\s\\.\\;\\:\\%\\&?!,\\+\\*\\-_\\/]+'\n r'\\/?>',\n re.MULTILINE | re.UNICODE\n )\n if text:\n text = re.sub(html_tag_regex, ' ', text)\n return text", "def removeMarkup(self, text):\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)", "def strip_tags(text):\n # Remove header tags\n p = re.compile(\"<\\?.+?\\?>\") \n text = re.sub(p, \"\", text)\n\n # Remove <HOO>, <p> and <s> tags\n text = text.replace(\"<p>\",\"\")\n text = text.replace(\"</p>\",\"\")\n text = text.replace(\"<s>\",\"\")\n text = text.replace(\"</s>\",\"\")\n text = text.replace(\"<HOO>\",\"\")\n text = text.replace(\"</HOO>\",\"\")\n\n return text", "def remove_tags(raw):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, ' ', raw)\n return cleantext", "def remove_Tags(self,text):\n cleaned_text = re.sub('<[^<]+?>', '', text)", "def _do_smart_punctuation(self, text):\r\n if \"'\" in text: # guard for perf\r\n text = self._do_smart_contractions(text)\r\n text = self._opening_single_quote_re.sub(\"&#8216;\", text)\r\n text = self._closing_single_quote_re.sub(\"&#8217;\", text)\r\n\r\n if '\"' in text: # guard for perf\r\n text = self._opening_double_quote_re.sub(\"&#8220;\", text)\r\n text = self._closing_double_quote_re.sub(\"&#8221;\", text)\r\n\r\n text = text.replace(\"---\", \"&#8212;\")\r\n text = text.replace(\"--\", \"&#8211;\")\r\n text = text.replace(\"...\", \"&#8230;\")\r\n text = text.replace(\" . . . \", \"&#8230;\")\r\n text = text.replace(\". . 
.\", \"&#8230;\")\r\n return text", "def concat_text(self, elem):\n\n s = u\" \".join([ frag.strip() for frag in elem.itertext() if re.search(\"\\S\", frag) ]) \n return re.sub(\" (\\W )\", \"\\\\1\", s)", "def htmlquote(text):\r\n text = text.replace(\"&\", \"&amp;\") # Must be done first!\r\n text = text.replace(\"<\", \"&lt;\")\r\n text = text.replace(\">\", \"&gt;\")\r\n text = text.replace(\"'\", \"&#39;\")\r\n text = text.replace('\"', \"&quot;\")\r\n return text", "def formalize_quotes(text: str) -> str:\n for q, p in quote_classes.items():\n for c in p:\n text = re.sub(c, q, text)\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text" ]
[ "0.6924355", "0.6894347", "0.68927824", "0.68723285", "0.6872247", "0.6835093", "0.67228824", "0.66282713", "0.63394177", "0.6337055", "0.6335682", "0.6283214", "0.62645966", "0.62553567", "0.6253903", "0.6252699", "0.6246763", "0.62458956", "0.62109214", "0.6181878", "0.61814696", "0.61693555", "0.6163092", "0.6129546", "0.6118261", "0.61011577", "0.6096424", "0.6093637", "0.6093637", "0.6093637" ]
0.746589
0
Split an href and retrieve the author's name and its key.
def parse_author_href(href): author_parts = href.split('/')[-1].split('.') key = author_parts[0] author_name = author_parts[1].replace('_', ' ') return author_name, key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_author(html_soup):\n auth_text = html_soup.find('div', attrs = {\"class\" : \"author\"}).text.split(\"|\")\n for i in auth_text:\n if 'By' in i:\n auth_text_split = i.split()\n auth_text_split = auth_text_split[auth_text_split.index('By')+1:auth_text_split.index('By')+3]\n auth_text_split = ' '.join(auth_text_split)\n return(auth_text_split)", "def get_author_titles(author_url):\n\t# Get the html tree for the author page\n\tauthor_page_tree = get_tree(author_url)\n\t# Get the <a> elements for the papers on the author's page\n\ta_elems = get_a_elems_for_papers(author_page_tree)\n\n\tall_titles = []\n\tlinks = []\n\t# Loop through a elements and put associated href and text into respective lists\n\tfor a in a_elems:\n\t\tall_titles.append(a.text_content())\n\t\tlinks.append(a.get(\"href\"))\n\n\t# Create list of (title, url) tuples\n\ttitles_links = zip(all_titles, links)\n\t# Get the list of titles of papers that have been tagged with a subject\n\ttagged_titles = get_tagged_titles(titles_links)\n\t# Return the 2 lists in a tuple\n\treturn (all_titles, tagged_titles)", "def extract_author(bs_soup):\n sub_item = bs_soup.find(\"div\", class_=AUTHOR_CLASS)\n if sub_item:\n return sub_item.text\n return None", "def extract_author(self, parsed_html, author_idx=0, author=None):\n\n if not author:\n author = {}\n\n\n # Find all links with this xpath and tries to classify them\n links = []\n xpath = '//div[@class=\"profile cf\"]/div/ul/li/a'\n\n for url in parsed_html.xpath(xpath):\n links += [url.get('href')]\n\n for social in self.classify_links(links):\n author[social[0]] = social[1]\n\n\n # Get description text provided by the author\n xpath = '//div[contains(@class, \"profile-text\")]/p'\n items = parsed_html.xpath(xpath)\n author['about'] = self.clear_text(items)\n\n\n # Get Crunchbase url profile\n xpath = '//div[contains(@class, \"profile-text\")]/a'\n website = parsed_html.xpath(xpath)\n\n if website:\n author['website'] = website[0].get('href')\n\n\n # Get his/her avatar url\n xpath = '//div[@class=\"profile cf\"]/div/img'\n avatar = parsed_html.xpath(xpath)\n\n if avatar:\n author['avatar'] = avatar[0].get('src')\n\n\n # Tries to get the author's profile url\n xpath = 'meta[@property=\"og:url\"]'\n author['profile'] = self.get_text_or_attr(\n parsed_html, xpath, 'content'\n )\n\n\n # If the author cannot be fetched from his generated url, we have to\n # check the article's page in order to find him/her.\n\n if 'twitter' not in author and 'flag' not in author:\n\n parsed = self.parse(self.article_url, self.article_page_type)\n return self.extract_author_from_page(parsed, author_idx)\n\n\n if 'flag' in author:\n del author['flag']\n\n\n return author", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def scopus_author_link(self):\n return self._json['coredata'].get('link', [])[1].get('@href')", "def get_article_author(self, article_webpage):\n pass", "def parse_anchor(anchor):\n \n href = anchor.get(\"href\")\n content = anchor.text\n \n if href == None:\n href = ''\n \n if content == None:\n content == ''\n \n return href, content", "def coauthor_link(self):\n return self._json['coredata'].get('link', [])[3].get('@href')", "def _get_name_relurl_and_desc(snippet_html):\n name_and_url_part, desc_part = snippet_html.find_all('p', 'snippet')\n name = name_and_url_part.get_text()\n relative_url = name_and_url_part.find('a').get('href')\n desc = desc_part.get_text()\n return name, 
relative_url, desc", "def extractParticular(link):\n webpage = openWebsite(link).read()\n nameIndexStart = webpage.index('<title>') + 7\n nameIndexStop = webpage[nameIndexStart:].index('</title>') + nameIndexStart - 1\n name = webpage[nameIndexStart : nameIndexStop].split('-')[0]\n name = \" \".join(name.split())\n name = re.sub('/', '', name)\n\n avatarName = RESTAURANTPATH + '{}.png'.format(\"\".join(name.split()).lower())\n captureImage(link, avatarName)\n\n return name, avatarName", "def author_name(text):\n tag = text.split()\n\n \"\"\"\n We take the beginning of the text since the\n author name will likely be there\n \"\"\"\n\n tag = tag[:100]\n author = []\n\n current_tag = 0\n \"\"\"\n We go through each word until we find the first instance\n of the word 'by' or 'author', which should mean the author\n will be written right after that.\n We save the first word after 'by' or 'author' since it should\n be the authors first name\n \"\"\"\n\n for word in tag:\n if (word.lower() == ('by') or\n word.lower() == ('author') or\n word.lower() == ('author:')):\n\n author.append(tag[current_tag+1].decode(encoding='UTF8',\n errors='ignore'))\n current_tag += 1\n tag = tag[current_tag+1:]\n break\n current_tag += 1\n\n \"\"\"\n We go through each word after the first name of the author\n until we find a word that is not capitalized. We assume that\n it marks the end of the author name.\n We then return a list of the author's name split up.\n \"\"\"\n current_tag = 0\n for word in tag:\n if tag[current_tag].lower() == 'this':\n break\n if tag[current_tag].istitle():\n author.append(tag[current_tag].decode(encoding='UTF8',\n errors='ignore'))\n current_tag += 1\n\n return author", "def get_paper_authors(tree):\n\tpath = '//table/tr/th[text() = \"Glasgow Author(s) Enlighten ID:\"]/following-sibling::td/a'\n\t# Get list of <a> elements, each an author\n\tauthors = tree.xpath(path)\n\t# Make list of (author name, author url) pairs to return\n\tauthors = [(author.text, author.get(\"href\")) for author in authors]\n\n\treturn authors", "def get_name_url_matches(author_name, html_tree):\n\n\t# Convert name to lower case - this will be searched against lower case text on the Enlighten page\n\tlower_name = author_name.lower()\n\t# Used to convert text in <a> tags to lower case in paths before checking if matches the name provided\n\tcase = 'translate(text(), \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\")'\n\t# This is the path to look for <a> tags which contain the target name as text\n\t# N.B. contains() is used rather than equals as it can catch more cases\n\tpath = '//table/tr/td/ul/li/a[contains(%s, \\\"%s\\\")]' % (case, lower_name)\n\t# get the list of <a> elements whose text contains the name\n\telements = html_tree.xpath(path)\n\t# If target string was found, for each <a> element that contains it, make a\n\t# (text, url) tuple and create a list out of the resulting tuples\n\t# N.B. 
the href obtained from the element is concatenated to the base url as it is relative\n\tif elements:\n\t\t# have to concatenate as href is given as relative path\n\t\ttext_url_tups = [(elem.text, author_list_base + elem.get(\"href\")) for elem in elements]\n\telse:\n\t\ttext_url_tups = None\n\n\treturn text_url_tups", "def getArtistsFromURL(url):\n html = urllib.request.urlopen(url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n table = soup.findAll(\"a\")\n table = table[30:len(table) - 2]\n artistLinks = []\n for entry in table:\n text = str(re.findall(\"(?:anonymous).*\\\"\", str(entry)))\n text = re.sub(\"[\\]\\['\\\"]\",\"\",text)\n if len(re.findall(\"/\",text)) ==2:\n artistLinks.append(text)\n return artistLinks", "def extract_author_from_page(self, parsed, author_idx=0):\n\n author = {}\n\n author_url = parsed.xpath('//a[@rel=\"author\"]')\n if author_url and len(author_url) > author_idx:\n author['profile'] = 'https://techcrunch.com'\n author['profile'] += author_url[author_idx].get('href')\n\n\n # Find the twitter handle associated with the i-th author\n twitter_handle = parsed.xpath('//span[@class=\"twitter-handle\"]/a')\n if twitter_handle and len(twitter_handle) > author_idx:\n author['twitter'] = twitter_handle[author_idx].get('href')\n\n\n # Parse the new author's page and send it back to `extract_author`.\n # In case the profile page found on article's page does not exist as\n # well, this will be going back and forth on an infinite basis. To exit\n # this loop, we added a flag on the author dict.\n author['flag'] = 'exit'\n\n parsed = self.parse(author['profile'], self.author_page_type)\n\n return self.extract_author(parsed, author_idx, author)", "def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name", "def parse_author(self, response):\n i = AuthorItem()\n i['name'] = response.xpath('//h3[@class=\"author-title\"]/text()').extract_first().strip()\n i['birth_date'] = response.xpath('//span[@class=\"author-born-date\"]/text()').extract_first()\n birth_location = response.xpath('//span[@class=\"author-born-location\"]/text()').extract_first()\n if birth_location:\n i['birth_location'] = birth_location.replace('in ', '')\n i['description'] = response.xpath('//div[@class=\"author-description\"]/text()').extract_first().strip()\n i['url'] = response.url\n return i", "def _decode_link(self, link):\n\n if link.HasField(\"bucket\"):\n bucket = link.bucket\n else:\n bucket = None\n if link.HasField(\"key\"):\n key = link.key\n else:\n key = None\n if link.HasField(\"tag\"):\n tag = link.tag\n else:\n tag = None\n\n return (bucket, key, tag)", "def link_extract(link_text, content):\n h = html5lib.parse(content, namespaceHTMLElements=False)\n candidates = h.findall(\".//a[.='%s']\" % link_text)\n if not candidates:\n return 'NOT MATCHED'\n try:\n return candidates[0].attrib['href']\n except:\n return 'NOT MATCHED'", "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if 
len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def get_hlinks(source):\n start_sep='href=\"'\n end_sep='\"'\n result=[]\n tmp=source.split(start_sep)\n for par in tmp:\n if end_sep in par:\n result.append(par.split(end_sep)[0])\n return result", "def parse_authors(article):\n author_names = article.find(\"sourcedesc\").findAll(\"persname\")\n authors = []\n for author in author_names:\n firstname = author.find(\"forename\", {\"type\": \"first\"})\n firstname = firstname.text.strip() if firstname is not None else \"\"\n middlename = author.find(\"forename\", {\"type\": \"middle\"})\n middlename = middlename.text.strip() if middlename is not None else \"\"\n lastname = author.find(\"surname\")\n lastname = lastname.text.strip() if lastname is not None else \"\"\n if middlename is not \"\":\n authors.append(firstname + \" \" + middlename + \" \" + lastname)\n else:\n authors.append(firstname + \" \" + lastname)\n authors = \"; \".join(authors)\n return authors", "def parse_author_affiliation(medline):\n authors = []\n article = medline.find(\"Article\")\n if article is not None:\n author_list = article.find(\"AuthorList\")\n if author_list is not None:\n authors_list = author_list.findall(\"Author\")\n for author in authors_list:\n if author.find(\"ForeName\") is not None:\n forename = (author.find(\"ForeName\").text or \"\").strip() or \"\"\n else:\n forename = \"\"\n if author.find(\"Initials\") is not None:\n initials = (author.find(\"Initials\").text or \"\").strip() or \"\"\n else:\n initials = \"\"\n if author.find(\"LastName\") is not None:\n lastname = (author.find(\"LastName\").text or \"\").strip() or \"\"\n else:\n lastname = \"\"\n if author.find(\"Identifier\") is not None:\n identifier = (author.find(\"Identifier\").text or \"\").strip() or \"\"\n else:\n identifier = \"\"\n if author.find(\"AffiliationInfo/Affiliation\") is not None:\n affiliation = author.find(\"AffiliationInfo/Affiliation\").text or \"\"\n affiliation = affiliation.replace(\n \"For a full list of the authors' affiliations please see the Acknowledgements section.\",\n \"\",\n )\n else:\n affiliation = \"\"\n authors.append(\n {\n \"lastname\": lastname,\n \"forename\": forename,\n \"initials\": initials,\n \"identifier\": identifier,\n \"affiliation\": affiliation,\n }\n )\n return authors", "def _parse_name(self, cell, cell_content):\n mp_page = cell_content.find(\"a\").attrs[\"href\"]\n\n full_name = cell_content.text.strip()\n name, *title = full_name.split(\",\")\n last, *first = name.split(\" \")\n\n id_ = mp_page[mp_page.find(\"PAD_\") + 4 : mp_page.rfind(\"/\")]\n url = re.sub(\"index.shtml$\", \"\", mp_page)\n\n first_name = \" \".join(first).rstrip(\",\").strip()\n last_name = last.strip()\n title = \",\".join(title).strip()\n\n return {\n \"id\": id_,\n \"url\": url,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"title\": title,\n }", "def tokenize_href(self, soup):\n for a in soup.find_all(u'a'):\n href = a.attrs.get(u'href', u'')\n # Absolute URLs only.\n if (href.startswith(u'//') or\n href.startswith(u'http://') or\n href.startswith(u'https://')):\n self.tokenize(href)", "def get_links(names, html):\n ###TODO\n people = []\n readweb = BeautifulSoup(html, 'html.parser')\n for a in readweb.find_all('a'):\n person = os.path.basename(str(a.get('href')))\n if person in names:\n people.append(person)\n return 
SortedSet(people)\n pass", "def split_author(author: str) -> Tuple[str, str]:\n author = author.split(' ')\n if len(author) == 1:\n author_first, author_last = None, author[0]\n else:\n # If len is more than 2, it may be initials or a middle name; group these\n # into the first name.\n *author_first, author_last = author\n author_first = ' '.join(author_first)\n return author_first, author_last", "def _parse_link_header(link_header):\n result = {\n \"next\": None,\n \"last\": None,\n \"first\": None,\n \"prev\": None\n }\n if link_header is None:\n return result\n\n links = link_header.split(\",\")\n for link in links:\n parts = link.split(\";\")\n if len(parts) != 2:\n log.abort_and_exit(\"GHUB\", f\"Failed to parse Link header: '{link_header}'.\")\n url = parts[0].strip()[1:-1]\n rel = parts[1]\n if re.match('rel=\"next\"', rel.strip()):\n result[\"next\"] = url\n elif re.match('rel=\"last\"', rel.strip()):\n result[\"last\"] = url\n elif re.match('rel=\"first\"', rel.strip()):\n result[\"first\"] = url\n elif re.match('rel=\"prev\"', rel.strip()):\n result[\"prev\"] = url\n return result", "def find_link_title(link_para):\n urls = []\n source_code = requests.get(link_para)\n plain_text = source_code.text\n parsed_html = BeautifulSoup(plain_text)\n for sub_link in parsed_html.find_all('a'):\n urls.append(sub_link.string)\n print urls" ]
[ "0.64172477", "0.6224456", "0.61805755", "0.61738443", "0.6171362", "0.61090887", "0.57969236", "0.57669604", "0.573891", "0.5724362", "0.5689637", "0.5636948", "0.56326914", "0.56212753", "0.5613059", "0.5584283", "0.55722445", "0.555239", "0.55041575", "0.55039084", "0.54860914", "0.5476453", "0.54573244", "0.5444943", "0.5406279", "0.54027444", "0.5366978", "0.53610456", "0.5336658", "0.53365254" ]
0.86064184
0
Serialize a list in ASCII format, so it can be saved as a JSON.
def serialize_list(list_raw): list_serialized = [] for value in list_raw: if isinstance(value, list): list_serialized.append(serialize_list(value)) elif isinstance(value, dict): list_serialized.append(serialize_dict(value)) else: list_serialized.append(unidecode.unidecode(str(value))) return list_serialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_list(list_data, path, lineterminator='\\n', encoding=None, mode='w'):\n with open(path, mode) as f:\n list_data = [item + lineterminator for item in list_data]\n if encoding is not None:\n list_data = [item.encode(encoding) for item in list_data]\n\n f.writelines(list_data)", "def test_serialize_list():\n assert bytes([\n *UnsignedInt.to_bytes(3), # Number of values\n *String.to_bytes(\"Hello, world!\"),\n *String.to_bytes(\"This is the middle value.\"),\n *String.to_bytes(\"Goodbye, world!\")\n ]) == bytes(List(String).to_bytes([\n \"Hello, world!\",\n \"This is the middle value.\",\n \"Goodbye, world!\",\n ]))", "def list_to_json(items):\n return json.dumps(to_dict_list(items))", "def _encode_list(source: list) -> bytes:\n result_data = b\"l\"\n\n for item in source:\n result_data += encode(item)\n\n return result_data + b\"e\"", "def write(lst):\n # TODO", "def parse_list(obj: Iterable) -> bytes:\n bytes_ = b'['\n for i, value in enumerate(obj):\n ret = parse_obj(value)\n if not ret[0] in [b'/', b'(', b'<'] and i != 0: bytes_ += b' '\n bytes_ += ret\n\n return bytes_ + b']'", "def list_2_string(l, name='List'):\n buff = io.StringIO()\n print_list(l, name=name, output=buff)\n return buff.getvalue()", "def serialize_list(self, obj):\n return self.serialize_tuple(obj)", "def list_to_json(input_list, file_name):\n with open(file_name, 'w') as outfile:\n json.dump(input_list, outfile)", "def _encode_list(data_type, obj, alias_validators, old_style, for_msgpack):\n # Because Lists are mutable, we always validate them during serialization.\n obj = data_type.validate(obj)\n return [\n _json_compat_obj_encode_helper(\n data_type.item_validator, item, alias_validators, old_style, for_msgpack)\n for item in obj\n ]", "def encode_list(value: list, inner_encoder: typing.Callable) -> bytes:\n return encode_vector_of_t(list(map(inner_encoder, value)))", "def to_json(self) -> str:\n data_dict = self._to_list_dict()\n return json.dumps(data_dict, indent=4, cls=NumpyEncoder)", "def dumps(self, obj):\n return (\"[\" + \",\".join(obj) + \"]\").encode(\"latin-1\")", "def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst", "def list_stringify(inlist):\n outlist = []\n for item in inlist:\n if not isinstance(item, list):\n if not isinstance(item, str):\n thisitem = str(item)\n else:\n thisitem = item\n else:\n thisitem = list_stringify(item)\n outlist.append(thisitem)\n return outlist", "def save_to_file(cls, list_objs):\n the_list = []\n if list_objs is not None:\n for stuff in list_objs:\n new_stuff = stuff.to_dictionary()\n the_list.append(new_stuff)\n the_list = Base.to_json_string(the_list)\n with open(\"{}.json\".format(cls.__name__), mode='w') as f:\n f.write(str(the_list))", "def encode(self, obj):\n # type: (List[List[Any]]) -> str\n raise NotImplementedError()", "def _to_serialize_list(value):\n return [v.serialize() for v in value]", "def render_list_as_hex(self, data):\n s = '[ '\n for c in data:\n s += '%02x ' % c\n s += ']'\n return s", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def list_stringify(inlist):\n outlist = []\n for item in inlist:\n if not isinstance(item, (tuple, list)):\n if not isinstance(item, basestring):\n item = str(item)\n 
else:\n item = list_stringify(item)\n outlist.append(item)\n return outlist", "def write_list(self, register, data):\n raise NotImplementedError", "def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])", "def output_jsonl(filename: str, data: List):\n with open(filename, \"w\") as outfile:\n for x in data:\n print(json.dumps(x))\n json.dump(x, outfile)\n outfile.write(\"\\n\")", "def save_server_list_json(server_list):\n\n with open(output_file,\"w+\") as f:\n json.dump(server_list, f)\n\n return server_list", "def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def print_list(self, items):\n\t\tstrtype = unicode if self.encoding else bytes\n\t\titems = map(strtype, items)\n\t\twidth = self.get_width()\n\t\tlines = []\n\t\tsep = strtype(' ')\n\t\tfor item in items:\n\t\t\tif lines:\n\t\t\t\tnew = lines[-1] + sep + item\n\t\t\t\tif len(new) <= width:\n\t\t\t\t\tlines[-1] = new\n\t\t\t\t\tcontinue\n\t\t\tlines.append(item)\n\t\tself.write(strtype('\\n').join(lines))" ]
[ "0.69977295", "0.69112235", "0.68876225", "0.68486106", "0.6791603", "0.67712677", "0.66618836", "0.6654723", "0.6545836", "0.65312964", "0.6502697", "0.64593536", "0.6425355", "0.64149016", "0.6390966", "0.6359837", "0.6293224", "0.62871563", "0.6230964", "0.62002784", "0.62002784", "0.6186298", "0.6182611", "0.61812884", "0.61573374", "0.6143165", "0.61218154", "0.6087554", "0.6087554", "0.60854465" ]
0.73018533
1
Serialize a list in ASCII format, so it can be saved as a JSON.
def serialize_list(list_raw): list_serialized = [] for value in list_raw: if isinstance(value, list): list_serialized.append(serialize_list(value)) elif isinstance(value, dict): list_serialized.append(serialize_dict(value)) else: list_serialized.append(unidecode.unidecode(str(value))) return list_serialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_list(list_data, path, lineterminator='\\n', encoding=None, mode='w'):\n with open(path, mode) as f:\n list_data = [item + lineterminator for item in list_data]\n if encoding is not None:\n list_data = [item.encode(encoding) for item in list_data]\n\n f.writelines(list_data)", "def test_serialize_list():\n assert bytes([\n *UnsignedInt.to_bytes(3), # Number of values\n *String.to_bytes(\"Hello, world!\"),\n *String.to_bytes(\"This is the middle value.\"),\n *String.to_bytes(\"Goodbye, world!\")\n ]) == bytes(List(String).to_bytes([\n \"Hello, world!\",\n \"This is the middle value.\",\n \"Goodbye, world!\",\n ]))", "def list_to_json(items):\n return json.dumps(to_dict_list(items))", "def _encode_list(source: list) -> bytes:\n result_data = b\"l\"\n\n for item in source:\n result_data += encode(item)\n\n return result_data + b\"e\"", "def write(lst):\n # TODO", "def parse_list(obj: Iterable) -> bytes:\n bytes_ = b'['\n for i, value in enumerate(obj):\n ret = parse_obj(value)\n if not ret[0] in [b'/', b'(', b'<'] and i != 0: bytes_ += b' '\n bytes_ += ret\n\n return bytes_ + b']'", "def list_2_string(l, name='List'):\n buff = io.StringIO()\n print_list(l, name=name, output=buff)\n return buff.getvalue()", "def serialize_list(self, obj):\n return self.serialize_tuple(obj)", "def list_to_json(input_list, file_name):\n with open(file_name, 'w') as outfile:\n json.dump(input_list, outfile)", "def _encode_list(data_type, obj, alias_validators, old_style, for_msgpack):\n # Because Lists are mutable, we always validate them during serialization.\n obj = data_type.validate(obj)\n return [\n _json_compat_obj_encode_helper(\n data_type.item_validator, item, alias_validators, old_style, for_msgpack)\n for item in obj\n ]", "def encode_list(value: list, inner_encoder: typing.Callable) -> bytes:\n return encode_vector_of_t(list(map(inner_encoder, value)))", "def to_json(self) -> str:\n data_dict = self._to_list_dict()\n return json.dumps(data_dict, indent=4, cls=NumpyEncoder)", "def dumps(self, obj):\n return (\"[\" + \",\".join(obj) + \"]\").encode(\"latin-1\")", "def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst", "def list_stringify(inlist):\n outlist = []\n for item in inlist:\n if not isinstance(item, list):\n if not isinstance(item, str):\n thisitem = str(item)\n else:\n thisitem = item\n else:\n thisitem = list_stringify(item)\n outlist.append(thisitem)\n return outlist", "def save_to_file(cls, list_objs):\n the_list = []\n if list_objs is not None:\n for stuff in list_objs:\n new_stuff = stuff.to_dictionary()\n the_list.append(new_stuff)\n the_list = Base.to_json_string(the_list)\n with open(\"{}.json\".format(cls.__name__), mode='w') as f:\n f.write(str(the_list))", "def encode(self, obj):\n # type: (List[List[Any]]) -> str\n raise NotImplementedError()", "def _to_serialize_list(value):\n return [v.serialize() for v in value]", "def render_list_as_hex(self, data):\n s = '[ '\n for c in data:\n s += '%02x ' % c\n s += ']'\n return s", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def list_stringify(inlist):\n outlist = []\n for item in inlist:\n if not isinstance(item, (tuple, list)):\n if not isinstance(item, basestring):\n item = str(item)\n 
else:\n item = list_stringify(item)\n outlist.append(item)\n return outlist", "def write_list(self, register, data):\n raise NotImplementedError", "def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])", "def output_jsonl(filename: str, data: List):\n with open(filename, \"w\") as outfile:\n for x in data:\n print(json.dumps(x))\n json.dump(x, outfile)\n outfile.write(\"\\n\")", "def save_server_list_json(server_list):\n\n with open(output_file,\"w+\") as f:\n json.dump(server_list, f)\n\n return server_list", "def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def print_list(self, items):\n\t\tstrtype = unicode if self.encoding else bytes\n\t\titems = map(strtype, items)\n\t\twidth = self.get_width()\n\t\tlines = []\n\t\tsep = strtype(' ')\n\t\tfor item in items:\n\t\t\tif lines:\n\t\t\t\tnew = lines[-1] + sep + item\n\t\t\t\tif len(new) <= width:\n\t\t\t\t\tlines[-1] = new\n\t\t\t\t\tcontinue\n\t\t\tlines.append(item)\n\t\tself.write(strtype('\\n').join(lines))" ]
[ "0.699641", "0.6911219", "0.68891096", "0.6848675", "0.67918223", "0.67718494", "0.6662254", "0.66567713", "0.65459096", "0.65335584", "0.6503121", "0.645965", "0.6425915", "0.6414398", "0.63929796", "0.63601375", "0.6294473", "0.62877816", "0.62299633", "0.6200803", "0.6200803", "0.6188109", "0.6182584", "0.618", "0.61573195", "0.6143091", "0.612152", "0.6088256", "0.6088256", "0.6086211" ]
0.73022383
0
Return a schedule which measures the requested qubits according to the given instruction mapping and measure map, or by using the defaults provided by the backendV1.
def _measure_v1( qubits: List[int], inst_map: InstructionScheduleMap, meas_map: Union[List[List[int]], Dict[int, List[int]]], qubit_mem_slots: Optional[Dict[int, int]] = None, measure_name: str = "measure", ) -> Schedule: schedule = Schedule(name=f"Default measurement schedule for qubits {qubits}") if isinstance(meas_map, list): meas_map = utils.format_meas_map(meas_map) measure_groups = set() for qubit in qubits: measure_groups.add(tuple(meas_map[qubit])) for measure_group_qubits in measure_groups: if qubit_mem_slots is not None: unused_mem_slots = set(measure_group_qubits) - set(qubit_mem_slots.values()) try: default_sched = inst_map.get(measure_name, measure_group_qubits) except exceptions.PulseError as ex: raise exceptions.PulseError( "We could not find a default measurement schedule called '{}'. " "Please provide another name using the 'measure_name' keyword " "argument. For assistance, the instructions which are defined are: " "{}".format(measure_name, inst_map.instructions) ) from ex for time, inst in default_sched.instructions: if inst.channel.index not in qubits: continue if qubit_mem_slots and isinstance(inst, instructions.Acquire): if inst.channel.index in qubit_mem_slots: mem_slot = channels.MemorySlot(qubit_mem_slots[inst.channel.index]) else: mem_slot = channels.MemorySlot(unused_mem_slots.pop()) inst = instructions.Acquire(inst.duration, inst.channel, mem_slot=mem_slot) # Measurement pulses should only be added if its qubit was measured by the user schedule = schedule.insert(time, inst) return schedule
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measure(\n qubits: List[int],\n backend=None,\n inst_map: Optional[InstructionScheduleMap] = None,\n meas_map: Optional[Union[List[List[int]], Dict[int, List[int]]]] = None,\n qubit_mem_slots: Optional[Dict[int, int]] = None,\n measure_name: str = \"measure\",\n) -> Schedule:\n\n # backend is V2.\n if isinstance(backend, BackendV2):\n\n return _measure_v2(\n qubits=qubits,\n target=backend.target,\n meas_map=meas_map or backend.meas_map,\n qubit_mem_slots=qubit_mem_slots or dict(zip(qubits, range(len(qubits)))),\n measure_name=measure_name,\n )\n # backend is V1 or backend is None.\n else:\n try:\n return _measure_v1(\n qubits=qubits,\n inst_map=inst_map or backend.defaults().instruction_schedule_map,\n meas_map=meas_map or backend.configuration().meas_map,\n qubit_mem_slots=qubit_mem_slots,\n measure_name=measure_name,\n )\n except AttributeError as ex:\n raise exceptions.PulseError(\n \"inst_map or meas_map, and backend cannot be None simultaneously\"\n ) from ex", "def _measure_v2(\n qubits: List[int],\n target: Target,\n meas_map: Union[List[List[int]], Dict[int, List[int]]],\n qubit_mem_slots: Dict[int, int],\n measure_name: str = \"measure\",\n) -> Schedule:\n schedule = Schedule(name=f\"Default measurement schedule for qubits {qubits}\")\n\n if isinstance(meas_map, list):\n meas_map = utils.format_meas_map(meas_map)\n meas_group = set()\n for qubit in qubits:\n meas_group |= set(meas_map[qubit])\n meas_group = sorted(meas_group)\n\n meas_group_set = set(range(max(meas_group) + 1))\n unassigned_qubit_indices = sorted(set(meas_group) - qubit_mem_slots.keys())\n unassigned_reg_indices = sorted(meas_group_set - set(qubit_mem_slots.values()), reverse=True)\n if set(qubit_mem_slots.values()).issubset(meas_group_set):\n for qubit in unassigned_qubit_indices:\n qubit_mem_slots[qubit] = unassigned_reg_indices.pop()\n\n for measure_qubit in meas_group:\n try:\n if measure_qubit in qubits:\n default_sched = target.get_calibration(measure_name, (measure_qubit,)).filter(\n channels=[\n channels.MeasureChannel(measure_qubit),\n channels.AcquireChannel(measure_qubit),\n ]\n )\n schedule += _schedule_remapping_memory_slot(default_sched, qubit_mem_slots)\n except KeyError as ex:\n raise exceptions.PulseError(\n \"We could not find a default measurement schedule called '{}'. \"\n \"Please provide another name using the 'measure_name' keyword \"\n \"argument. 
For assistance, the instructions which are defined are: \"\n \"{}\".format(measure_name, target.instructions)\n ) from ex\n return schedule", "def get_pulse_schedule(backend: IBMQBackend) -> Schedule:\n config = backend.configuration()\n defaults = backend.defaults()\n inst_map = defaults.instruction_schedule_map\n\n # Run 2 experiments - 1 with x pulse and 1 without\n x = inst_map.get('x', 0)\n measure = inst_map.get('measure', range(config.n_qubits)) << x.duration\n ground_sched = measure\n excited_sched = x | measure\n schedules = [ground_sched, excited_sched]\n return schedules", "def measure_all(backend) -> Schedule:\n # backend is V2.\n if isinstance(backend, BackendV2):\n qubits = list(range(backend.num_qubits))\n else:\n qubits = list(range(backend.configuration().n_qubits))\n return measure(qubits=qubits, backend=backend)", "def test_sequenced_parameterized_schedule(self):\n\n converter = QobjToInstructionConverter([], buffer=0)\n qobjs = [\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=10, phase=\"P1\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=20, phase=\"P2\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=30, phase=\"P3\"),\n ]\n converted_instruction = [converter(qobj) for qobj in qobjs]\n\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"inst_seq\", 0, Schedule(*converted_instruction, name=\"inst_seq\"))\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=3, P4=4, P5=5)\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, 1, 2, 3, 4, 5, 6, 7, 8)\n\n p3_expr = Parameter(\"p3\")\n p3_expr = p3_expr.bind({p3_expr: 3})\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)", "def test_schedule_generator(self):\n\n dur_val = 10\n amp = 1.0\n\n def test_func(dur: int):\n sched = Schedule()\n sched += Play(library.constant(int(dur), amp), DriveChannel(0))\n return sched\n\n expected_sched = Schedule()\n expected_sched += Play(library.constant(dur_val, amp), DriveChannel(0))\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"f\", (0,), test_func)\n self.assertEqual(inst_map.get(\"f\", (0,), dur_val), expected_sched)\n\n self.assertEqual(inst_map.get_parameters(\"f\", (0,)), (\"dur\",))", "def test_add(self):\n sched = Schedule()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)), inplace=True)\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"u1\", 1, sched)\n inst_map.add(\"u1\", 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(\"u1\"), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))\n\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", (), sched)\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", 1, \"not a schedule\")", "def rabi_schedules(amp_list, qubits, pulse_width, pulse_sigma=None,\n width_sigma_ratio=4, drives=None, cmd_def=None,\n inst_map=None, meas_map=None):\n\n 
xdata = amp_list\n\n # copy the instruction to schedule mapping\n inst_map = copy.deepcopy(inst_map)\n if not inst_map:\n inst_map = copy.deepcopy(cmd_def)\n\n if pulse_sigma is None:\n pulse_sigma = pulse_width / width_sigma_ratio\n\n # Construct the circuits\n qr = qiskit.QuantumRegister(max(qubits) + 1)\n cr = qiskit.ClassicalRegister(len(qubits))\n\n circuits = []\n\n for circ_index, g_amp in enumerate(amp_list):\n\n circ = qiskit.QuantumCircuit(qr, cr)\n circ.name = 'rabicircuit_%d_0' % circ_index\n\n rabi_pulse = pulse_lib.gaussian(duration=pulse_width,\n amp=g_amp,\n sigma=pulse_sigma,\n name='rabi_pulse_%d' % circ_index)\n\n rabi_gate = Gate(name='rabi_%d' % circ_index, num_qubits=1, params=[])\n\n for _, qubit in enumerate(qubits):\n\n # add commands to schedule\n schedule = pulse.Schedule(name='rabi_pulse_%f_%d' % (g_amp,\n qubit))\n\n schedule += rabi_pulse(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('rabi_%d' % circ_index, qubits=[qubit],\n schedule=schedule)\n\n circ.append(rabi_gate, [qr[qubit]])\n\n for qind, qubit in enumerate(qubits):\n circ.measure(qr[qubit], cr[qind])\n\n circuits.append(circ)\n\n # schedule\n schedule_config = ScheduleConfig(inst_map, meas_map)\n rabi_sched = [schedule_circuit(qcirc,\n schedule_config)\n for qcirc in circuits]\n\n return rabi_sched, xdata", "def run_and_measure(\n self,\n quil_program: Program,\n qubits: Optional[List[int]] = None,\n trials: int = 1,\n memory_map: Optional[Dict[str, List[Union[int, float]]]] = None,\n ) -> np.ndarray:\n if qubits is None:\n qubits = sorted(cast(Set[int], quil_program.get_qubits(indices=True)))\n\n if memory_map is not None:\n quil_program = self.augment_program_with_memory_values(quil_program, memory_map)\n\n request = self._run_and_measure_request(\n quil_program=quil_program,\n qubits=qubits,\n trials=trials,\n )\n response = self._qvm_client.run_and_measure_program(request)\n return np.asarray(response.results)", "def simulate_and_measure(qubit, repetitions=10, mkey=\"m\", power=0.1):\n operations = [cirq.X(qubit)**power, cirq.measure(qubit, key=mkey)]\n\n circuit = cirq.Circuit(operations)\n simulator = cirq.Simulator()\n result = simulator.run(circuit, repetitions=repetitions)\n\n return result, circuit", "def iaq_measure(self) -> List[int]:\n # name, command, signals, delay\n return self._run_profile((\"iaq_measure\", [0x20, 0x08], 2, 0.05))", "def test_add_block(self):\n sched = ScheduleBlock()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)), inplace=True)\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"u1\", 1, sched)\n inst_map.add(\"u1\", 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(\"u1\"), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))", "def _compile_schedule_from_problem(self, problem, formulation=1, skip_null='null'):\n cmp_sch_dsp = {1:self._cmp_sch_m1, 2:self._cmp_sch_m2}\n return cmp_sch_dsp[formulation](problem, skip_null)", "def test_get_schedule_with_unbound_parameter(self):\n param1 = Parameter(\"param1\")\n param2 = Parameter(\"param2\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, 
DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param2=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "def test_schedule_with_multiple_parameters_under_same_name(self):\n param1 = Parameter(\"param\")\n param2 = Parameter(\"param\")\n param3 = Parameter(\"param\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n target_sched.insert(20, ShiftPhase(param3, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n ref_sched.insert(20, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "def test_has_from_mock(self):\n inst_map = FakeOpenPulse2Q().defaults().instruction_schedule_map\n self.assertTrue(inst_map.has(\"u1\", [0]))\n self.assertTrue(inst_map.has(\"cx\", (0, 1)))\n self.assertTrue(inst_map.has(\"u3\", 0))\n self.assertTrue(inst_map.has(\"measure\", [0, 1]))\n self.assertFalse(inst_map.has(\"u1\", [0, 1]))\n with self.assertRaises(PulseError):\n inst_map.assert_has(\"dne\", [0])\n with self.assertRaises(PulseError):\n inst_map.assert_has(\"cx\", 100)", "def __init__(self, qubit, bit, circuit=None):\n super().__init__(\"measure\", [], [qubit], [bit], circuit)", "def test_myqlm_backend():\n circuit = Circuit()\n circuit += ops.DefinitionBit(name='ro', length=2, is_output=True)\n circuit += ops.RotateZ(qubit=0, theta=0)\n circuit += ops.PauliX(qubit=1)\n circuit += ops.MeasureQubit(qubit=0, readout='ro', readout_index=0)\n circuit += ops.MeasureQubit(qubit=1, readout='ro', readout_index=1)\n\n backend = MyQLMBackend(number_qubits=2,\n number_measurements=5)\n\n # (bit_dict, float_dict, complex_dict) = backend.run_circuit(circuit)\n # npt.assert_equal(float_dict, dict())\n # npt.assert_equal(complex_dict, dict())\n # npt.assert_equal(bit_dict['ro'], [np.array([0., 1.])] * 5)", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()\n\n if self.get_param_value('clip_drive_amp'):\n for qb in self.qubits:\n tr_name = self.get_param_value('transition_name', qubit=qb.name)\n max_drive_amp = self.get_param_value('max_drive_amp',\n qubit=qb.name)\n if tr_name == 'ge' and qb.ge_amp180() > max_drive_amp:\n qb.ge_amp180(\n self.get_param_value('default_ge_amp180',\n qubit=qb.name))\n elif tr_name == 'ef' and qb.ef_amp180() > max_drive_amp:\n qb.ef_amp180(\n self.get_param_value('default_ef_amp180',\n qubit=qb.name))\n\n for qb in self.qubits:\n tr_name = self.get_param_value('transition_name', qubit=qb.name)\n amp_str = f'{tr_name}_amp180'\n self.results[qb.name] = {amp_str: getattr(qb, amp_str)()}", "def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:\n return [self.state._measure(self.qubit_map[q], self.prng) for q in qubits]", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n\n if 
self.experiment_options.frequency_shift is None:\n try:\n anharm, _ = backend.properties().qubit_property(self.physical_qubits[0])[\n \"anharmonicity\"\n ]\n self.set_experiment_options(frequency_shift=anharm)\n except KeyError as key_err:\n raise CalibrationError(\n f\"The backend {backend} does not provide an anharmonicity for qubit \"\n f\"{self.physical_qubits[0]}. Use EFRabi.set_experiment_options(frequency_shift=\"\n f\"anharmonicity) to manually set the correct frequency for the 1-2 transition.\"\n ) from key_err\n except AttributeError as att_err:\n raise CalibrationError(\n \"When creating the default schedule without passing a backend, the frequency needs \"\n \"to be set manually through EFRabi.set_experiment_options(frequency_shift=..).\"\n ) from att_err\n\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=self.__rabi_gate_name__) as default_schedule:\n with pulse.frequency_offset(\n self.experiment_options.frequency_shift,\n pulse.DriveChannel(self.physical_qubits[0]),\n ):\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=\"rabi\") as default_schedule:\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def run_maf(dbFile, ra, dec):\n\n # establish connection to sqllite database file.\n opsimdb = db.OpsimDatabase(dbFile)\n \n # While we're in transition between opsim v3 and v4, this may be helpful: print(\"{dbFile} is an opsim version {version} database\".format(dbFile=dbFile, version=opsimdb.opsimVersion))\n if opsimdb.opsimVersion == \"V3\":\n # For v3 databases:\n mjdcol = 'expMJD'\n degrees = False\n cols = ['filter', 'fiveSigmaDepth', mjdcol, 'expDate']\n stackerList = []\n else:\n # For v4 and alternate scheduler databases.\n mjdcol = 'observationStartMJD'\n degrees = True\n cols = ['filter', 'fiveSigmaDepth', mjdcol]\n stackerList = [expDateStacker()]\n \n # IntraNightGapsMetric returns the gap (in days) between observations within the same night custom reduceFunc to find min gaps \n metric = metrics.cadenceMetrics.IntraNightGapsMetric(reduceFunc=np.amin, mjdCol=mjdcol)\n # PassMetric just pass all values\n metric_pass = metrics.simpleMetrics.PassMetric(cols=cols)\n # slicer for slicing pointing history\n slicer = slicers.UserPointsSlicer(ra, dec, lonCol='fieldRA', latCol='fieldDec', latLonDeg=degrees)\n # sql constrains, 3 for baseline2018a, 1 for rolling m2045\n sql = ''\n \n # bundles to combine metric, slicer and sql constrain together\n bundle = metricBundles.MetricBundle(metric, slicer, sql)\n date_bundle = metricBundles.MetricBundle(metric_pass, slicer, sql, stackerList=stackerList)\n \n # create metric bundle group and returns\n bg = metricBundles.MetricBundleGroup({'sep': bundle, 'cadence': date_bundle}, opsimdb, outDir=outDir, resultsDb=resultsDb)\n bg.runAll()\n opsimdb.close()\n return bg", "def schedule_experiments(train_fun, decode_fun, eval_fun, train_set, dev_set,\n hyperparam_sets, FLAGS):\n\n print(\"===== Scheduled Experiments =====\")\n for hyperparam_set in hyperparam_sets:\n for hp in hyperparam_set:\n setattr(FLAGS, hp, hyperparam_set[hp])\n if hp == 'universal_keep':\n 
setattr(FLAGS, 'sc_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'sc_output_keep', hyperparam_set[hp])\n setattr(FLAGS, 'tg_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'tg_output_keep', hyperparam_set[hp])\n setattr(FLAGS, 'attention_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'attention_output_keep', hyperparam_set[hp])\n\n print(\"Trying parameter set: \")\n for hp in hyperparam_set:\n print(\"* {}: {}\".format(hp, hyperparam_set[hp]))\n metrics = \"top1_temp_ms\"\n\n metrics_value = single_round_model_eval(\n train_fun, decode_fun, eval_fun, train_set, dev_set, metrics)\n print(\"Parameter set: \")\n for hp in hyperparam_set:\n print(\"* {}: {}\".format(hp, hyperparam_set[hp]))\n print(\"{} = {}\".format(metrics, metrics_value))", "def run_profile(times, schedule, msid, model_spec, init, pseudo=None):\n\n model = setup_model(msid, times[0], times[-1], model_spec, init)\n\n for key, value in schedule.items():\n model.comp[key].set_data(value, times=times)\n\n model.make()\n model.calc()\n tmsid = model.get_comp(msid)\n results = {msid: tmsid}\n\n if pseudo is not None:\n results[pseudo] = model.get_comp(pseudo)\n\n return results", "def run_profile(times, schedule, msid, model_spec, init, pseudo=None):\n\n model = setup_model(msid, times[0], times[-1], model_spec, init)\n\n for key, value in schedule.items():\n model.comp[key].set_data(value, times=times)\n\n model.make()\n model.calc()\n tmsid = model.get_comp(msid)\n results = {msid: tmsid}\n\n if pseudo is not None:\n results[pseudo] = model.get_comp(pseudo)\n\n return results", "def test_instmap_picklable_with_arguments(self):\n instmap = FakeAthens().defaults().instruction_schedule_map\n\n param1 = Parameter(\"P1\")\n param2 = Parameter(\"P2\")\n sched = Schedule()\n sched.insert(0, Play(Constant(100, param1), DriveChannel(0)), inplace=True)\n sched.insert(0, Play(Constant(100, param2), DriveChannel(1)), inplace=True)\n to_assign = {\"P1\": 0.1, \"P2\": 0.2}\n\n # Note that dict keys is not picklable\n # Instmap should typecast it into list to pickle itself.\n instmap.add(\"custom\", (0, 1), sched, arguments=to_assign.keys())\n\n ser_obj = pickle.dumps(instmap)\n deser_instmap = pickle.loads(ser_obj)\n\n self.assertEqual(instmap, deser_instmap)", "def schedule_for(current_track, task, client_index):\n op = task.operation\n num_clients = task.clients\n sched = scheduler.scheduler_for(task.schedule, task.params)\n logger.info(\"Choosing [%s] for [%s].\" % (sched, task))\n runner_for_op = runner.runner_for(op.type)\n params_for_op = track.operation_parameters(current_track, op).partition(client_index, num_clients)\n\n if task.warmup_time_period is not None or task.time_period is not None:\n warmup_time_period = task.warmup_time_period if task.warmup_time_period else 0\n logger.info(\"Creating time-period based schedule with [%s] distribution for [%s] with a warmup period of [%s] seconds and a \"\n \"time period of [%s] seconds.\" % (task.schedule, op, str(warmup_time_period), str(task.time_period)))\n return time_period_based(sched, warmup_time_period, task.time_period, runner_for_op, params_for_op)\n else:\n logger.info(\"Creating iteration-count based schedule with [%s] distribution for [%s] with [%d] warmup iterations and \"\n \"[%d] iterations.\" % (task.schedule, op, task.warmup_iterations, task.iterations))\n return iteration_count_based(sched, task.warmup_iterations // num_clients, task.iterations // num_clients,\n runner_for_op, params_for_op)", "def test_has_custom_gate(self):\n backend = FakeOpenPulse2Q()\n 
instmap = backend.defaults().instruction_schedule_map\n\n self.assertFalse(instmap.has_custom_gate())\n\n # add custom schedule\n some_sched = Schedule()\n instmap.add(\"u3\", (0,), some_sched)\n\n self.assertTrue(instmap.has_custom_gate())\n\n # delete custom schedule\n instmap.remove(\"u3\", (0,))\n self.assertFalse(instmap.has_custom_gate())", "def _acquisition(\n params: ResonatorSpectroscopyAttenuationParameters,\n platform: Platform,\n qubits: Qubits,\n) -> ResonatorSpectroscopyAttenuationData:\n # create a sequence of pulses for the experiment:\n # MZ\n\n # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel\n sequence = PulseSequence()\n ro_pulses = {}\n amplitudes = {}\n attenuations = {}\n\n for qubit in qubits:\n ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0)\n if params.amplitude is not None:\n ro_pulses[qubit].amplitude = params.amplitude\n\n amplitudes[qubit] = ro_pulses[qubit].amplitude\n\n if params.attenuation is not None:\n platform.set_attenuation(qubit, params.attenuation)\n\n attenuations[qubit] = platform.get_attenuation(qubit)\n\n sequence.add(ro_pulses[qubit])\n\n # define the parameter to sweep and its range:\n delta_frequency_range = np.arange(\n -params.freq_width // 2, params.freq_width // 2, params.freq_step\n )\n sweeper = Sweeper(\n Parameter.frequency,\n delta_frequency_range,\n pulses=[ro_pulses[qubit] for qubit in qubits],\n type=SweeperType.OFFSET,\n )\n data = ResonatorSpectroscopyAttenuationData(\n resonator_type=platform.resonator_type,\n power_level=params.power_level,\n amplitudes=amplitudes,\n attenuations=attenuations,\n )\n\n results = platform.sweep(\n sequence,\n ExecutionParameters(\n nshots=params.nshots,\n relaxation_time=params.relaxation_time,\n acquisition_type=AcquisitionType.INTEGRATION,\n averaging_mode=AveragingMode.CYCLIC,\n ),\n sweeper,\n )\n\n # retrieve the results for every qubit\n for qubit in qubits:\n # average msr, phase, i and q over the number of shots defined in the runcard\n result = results[ro_pulses[qubit].serial]\n # store the results\n data.register_qubit(\n qubit,\n msr=result.magnitude,\n phase=result.phase,\n freq=delta_frequency_range + ro_pulses[qubit].frequency,\n )\n # finally, save the remaining data\n return data" ]
[ "0.70415074", "0.694117", "0.6068416", "0.5905929", "0.5543027", "0.5331215", "0.5095064", "0.5033579", "0.49605042", "0.48733565", "0.47713682", "0.47615978", "0.4757743", "0.4753119", "0.47397727", "0.47301975", "0.47144768", "0.46818984", "0.4673623", "0.463696", "0.46111366", "0.45750406", "0.45685127", "0.45474747", "0.45176986", "0.45176986", "0.44965482", "0.44918755", "0.44887158", "0.44786543" ]
0.7321922
0
Return a schedule which measures the requested qubits according to the given target and measure map, or by using the defaults provided by the backendV2.
def _measure_v2( qubits: List[int], target: Target, meas_map: Union[List[List[int]], Dict[int, List[int]]], qubit_mem_slots: Dict[int, int], measure_name: str = "measure", ) -> Schedule: schedule = Schedule(name=f"Default measurement schedule for qubits {qubits}") if isinstance(meas_map, list): meas_map = utils.format_meas_map(meas_map) meas_group = set() for qubit in qubits: meas_group |= set(meas_map[qubit]) meas_group = sorted(meas_group) meas_group_set = set(range(max(meas_group) + 1)) unassigned_qubit_indices = sorted(set(meas_group) - qubit_mem_slots.keys()) unassigned_reg_indices = sorted(meas_group_set - set(qubit_mem_slots.values()), reverse=True) if set(qubit_mem_slots.values()).issubset(meas_group_set): for qubit in unassigned_qubit_indices: qubit_mem_slots[qubit] = unassigned_reg_indices.pop() for measure_qubit in meas_group: try: if measure_qubit in qubits: default_sched = target.get_calibration(measure_name, (measure_qubit,)).filter( channels=[ channels.MeasureChannel(measure_qubit), channels.AcquireChannel(measure_qubit), ] ) schedule += _schedule_remapping_memory_slot(default_sched, qubit_mem_slots) except KeyError as ex: raise exceptions.PulseError( "We could not find a default measurement schedule called '{}'. " "Please provide another name using the 'measure_name' keyword " "argument. For assistance, the instructions which are defined are: " "{}".format(measure_name, target.instructions) ) from ex return schedule
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _measure_v1(\n qubits: List[int],\n inst_map: InstructionScheduleMap,\n meas_map: Union[List[List[int]], Dict[int, List[int]]],\n qubit_mem_slots: Optional[Dict[int, int]] = None,\n measure_name: str = \"measure\",\n) -> Schedule:\n\n schedule = Schedule(name=f\"Default measurement schedule for qubits {qubits}\")\n\n if isinstance(meas_map, list):\n meas_map = utils.format_meas_map(meas_map)\n\n measure_groups = set()\n for qubit in qubits:\n measure_groups.add(tuple(meas_map[qubit]))\n for measure_group_qubits in measure_groups:\n if qubit_mem_slots is not None:\n unused_mem_slots = set(measure_group_qubits) - set(qubit_mem_slots.values())\n try:\n default_sched = inst_map.get(measure_name, measure_group_qubits)\n except exceptions.PulseError as ex:\n raise exceptions.PulseError(\n \"We could not find a default measurement schedule called '{}'. \"\n \"Please provide another name using the 'measure_name' keyword \"\n \"argument. For assistance, the instructions which are defined are: \"\n \"{}\".format(measure_name, inst_map.instructions)\n ) from ex\n for time, inst in default_sched.instructions:\n if inst.channel.index not in qubits:\n continue\n if qubit_mem_slots and isinstance(inst, instructions.Acquire):\n if inst.channel.index in qubit_mem_slots:\n mem_slot = channels.MemorySlot(qubit_mem_slots[inst.channel.index])\n else:\n mem_slot = channels.MemorySlot(unused_mem_slots.pop())\n inst = instructions.Acquire(inst.duration, inst.channel, mem_slot=mem_slot)\n # Measurement pulses should only be added if its qubit was measured by the user\n schedule = schedule.insert(time, inst)\n\n return schedule", "def measure(\n qubits: List[int],\n backend=None,\n inst_map: Optional[InstructionScheduleMap] = None,\n meas_map: Optional[Union[List[List[int]], Dict[int, List[int]]]] = None,\n qubit_mem_slots: Optional[Dict[int, int]] = None,\n measure_name: str = \"measure\",\n) -> Schedule:\n\n # backend is V2.\n if isinstance(backend, BackendV2):\n\n return _measure_v2(\n qubits=qubits,\n target=backend.target,\n meas_map=meas_map or backend.meas_map,\n qubit_mem_slots=qubit_mem_slots or dict(zip(qubits, range(len(qubits)))),\n measure_name=measure_name,\n )\n # backend is V1 or backend is None.\n else:\n try:\n return _measure_v1(\n qubits=qubits,\n inst_map=inst_map or backend.defaults().instruction_schedule_map,\n meas_map=meas_map or backend.configuration().meas_map,\n qubit_mem_slots=qubit_mem_slots,\n measure_name=measure_name,\n )\n except AttributeError as ex:\n raise exceptions.PulseError(\n \"inst_map or meas_map, and backend cannot be None simultaneously\"\n ) from ex", "def measure_all(backend) -> Schedule:\n # backend is V2.\n if isinstance(backend, BackendV2):\n qubits = list(range(backend.num_qubits))\n else:\n qubits = list(range(backend.configuration().n_qubits))\n return measure(qubits=qubits, backend=backend)", "def get_pulse_schedule(backend: IBMQBackend) -> Schedule:\n config = backend.configuration()\n defaults = backend.defaults()\n inst_map = defaults.instruction_schedule_map\n\n # Run 2 experiments - 1 with x pulse and 1 without\n x = inst_map.get('x', 0)\n measure = inst_map.get('measure', range(config.n_qubits)) << x.duration\n ground_sched = measure\n excited_sched = x | measure\n schedules = [ground_sched, excited_sched]\n return schedules", "def test_schedule_with_multiple_parameters_under_same_name(self):\n param1 = Parameter(\"param\")\n param2 = Parameter(\"param\")\n param3 = Parameter(\"param\")\n\n target_sched = Schedule()\n target_sched.insert(0, 
ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n target_sched.insert(20, ShiftPhase(param3, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n ref_sched.insert(20, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "def test_get_schedule_with_unbound_parameter(self):\n param1 = Parameter(\"param1\")\n param2 = Parameter(\"param2\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param2=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "def apply_fixed_schedules(\n relay_mod: Union[RelayFunc, IRModule],\n target: Union[str, Target],\n params: Optional[Dict[str, NDArray]],\n schedule_fn: Callable[[ExtractedTask, Schedule], bool],\n):\n target = Target(target) if isinstance(target, str) else target\n extracted_tasks = extract_task_from_relay(relay_mod, target, params)\n\n database = DummyDatabase()\n\n for task in extracted_tasks:\n mod = Parse._mod(task.dispatched[0])\n sch = Schedule(mod)\n\n if schedule_fn(task, sch):\n workload = database.commit_workload(mod)\n tune_rec = TuningRecord(sch.trace, [0.0], workload, target, [])\n database.commit_tuning_record(tune_rec)\n\n return database", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()\n\n if self.get_param_value('clip_drive_amp'):\n for qb in self.qubits:\n tr_name = self.get_param_value('transition_name', qubit=qb.name)\n max_drive_amp = self.get_param_value('max_drive_amp',\n qubit=qb.name)\n if tr_name == 'ge' and qb.ge_amp180() > max_drive_amp:\n qb.ge_amp180(\n self.get_param_value('default_ge_amp180',\n qubit=qb.name))\n elif tr_name == 'ef' and qb.ef_amp180() > max_drive_amp:\n qb.ef_amp180(\n self.get_param_value('default_ef_amp180',\n qubit=qb.name))\n\n for qb in self.qubits:\n tr_name = self.get_param_value('transition_name', qubit=qb.name)\n amp_str = f'{tr_name}_amp180'\n self.results[qb.name] = {amp_str: getattr(qb, amp_str)()}", "def test_sequenced_parameterized_schedule(self):\n\n converter = QobjToInstructionConverter([], buffer=0)\n qobjs = [\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=10, phase=\"P1\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=20, phase=\"P2\"),\n PulseQobjInstruction(name=\"fc\", ch=\"d0\", t0=30, phase=\"P3\"),\n ]\n converted_instruction = [converter(qobj) for qobj in qobjs]\n\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"inst_seq\", 0, 
Schedule(*converted_instruction, name=\"inst_seq\"))\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=3, P4=4, P5=5)\n\n with self.assertRaises(PulseError):\n inst_map.get(\"inst_seq\", 0, 1, 2, 3, 4, 5, 6, 7, 8)\n\n p3_expr = Parameter(\"p3\")\n p3_expr = p3_expr.bind({p3_expr: 3})\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, P1=1, P2=2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)\n\n sched = inst_map.get(\"inst_seq\", 0, 1, 2, P3=p3_expr)\n self.assertEqual(sched.instructions[0][-1].phase, 1)\n self.assertEqual(sched.instructions[1][-1].phase, 2)\n self.assertEqual(sched.instructions[2][-1].phase, 3)", "def parse_settings(self, requested_kwargs):\n kwargs = {}\n task_list = []\n for qb in self.qubits:\n task = {}\n task_list_fields = requested_kwargs['task_list_fields']\n\n transition_name_v = task_list_fields.get('transition_name')\n tr_name = self.get_param_value('transition_name',\n qubit=qb.name,\n default=transition_name_v[1])\n task['transition_name'] = tr_name\n\n value_params = {'v_low': None, 'v_high': None, 'pts': None}\n # The information about the custom parameters above could be\n # Saved somewhere else to generalize all wrappers\n\n default = self.get_param_value(f'default_{tr_name}_amp180',\n qubit=qb.name)\n current = qb.parameters[f'{tr_name}_amp180']()\n max = self.get_param_value('max_drive_amp', qubit=qb.name)\n n = self.get_param_value('n', qubit=qb.name)\n\n for name, value in value_params.items():\n value = self.get_param_value(name, qubit=qb.name)\n if isinstance(value, str):\n value = eval(\n value.format(current=current,\n max=max,\n default=default,\n n=n))\n value_params[name] = value\n\n sweep_points_v = task_list_fields.get('sweep_points', None)\n if sweep_points_v is not None:\n # Get first dimension (there is only one)\n # TODO: support for more dimensions?\n sweep_points_kws = next(iter(\n self.kw_for_sweep_points.items()))[1]\n values = np.linspace(value_params['v_low'],\n value_params['v_high'],\n value_params['pts'])\n task['sweep_points'] = SweepPoints()\n task['sweep_points'].add_sweep_parameter(values=values,\n **sweep_points_kws)\n qb_v = task_list_fields.get('qb', None)\n if qb_v is not None:\n task['qb'] = qb.name\n\n for k, v in task_list_fields.items():\n if k not in task:\n task[k] = self.get_param_value(k,\n qubit=qb.name,\n default=v[1])\n\n task_list.append(task)\n\n kwargs['task_list'] = task_list\n\n kwargs_super = super().parse_settings(requested_kwargs)\n kwargs_super.update(kwargs)\n\n return kwargs_super", "def test_schedule_generator(self):\n\n dur_val = 10\n amp = 1.0\n\n def test_func(dur: int):\n sched = Schedule()\n sched += Play(library.constant(int(dur), amp), DriveChannel(0))\n return sched\n\n expected_sched = Schedule()\n expected_sched += Play(library.constant(dur_val, amp), DriveChannel(0))\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"f\", (0,), test_func)\n self.assertEqual(inst_map.get(\"f\", (0,), dur_val), expected_sched)\n\n self.assertEqual(inst_map.get_parameters(\"f\", (0,)), (\"dur\",))", "def _parse_run_args(backend, qobj_id, qobj_header,\n shots, memory, max_credits, seed_simulator,\n default_qubit_los, 
default_meas_los,\n schedule_los, meas_level, meas_return,\n memory_slots, memory_slot_size, rep_time,\n parameter_binds, **run_config):\n # grab relevant info from backend if it exists\n backend_config = None\n backend_default = None\n if backend:\n backend_config = backend.configuration()\n # TODO : Remove usage of config.defaults when backend.defaults() is updated.\n try:\n backend_default = backend.defaults()\n except (ModelValidationError, AttributeError):\n from collections import namedtuple\n backend_config_defaults = getattr(backend_config, 'defaults', {})\n BackendDefault = namedtuple('BackendDefault', ('qubit_freq_est', 'meas_freq_est'))\n backend_default = BackendDefault(\n qubit_freq_est=backend_config_defaults.get('qubit_freq_est'),\n meas_freq_est=backend_config_defaults.get('meas_freq_est')\n )\n\n memory_slots = memory_slots or getattr(backend_config, 'memory_slots', None)\n rep_time = rep_time or getattr(backend_config, 'rep_times', None)\n if isinstance(rep_time, list):\n rep_time = rep_time[-1]\n\n parameter_binds = parameter_binds or []\n\n # add default empty lo config\n schedule_los = schedule_los or []\n if isinstance(schedule_los, (LoConfig, dict)):\n schedule_los = [schedule_los]\n\n # Convert to LoConfig if lo configuration supplied as dictionary\n schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)\n for lo_config in schedule_los]\n\n qubit_lo_freq = default_qubit_los or getattr(backend_default, 'qubit_freq_est', [])\n meas_lo_freq = default_meas_los or getattr(backend_default, 'meas_freq_est', [])\n\n # an identifier for the Qobj\n qobj_id = qobj_id or str(uuid.uuid4())\n\n # The header that goes at the top of the Qobj (and later Result)\n # we process it as dict, then write entries that are not None to a QobjHeader object\n qobj_header = qobj_header or {}\n if isinstance(qobj_header, QobjHeader):\n qobj_header = qobj_header.to_dict()\n backend_name = getattr(backend_config, 'backend_name', None)\n backend_version = getattr(backend_config, 'backend_version', None)\n qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),\n **qobj_header}\n qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})\n\n # create run configuration and populate\n run_config_dict = dict(shots=shots,\n memory=memory,\n max_credits=max_credits,\n seed_simulator=seed_simulator,\n seed=seed_simulator, # deprecated\n qubit_lo_freq=qubit_lo_freq,\n meas_lo_freq=meas_lo_freq,\n schedule_los=schedule_los,\n meas_level=meas_level,\n meas_return=meas_return,\n memory_slots=memory_slots,\n memory_slot_size=memory_slot_size,\n rep_time=rep_time,\n parameter_binds=parameter_binds,\n **run_config)\n run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})\n\n return qobj_id, qobj_header, run_config", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n\n if self.experiment_options.frequency_shift is None:\n try:\n anharm, _ = backend.properties().qubit_property(self.physical_qubits[0])[\n \"anharmonicity\"\n ]\n self.set_experiment_options(frequency_shift=anharm)\n except KeyError as key_err:\n raise CalibrationError(\n f\"The backend {backend} does not provide an anharmonicity for qubit \"\n f\"{self.physical_qubits[0]}. 
Use EFRabi.set_experiment_options(frequency_shift=\"\n f\"anharmonicity) to manually set the correct frequency for the 1-2 transition.\"\n ) from key_err\n except AttributeError as att_err:\n raise CalibrationError(\n \"When creating the default schedule without passing a backend, the frequency needs \"\n \"to be set manually through EFRabi.set_experiment_options(frequency_shift=..).\"\n ) from att_err\n\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=self.__rabi_gate_name__) as default_schedule:\n with pulse.frequency_offset(\n self.experiment_options.frequency_shift,\n pulse.DriveChannel(self.physical_qubits[0]),\n ):\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def _read_target_schedule(self):\n target_schedule = self.target_schedule\n target_select = self.target_select\n\n #-- list of scheduled times to be returned\n self.date_lst_xtgt = []\n\n if target_schedule!=None:\n if target_select!=None:\n msg = \"target_schedule and target_select were both specified, \" \\\n \"'target_select' will be ignored here!\"\n FileLogger.warn(msg)\n msg = \"target schedule will be read from file ***{}***\".format(target_schedule)\n FileLogger.info(msg)\n with open(target_schedule,'r') as fp:\n for line in fp:\n if line[0]=='#':\n continue\n else:\n # print \"---{}---\".format(line.rstrip())\n date_utc = dt.datetime.strptime(line.rstrip(),'%Y%m%dT%H:%M:%S')\n if self.time_start!=None and date_utc<self.time_start:\n continue\n elif self.time_end!=None and date_utc>self.time_end:\n continue\n else:\n self.date_lst_xtgt.append(date_utc)\n #-- ensure time-increase ordering\n self.date_lst_xtgt = sorted(self.date_lst_xtgt)\n nxtgt = len(self.date_lst_xtgt)\n msg = \"...reading target schedule DONE (nxtgt={})\".format(nxtgt)\n FileLogger.info(msg)\n elif target_select!=None:\n msg = \"target schedule will be determined from specification ---{}---\".format(\n target_select)\n FileLogger.info(msg)\n ttgt_min = target_select[0]\n ttgt_max = target_select[1]\n ttgt_delta = target_select[2]\n ttgt_min = dt.datetime.strptime(ttgt_min,'%Y%m%dT%H:%M')\n ttgt_max = dt.datetime.strptime(ttgt_max,'%Y%m%dT%H:%M')\n if ttgt_delta[-1].lower()=='h':\n ttgt_delta = dt.timedelta(hours=float(ttgt_delta[0:-1]))\n elif ttgt_delta[-1].lower()=='d':\n ttgt_delta = dt.timedelta(days=float(ttgt_delta[0:-1]))\n date_utc = ttgt_min\n while date_utc<=ttgt_max:\n if self.time_start!=None and date_utc<self.time_start:\n pass\n elif self.time_end!=None and date_utc>self.time_end:\n pass\n else:\n self.date_lst_xtgt.append(date_utc)\n #-- increment date\n date_utc += ttgt_delta\n msg = \"read {} state components at extra target times.\".format(len(self.date_lst_xtgt))\n FileLogger.info(msg)", "def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, 
self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser 
= self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab", "def schedule_for(current_track, task, client_index):\n op = task.operation\n num_clients = task.clients\n sched = scheduler.scheduler_for(task.schedule, task.params)\n logger.info(\"Choosing [%s] for [%s].\" % (sched, task))\n runner_for_op = runner.runner_for(op.type)\n params_for_op = track.operation_parameters(current_track, op).partition(client_index, num_clients)\n\n if task.warmup_time_period is not None or task.time_period is not None:\n warmup_time_period = task.warmup_time_period if task.warmup_time_period else 0\n logger.info(\"Creating time-period based schedule with [%s] distribution for [%s] with a warmup period of [%s] seconds and a \"\n \"time period of [%s] seconds.\" % (task.schedule, op, str(warmup_time_period), str(task.time_period)))\n return time_period_based(sched, warmup_time_period, task.time_period, runner_for_op, params_for_op)\n else:\n logger.info(\"Creating iteration-count based schedule with [%s] distribution for [%s] with [%d] warmup iterations and \"\n \"[%d] iterations.\" % (task.schedule, op, task.warmup_iterations, task.iterations))\n return iteration_count_based(sched, task.warmup_iterations // num_clients, task.iterations // num_clients,\n runner_for_op, params_for_op)", "def getMeasures(unique_name=None):", "def __init__(self, learn_q, target_estimator, td_loss_fcn=None):\n super(FitTargetQ, self).__init__()\n # unpack params\n self._q, self._target_estimator = learn_q, target_estimator\n if td_loss_fcn is None:\n td_loss_fcn = tf.square\n # need computed target Q values and selected action as input\n self._input_target_q = tf.placeholder(\n dtype=tf.float32, shape=[None], name=\"input_target_q\")\n self._input_action = tf.placeholder(\n dtype=tf.uint8, shape=[None], name=\"input_action\")\n self._input_sample_weight = tf.placeholder_with_default([1.0], shape=[None], name=\"input_weight\")\n op_q = learn_q.output().op\n num_actions = learn_q.output().op.shape.as_list()[-1]\n self.selected_q = tf.reduce_sum(\n tf.one_hot(self._input_action, num_actions) * op_q, axis=1)\n self._op_td = self.selected_q - self._input_target_q\n self._op_losses = td_loss_fcn(self._op_td)\n self._op_losses_weighted = self._op_losses * self._input_sample_weight\n self._sym_loss = tf.reduce_mean(self._op_losses_weighted)\n self._update_operation = network.MinimizeLoss(self._sym_loss, var_list=self._q.variables)", "def initRemainderMulticastOPT(self, fromTime, toTime, fluct, criterion_type):\n #reset\n self.MsgReceiveCount_interval 
= 0\n self.MsgSendCount_interval = 0\n self.origins = []\n self.pathLengths = []\n self.globalMin = []\n self.globalMinSchedIdx = []\n self.overall_min = 0\n self.overall_max_path_length = 0\n self.min_path = []\n self.min_path_schedules = []\n self.chosenSchedule = []\n self.schedules = []\n self.EConsumptionChosenSchedule = []\n self.chosenScheduleIndex = -1\n\n\n #save data\n self.fromTime = fromTime\n self.toTime = toTime\n self.noOfTimesteps = (self.toTime - self.fromTime) / self.stepSize + 1\n self.EFluctuationCurve = fluct\n self.OPTcriterion = criterion_type\n\n self.overall_max_path_length = 0\n if self.OPTcriterion == 'maxmindiff':\n self.overall_min = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n self.overall_min = 0\n for a in range(len(self.EFluctuationCurve)):\n self.overall_min += abs(self.EFluctuationCurve[a])\n\n #self.globalMin = [max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n\n\n #calc schedule pool and schedule load curves\n if not self.isGasBoiler():\n self.calcSchedulePool(fromTime, toTime)\n self.calcScheduleConsumptionCurves()\n return", "def getMeasures():", "def test_init_q(self):\n\n riskfree = .01\n lmbd = .01\n lmbd_s = .5\n lmbd_y = .5\n mean_v = .5\n kappa_s = 1.5\n kappa_y = .5\n eta_s = .1\n eta_y = .01\n rho = -.5\n\n param = CentTendParam(riskfree=riskfree,\n lmbd=lmbd, lmbd_s=lmbd_s, lmbd_y=lmbd_y,\n mean_v=mean_v, kappa_s=kappa_s, kappa_y=kappa_y,\n eta_s=eta_s, eta_y=eta_y, rho=rho, measure='Q')\n\n kappa_sq = kappa_s - lmbd_s * eta_s\n kappa_yq = kappa_y - lmbd_y * eta_y\n scale = kappa_s / kappa_sq\n\n self.assertEqual(param.measure, 'Q')\n self.assertEqual(param.riskfree, riskfree)\n self.assertEqual(param.lmbd, 0)\n self.assertEqual(param.lmbd_s, lmbd_s)\n self.assertEqual(param.lmbd_y, lmbd_y)\n self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)\n self.assertEqual(param.kappa_s, kappa_sq)\n self.assertEqual(param.kappa_y, kappa_yq)\n self.assertEqual(param.eta_s, eta_s)\n self.assertEqual(param.eta_y, eta_y * scale**.5)\n self.assertEqual(param.rho, rho)\n self.assertTrue(param.is_valid())\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n param.convert_to_q()", "def optimize(\n target_name: str,\n setting: dict,\n start: datetime,\n end: datetime,\n rate: float,\n slippage: float,\n size: float,\n pricetick: float,\n capital: int,\n):\n backtest_me = BacktestMainEngine()\n backtest_me.set_parameters(\n start=start,\n end=end,\n rate=rate,\n slippage=slippage,\n size=size,\n pricetick=pricetick,\n capital=capital)\n backtest_me.setting = setting\n # 启动价差引擎\n backtest_me.engines['St'].start()\n # 显示回测结果\n df = backtest_me.calculate_result()\n statistics = backtest_me.calculate_statistics()\n backtest_me.save_chart(setting)\n for k, v in setting.items():\n statistics[k] = v\n # statistics['start'] = start.date()\n # statistics['end'] = end.date()\n # statistics['rate'] = rate\n # statistics['slippage'] = slippage\n # statistics['size'] = size\n # statistics['pricetick'] = pricetick\n statistics['capital'] = capital\n statistics['capital'] = capital\n if statistics['max_ddpercent']:\n statistics['收益回撤比'] = statistics['total_return'] / statistics['max_ddpercent']\n if statistics['profit_days'] + statistics['loss_days']:\n statistics['胜率'] = statistics['profit_days'] / (statistics['profit_days'] + statistics['loss_days'])\n return statistics", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n amp = Parameter(\"amp\")\n 
with pulse.build(backend=backend, name=\"rabi\") as default_schedule:\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def evaluate(QualityMeasure,ModelClass,dataset,subgroup,target1,target2): \r\n evaluator = {\r\n QualityMeasure.SCD: evaluate_scd,\r\n }\r\n return evaluator.get(QualityMeasure)(ModelClass,dataset,subgroup,target1,target2)", "def test_get_measure_parameters(self):\n pass", "def build_target_query(self, observable: Observable, **kwargs) -> None:\n\n # XXX for some reason the self.target_query is getting cached when the same module runs for the same analysis\n # for different observables\n if '<O_VALUE>' not in self.target_query:\n self._reload_target_query()\n logging.debug(f\"had to reset self.target_query to clear previous use\")\n\n self.target_query = self.target_query.replace('<O_TYPE>', observable.type) \\\n .replace('<O_VALUE>', observable.value) # TODO property escape stuff\n\n source_time = kwargs.get('source_event_time') or observable.time or self.root.event_time_datetime\n # if we are going off of the event time, then we use the wide duration\n start_time = source_time - self.wide_duration_before\n stop_time = source_time + self.wide_duration_after\n\n # if observable time is available, we can narrow our time spec duration\n if observable.time is not None:\n start_time = source_time - self.narrow_duration_before\n stop_time = source_time + self.narrow_duration_after\n\n self.fill_target_query_timespec(start_time, stop_time)", "async def svc_get_zone_schedule(self, **kwargs) -> None:\n # {{ state_attr('climate.ramses_cc_01_145038_04', 'schedule') }}\n await self._device.get_schedule()\n self.update_ha_state()", "def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:\n return [self.state._measure(self.qubit_map[q], self.prng) for q in qubits]", "def test_get_measure_parameters_by_id(self):\n pass", "def schedule_experiments(train_fun, decode_fun, eval_fun, train_set, dev_set,\n hyperparam_sets, FLAGS):\n\n print(\"===== Scheduled Experiments =====\")\n for hyperparam_set in hyperparam_sets:\n for hp in hyperparam_set:\n setattr(FLAGS, hp, hyperparam_set[hp])\n if hp == 'universal_keep':\n setattr(FLAGS, 'sc_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'sc_output_keep', hyperparam_set[hp])\n setattr(FLAGS, 'tg_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'tg_output_keep', hyperparam_set[hp])\n setattr(FLAGS, 'attention_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'attention_output_keep', hyperparam_set[hp])\n\n print(\"Trying parameter set: \")\n for hp in hyperparam_set:\n print(\"* {}: {}\".format(hp, hyperparam_set[hp]))\n metrics = \"top1_temp_ms\"\n\n metrics_value = single_round_model_eval(\n train_fun, decode_fun, eval_fun, train_set, dev_set, metrics)\n print(\"Parameter set: \")\n for hp in hyperparam_set:\n print(\"* {}: {}\".format(hp, hyperparam_set[hp]))\n print(\"{} = {}\".format(metrics, metrics_value))" ]
[ "0.68220955", "0.6573847", "0.6006834", "0.5501297", "0.5030022", "0.50073725", "0.47666526", "0.47343415", "0.47234833", "0.46335405", "0.4628351", "0.4601694", "0.45817143", "0.45776293", "0.45147318", "0.44863975", "0.44642326", "0.4463367", "0.44403467", "0.44367078", "0.44255114", "0.44245484", "0.44243827", "0.44216263", "0.44202965", "0.4409224", "0.44017446", "0.43952876", "0.4390451", "0.437458" ]
0.7432348
0
Return a Schedule which measures all qubits of the given backend.
def measure_all(backend) -> Schedule: # backend is V2. if isinstance(backend, BackendV2): qubits = list(range(backend.num_qubits)) else: qubits = list(range(backend.configuration().n_qubits)) return measure(qubits=qubits, backend=backend)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pulse_schedule(backend: IBMQBackend) -> Schedule:\n config = backend.configuration()\n defaults = backend.defaults()\n inst_map = defaults.instruction_schedule_map\n\n # Run 2 experiments - 1 with x pulse and 1 without\n x = inst_map.get('x', 0)\n measure = inst_map.get('measure', range(config.n_qubits)) << x.duration\n ground_sched = measure\n excited_sched = x | measure\n schedules = [ground_sched, excited_sched]\n return schedules", "def circuits(self, backend: Optional[Backend] = None) -> List[QuantumCircuit]:\n schedule = self.experiment_options.get(\"schedule\", None)\n\n if schedule is None:\n schedule = self._default_gate_schedule(backend=backend)\n else:\n if self.physical_qubits[0] not in set(ch.index for ch in schedule.channels):\n raise CalibrationError(\n f\"User provided schedule {schedule.name} does not contain a channel \"\n \"for the qubit on which to run Rabi.\"\n )\n\n if len(schedule.parameters) != 1:\n raise CalibrationError(\"Schedule in Rabi must have exactly one free parameter.\")\n\n param = next(iter(schedule.parameters))\n\n # Create template circuit\n circuit = self._template_circuit(param)\n circuit.add_calibration(\n self.__rabi_gate_name__, (self.physical_qubits[0],), schedule, params=[param]\n )\n\n # Create the circuits to run\n circs = []\n for amp in self.experiment_options.amplitudes:\n amp = np.round(amp, decimals=6)\n assigned_circ = circuit.assign_parameters({param: amp}, inplace=False)\n assigned_circ.metadata = {\n \"experiment_type\": self._type,\n \"qubits\": (self.physical_qubits[0],),\n \"xval\": amp,\n \"unit\": \"arb. unit\",\n \"amplitude\": amp,\n \"schedule\": str(schedule),\n }\n\n if backend:\n assigned_circ.metadata[\"dt\"] = getattr(backend.configuration(), \"dt\", \"n.a.\")\n\n circs.append(assigned_circ)\n\n return circs", "def measure(\n qubits: List[int],\n backend=None,\n inst_map: Optional[InstructionScheduleMap] = None,\n meas_map: Optional[Union[List[List[int]], Dict[int, List[int]]]] = None,\n qubit_mem_slots: Optional[Dict[int, int]] = None,\n measure_name: str = \"measure\",\n) -> Schedule:\n\n # backend is V2.\n if isinstance(backend, BackendV2):\n\n return _measure_v2(\n qubits=qubits,\n target=backend.target,\n meas_map=meas_map or backend.meas_map,\n qubit_mem_slots=qubit_mem_slots or dict(zip(qubits, range(len(qubits)))),\n measure_name=measure_name,\n )\n # backend is V1 or backend is None.\n else:\n try:\n return _measure_v1(\n qubits=qubits,\n inst_map=inst_map or backend.defaults().instruction_schedule_map,\n meas_map=meas_map or backend.configuration().meas_map,\n qubit_mem_slots=qubit_mem_slots,\n measure_name=measure_name,\n )\n except AttributeError as ex:\n raise exceptions.PulseError(\n \"inst_map or meas_map, and backend cannot be None simultaneously\"\n ) from ex", "def getSchedulers():", "def basicSetup(self):\n stats = self.default_statistics()\n sched, _ = self.schedulerSetup(stats[\"max_trials\"])\n\n self.assertEqual(len(sched._hyperbands), 1)\n self.assertEqual(sched._cur_band_filled(), True)\n\n filled_band = sched._hyperbands[0]\n for bracket in filled_band:\n self.assertEqual(bracket.filled(), True)\n return sched", "def create_from(cls, backend):\n backend_config = backend.configuration()\n\n # TODO : Remove usage of config.defaults when backend.defaults() is updated.\n try:\n backend_default = backend.defaults()\n buffer = backend_default.buffer\n except ModelValidationError:\n try:\n buffer = backend_config.defaults.get('buffer', 0)\n except AttributeError:\n buffer = 0\n\n # system 
size\n n_qubits = backend_config.n_qubits\n n_registers = backend_config.n_registers\n n_uchannels = backend_config.n_uchannels\n\n # generate channels with assuming their numberings are aligned with qubits\n drives = [DriveChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n measures = [MeasureChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n controls = [ControlChannel(i, buffer=buffer) for i in range(n_uchannels)]\n\n acquires = [AcquireChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n qubits = []\n for i in range(n_qubits):\n # TODO: get qubits <-> channels relationship from backend\n qubit = Qubit(i, drives[i], measures[i], acquires[i],\n control_channels=[] if not controls else controls)\n qubits.append(qubit)\n\n registers = [RegisterSlot(i) for i in range(n_registers)]\n # TODO: get #mem_slots from backend\n mem_slots = [MemorySlot(i) for i in range(len(qubits))]\n\n return DeviceSpecification(qubits, registers, mem_slots)", "def getSchedules(self) :\n return self.schedules", "def _measure_v1(\n qubits: List[int],\n inst_map: InstructionScheduleMap,\n meas_map: Union[List[List[int]], Dict[int, List[int]]],\n qubit_mem_slots: Optional[Dict[int, int]] = None,\n measure_name: str = \"measure\",\n) -> Schedule:\n\n schedule = Schedule(name=f\"Default measurement schedule for qubits {qubits}\")\n\n if isinstance(meas_map, list):\n meas_map = utils.format_meas_map(meas_map)\n\n measure_groups = set()\n for qubit in qubits:\n measure_groups.add(tuple(meas_map[qubit]))\n for measure_group_qubits in measure_groups:\n if qubit_mem_slots is not None:\n unused_mem_slots = set(measure_group_qubits) - set(qubit_mem_slots.values())\n try:\n default_sched = inst_map.get(measure_name, measure_group_qubits)\n except exceptions.PulseError as ex:\n raise exceptions.PulseError(\n \"We could not find a default measurement schedule called '{}'. \"\n \"Please provide another name using the 'measure_name' keyword \"\n \"argument. 
For assistance, the instructions which are defined are: \"\n \"{}\".format(measure_name, inst_map.instructions)\n ) from ex\n for time, inst in default_sched.instructions:\n if inst.channel.index not in qubits:\n continue\n if qubit_mem_slots and isinstance(inst, instructions.Acquire):\n if inst.channel.index in qubit_mem_slots:\n mem_slot = channels.MemorySlot(qubit_mem_slots[inst.channel.index])\n else:\n mem_slot = channels.MemorySlot(unused_mem_slots.pop())\n inst = instructions.Acquire(inst.duration, inst.channel, mem_slot=mem_slot)\n # Measurement pulses should only be added if its qubit was measured by the user\n schedule = schedule.insert(time, inst)\n\n return schedule", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=\"rabi\") as default_schedule:\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def schedule_get_buckets(self):\n return batch_ops_utils.ScheduledStampedResourceOp(\n resource_handle=self._quantile_accumulator_handle,\n op=gen_quantile_ops.quantile_accumulator_get_buckets)", "def measure_all(qubit, format='sympy', normalize=True):\n m = qubit_to_matrix(qubit, format)\n\n if format == 'sympy':\n results = []\n\n if normalize:\n m = m.normalized()\n\n size = max(m.shape) # Max of shape to account for bra or ket\n nqubits = int(math.log(size)/math.log(2))\n for i in range(size):\n if m[i] != 0.0:\n results.append(\n (Qubit(IntQubit(i, nqubits=nqubits)), m[i]*conjugate(m[i]))\n )\n return results\n else:\n raise NotImplementedError(\n \"This function cannot handle non-SymPy matrix formats yet\"\n )", "def all_schedules(self):\n return self._all_schedules", "def calculate_queues(self):\n\t\t#queues = [get_queue(lane) for lane in self.Vissim_Lanes]\n\t\t\n\t\tqueues = [0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\t\treturn queues", "def all(cls, resq, start=0, count=1):\n first = MultipleBackend.classes[0]\n return first.all(resq, start, count)", "def get_schedules(self):\n return self.__schedules", "def all():\n schedule = Scheduler()\n schedule.committees()\n schedule.legislators()\n schedule.bills()", "def _get_schedule(self, graph, implementations):\n SchedulerTask = namedtuple(\"SchedulerTask\", [\"time\", \"node\", \"implementation\"])\n schedule = []\n for num, node in enumerate(graph.nodes):\n schedule.append(SchedulerTask(num, node, implementations[node]))\n\n return schedule", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n\n if self.experiment_options.frequency_shift is None:\n try:\n anharm, _ = backend.properties().qubit_property(self.physical_qubits[0])[\n \"anharmonicity\"\n ]\n self.set_experiment_options(frequency_shift=anharm)\n except KeyError as key_err:\n raise CalibrationError(\n f\"The backend {backend} does not provide an anharmonicity for qubit \"\n f\"{self.physical_qubits[0]}. 
Use EFRabi.set_experiment_options(frequency_shift=\"\n f\"anharmonicity) to manually set the correct frequency for the 1-2 transition.\"\n ) from key_err\n except AttributeError as att_err:\n raise CalibrationError(\n \"When creating the default schedule without passing a backend, the frequency needs \"\n \"to be set manually through EFRabi.set_experiment_options(frequency_shift=..).\"\n ) from att_err\n\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=self.__rabi_gate_name__) as default_schedule:\n with pulse.frequency_offset(\n self.experiment_options.frequency_shift,\n pulse.DriveChannel(self.physical_qubits[0]),\n ):\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:\n return [self.state._measure(self.qubit_map[q], self.prng) for q in qubits]", "def list_queues():\n url = urlparse(Config.RABBIT_MQ_URL)\n response = requests.get(f'http://{url.hostname}:{15672}/api/queues?columns=name,messages,'\\\n 'messages_ready,messages_unacknowledged',\n auth=(url.username, url.password))\n\n tasks = dict()\n\n for task in response.json():\n if 'cube' in task['name']:\n tasks[task['name']] = dict(total=task['messages'],\n ready=task['messages_ready'],\n unacked=task['messages_unacknowledged'])\n\n return tasks", "def get_large_circuit(backend: IBMQBackend) -> QuantumCircuit:\n n_qubits = min(backend.configuration().n_qubits, 20)\n circuit = QuantumCircuit(n_qubits, n_qubits)\n for n in range(n_qubits-1):\n circuit.h(n)\n circuit.cx(n, n+1)\n circuit.measure(list(range(n_qubits)), list(range(n_qubits)))\n\n return circuit", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers", "def _get_schedulers(self):\n return self.__schedulers" ]
[ "0.66667795", "0.5666289", "0.5489311", "0.5294971", "0.52392757", "0.5196187", "0.50862503", "0.5060072", "0.5027779", "0.4976724", "0.49552643", "0.49435383", "0.49327588", "0.49213088", "0.49122518", "0.4867355", "0.4866859", "0.4860552", "0.48551446", "0.48330986", "0.4830298", "0.48279867", "0.48279867", "0.48279867", "0.48279867", "0.48279867", "0.48279867", "0.48279867", "0.48279867", "0.48279867" ]
0.82172567
0
This method computes the approximated moments of a Wright-Fisher process after gen generations. If store is true, all moments used in the computations are stored in the moments attribute such that self.moments[i, j, k, l, m, n, o, p] is the ith moment of the Wright-Fisher process after j generations from an initial frequency
def compute_moments(self, gen, store=True): # the last moment generation already computed is last_gen = self.moments.shape[1] - 1 # if the moment is already computed, return it if last_gen >= gen: return self.moments[:, gen, ] # otherwise do the recursion # if store is true, increase the self.moments matrix size to store the # new computations if store: self.moments.resize(2, gen + 1, *self.moments.shape[2:], refcheck=False) # getting the fitness function fit = self.fitness[0] fit1 = self.fitness[1] fit2 = self.fitness[2] prev_mean = self.moments[0, last_gen, ] prev_var = self.moments[1, last_gen, ] # initializing recursion for WF case if self.rec == 'WF': # initializing transition matrix and the WF process distribution trans = [] prev_dist = [] # for each size, compute the corresponding transition and WF # distribution for index, size in enumerate(self.N): # vector of all states k_over_N = np.array([i / size for i in range(size + 1)]) # reshaping to fit all parameters k_over_N = k_over_N.reshape((size + 1, 1, 1, 1, 1, 1)) # vector of all possible states we can reach k = np.arange(size + 1).reshape((1, size + 1, 1, 1, 1, 1)) # the fitness vector for every state p = fit(k_over_N) # correcting some wrong values exceeding slightly 1 mask = np.logical_and(p > 1, p < 1 + 1e-10) p[mask] = 1 # the corresponding transition new_trans = sss.binom.pmf(k=k, n=size, p=p) # adding this new_trans to the list trans.append(new_trans) # setting the initial distribution new_dist = [] # for every initial frequency for j, freq in enumerate(self.x0): # the initial distribution is full of zeros except on the # corresponding number of individual (N * x0) dist = np.zeros(shape=(1, size + 1, len(self.s), len(self.h), len(self.u), len(self.v))) dist[0, int(round(freq * size))] = 1 # the first moment has to be modified (for example, if # x0 = 0.5789 with N = 1000) self.moments[0, 0, j, index, ] = (int(round(freq * size)) / size) # adding this new distribution to the list new_dist.append(dist) # adding this list to the bigger list prev_dist.append(new_dist) # prev_dist[i][j][:, k, l, m, n] contains the initial allele # frequency distribution for N = self.N[i], x0 = self.x0[j], # s = self.s[k], h = self.h[l], u = self.u[m], v = self.v[n] # do the recursion until this time while last_gen < gen: if self.rec == 'Lac': # Lacerda recursion : mean = f(mean) # var = f(mean)(1 - f(mean)) / N # + f'(mean)²*var next_mean = fit(prev_mean[np.newaxis, ]) next_var = next_mean * (1 - next_mean) / self.N[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis] next_var += (fit1(prev_mean[np.newaxis, ]) ** 2 * prev_var[np.newaxis, ]) elif self.rec == 'Ter': next_mean = (fit(self.ter_det[np.newaxis, ]) + fit1(self.ter_det[np.newaxis, ]) * self.ter_err1 + (fit2(self.ter_det[np.newaxis, ]) * self.ter_err2 / 2)) next_var = ((fit(self.ter_det[np.newaxis, ]) * (1 - fit(self.ter_det[np.newaxis, ]))) + (fit1(self.ter_det[np.newaxis, ]) * (1 - 2 * fit(self.ter_det[np.newaxis, ])) * self.ter_err1) + (fit1(self.ter_det[np.newaxis, ]) ** 2 * self.ter_err2)) / self.N[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis] next_var += (fit1(self.ter_det[np.newaxis, ]) ** 2 * prev_var[np.newaxis, ]) self.ter_err1 = ((fit1(self.ter_det[np.newaxis, ]) * self.ter_err1) + (fit2(self.ter_det[np.newaxis, ]) * self.ter_err2 / 2)) self.ter_err2 = next_var + self.ter_err1 ** 2 self.ter_det = fit(self.ter_det[np.newaxis, ])[0, ] elif self.rec == 'Tay1': next_mean = fit(prev_mean[np.newaxis, ]) next_var = next_mean * (1 - next_mean) / 
self.N[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis] next_var += ((1 - 1/self.N[np.newaxis, : , np.newaxis, np.newaxis, np.newaxis, np.newaxis]) * fit1(prev_mean[np.newaxis, ]) ** 2 * prev_var[np.newaxis, ]) elif self.rec == 'Tay2': next_mean = (fit(prev_mean[np.newaxis, ]) + fit2(prev_mean[np.newaxis, ]) * prev_var[np.newaxis, ] / 2) next_mean[next_mean > 1] = 1 next_mean[next_mean < 0] = 0 next_var = ((1 - 1/self.N[np.newaxis, : , np.newaxis, np.newaxis, np.newaxis, np.newaxis]) * fit1(prev_mean[np.newaxis, ]) ** 2 * prev_var[np.newaxis, ]) next_var[next_var < 0] = 0 next_var += next_mean * (1 - next_mean) / self.N[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis] next_var[next_var < 0] = 0 next_var[next_var > 1] = 1 next_var[ np.greater_equal( next_var, next_mean * (1 - next_mean))] = ( (next_mean * (1 - next_mean))[ np.greater_equal( next_var, next_mean * (1 - next_mean))]) elif self.rec == 'WF': next_mean = np.full(shape=(1, len(self.x0), len(self.N), len(self.s), len(self.h), len(self.u), len(self.v)), fill_value=-1, dtype=float) next_var = np.full(shape=(1, len(self.x0), len(self.N), len(self.s), len(self.h), len(self.u), len(self.v)), fill_value=-1, dtype=np.float) for i, size in enumerate(self.N): for j, freq in enumerate(self.x0): # getting the next distribution for the wright fisher # process # making the matrix product inv_axes = list(range(len(prev_dist[i][j].shape))) inv_axes[0] = 1 inv_axes[1] = 0 next_dist = np.transpose(prev_dist[i][j], inv_axes) next_dist = next_dist * trans[i] next_dist = np.sum(next_dist, axis=0) next_dist = next_dist[np.newaxis, ] assert next_dist.shape == prev_dist[i][j].shape # updating the distribution list prev_dist[i][j] = next_dist # computing moments k_over_N = np.arange(size + 1) / size k_over_N = k_over_N.reshape(1, size + 1, 1, 1, 1, 1) assert len(k_over_N.shape) == len(next_dist.shape) k_over_N_squared = k_over_N ** 2 next_mean[0, j, i] = np.sum(k_over_N * next_dist, axis=1) next_var[0, j, i] = (np.sum(k_over_N_squared * next_dist, axis=1) - next_mean[0, j, i] ** 2) else: raise NotImplementedError prev_mean = next_mean[0, ] prev_var = next_var[0, ] last_gen += 1 if store: self.moments[0, last_gen, ] = prev_mean.copy() self.moments[1, last_gen, ] = prev_var.copy() # we return in the same shape it's stored ret_mat = np.empty(shape=(2, *self.moments.shape[2:])) ret_mat[0, ] = prev_mean ret_mat[1, ] = prev_var return ret_mat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_and_store(self):\n # Initialization 
assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def compute_fixations(self, gen, app = 'beta_tataru',\n store = True, **kwargs):\n # setting the approximation recursion\n self.app = app\n \n # the last fixation probability computed is\n last_gen_fix = self.fix_proba.shape[1] - 1\n\n # if the probability is already computed, return it\n if last_gen_fix >= gen:\n return self.fix_proba[:, gen, ]\n \n # the last moment generation already computed is\n last_gen_mom = self.moments.shape[1] - 1\n\n # approximated moments until this time are needed\n if gen > last_gen_mom:\n self.compute_moments(gen = gen, store = True)\n\n\n # if store is true, increase the self.fix_proba matrix size to store\n # the new computations\n if store:\n fix_proba_t = np.full(shape = (2, gen + 1,\n *self.fix_proba.shape[2:]),\n fill_value = np.nan)\n fix_proba_t[:, :(last_gen_fix + 1), ] = self.fix_proba\n self.fix_proba = fix_proba_t\n\n # getting the fitness function and it's derivatives\n fit = self.fitness[0]\n fit1 = self.fitness[1]\n fit2 = self.fitness[2]\n\n prev_p0 = self.fix_proba[0, last_gen_fix].copy()\n prev_p1 = self.fix_proba[1, last_gen_fix].copy()\n\n # do the recursion until this time\n while last_gen_fix < gen:\n\n if app == 'beta_tataru':\n with np.errstate(invalid = 'ignore', divide = 'ignore'):\n scaling = (1 - self.fix_proba[0, last_gen_fix]\n - self.fix_proba[1, last_gen_fix])\n cond_mean = (\n (self.moments[0, last_gen_fix, ]\n - self.fix_proba[1, last_gen_fix, ])\n / scaling)\n cond_var = (\n (self.moments[1, last_gen_fix, ]\n + self.moments[0, last_gen_fix, ] ** 2\n - 
self.fix_proba[1, last_gen_fix, ])\n / scaling\n - cond_mean ** 2)\n const = cond_mean * (1 - cond_mean) / cond_var - 1\n const[scaling <= 0] = 0\n const[cond_var == 0] = 0\n cond_mean[scaling <= 0] = 0\n cond_alpha = cond_mean * const\n cond_beta = (1 - cond_mean) * const\n # this mask capture values where ss.beta is either nan or 0\n mask = (cond_alpha <= 0) | (cond_beta <= 0) | (\n ss.beta(cond_alpha, cond_beta) == 0)\n\n # p0n+1 = p0n * (1 - v) ** N + p1n * u ** N\n # + (1 - p0n - p1n) * (1 - u - v) ** N\n # * Beta(cond_alpha, cond_beta + N) / Beta(cond_alpha,\n # cond_beta)\n next_p0 = (prev_p0 * (\n 1 - self.v[np.newaxis, np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, :]) ** self.N[np.newaxis,\n :, np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis]\n + prev_p1 * (\n self.u[np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, :,\n np.newaxis] ** self.N[np.newaxis,\n :, np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis]))\n\n with np.errstate(invalid = 'ignore', divide = 'ignore'):\n next_p0 += (\n (1 - prev_p1 - prev_p0)\n * (1 - self.u[np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, :, np.newaxis]\n - self.v[np.newaxis, np.newaxis, np.newaxis,\n np.newaxis,\n np.newaxis, :]) ** self.N[np.newaxis,\n :,\n np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis]\n * ss.beta(cond_alpha, cond_beta + self.N[\n np.newaxis, :, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis])\n / ss.beta(cond_alpha, cond_beta))\n\n next_p0[mask] = prev_p0[mask]\n \n next_p1 = (prev_p0 * (\n self.v[np.newaxis, np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, :]) ** self.N[np.newaxis,\n :, np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis]\n + prev_p1 * (\n 1 - self.u[np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, :,\n np.newaxis] ** self.N[np.newaxis,\n :, np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis]))\n with np.errstate(invalid = 'ignore', divide = 'ignore'):\n next_p1 += (\n (1 - prev_p1 - prev_p0)\n * (1 - self.u[np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, :, np.newaxis]\n - self.v[np.newaxis, np.newaxis, np.newaxis,\n np.newaxis,\n np.newaxis, :]) ** self.N[np.newaxis,\n :,\n np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis]\n * ss.beta(cond_alpha + self.N[np.newaxis,\n :,\n np.newaxis,\n np.newaxis,\n np.newaxis,\n np.newaxis],\n cond_beta)\n / ss.beta(cond_alpha, cond_beta))\n next_p1[mask] = prev_p1[mask]\n \n elif app == 'beta_custom':\n pass\n elif app == 'beta_numerical':\n scaling = (1 - self.fix_proba[0, last_gen_fix]\n - self.fix_proba[1, last_gen_fix])\n with np.errstate(invalid = 'ignore', divide = 'ignore'):\n cond_mean = (\n (self.moments[0, last_gen_fix, ]\n - self.fix_proba[1, last_gen_fix, ])\n / scaling)\n cond_var = (\n (self.moments[1, last_gen_fix, ]\n + self.moments[0, last_gen_fix, ] ** 2\n - self.fix_proba[1, last_gen_fix, ])\n / scaling\n - cond_mean ** 2)\n cond_mean[scaling <= 0] = 0.5\n cond_var[scaling <= 0] = 0\n cond_var[cond_var < 0] = 0\n\n with np.errstate(divide = 'ignore', invalid = 'ignore'):\n const = cond_mean * (1 - cond_mean) / cond_var - 1\n const[scaling <= 0] = 0\n const[cond_var == 0] = 0\n cond_alpha = cond_mean * const\n cond_beta = (1 - cond_mean) * const\n mask = (cond_alpha <= 0) | (cond_beta <= 0) | (\n ss.beta(cond_alpha, cond_beta) == 0)\n \n\n next_p0 = (\n prev_p0 * (\n 1 - fit(np.zeros(shape = (1, 1, 1, 1, 1, 1, 1)))\n ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis]\n + prev_p1 * (\n 1 - fit(np.ones(shape = (1, 1, 1, 1, 1, 1, 1)))\n ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,\n 
np.newaxis, np.newaxis])\n\n x = kwargs['grid'][:, np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis, np.newaxis]\n with np.errstate(over = 'ignore'):\n to_int = np.exp(\n self.N[np.newaxis, :, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis] * np.log(\n 1 - fit(x))\n + (cond_alpha - 1) * np.log(x)\n + (cond_beta - 1) * np.log(1 - x)\n - ss.betaln(cond_alpha, cond_beta))\n assert np.all((1 - fit(x) < 1))\n assert np.all((1 - fit(x) > 0))\n \n # replacing inf values by the upper bound in numpy float\n to_int[to_int == np.inf] = np.finfo(np.float64).max \n\n integrated = np.sum(\n (to_int[:-1, ] + to_int[1:, ]) / 2\n * (x[1:, ] - x[:-1, ]),\n axis = 0)\n\n\n next_p0 += (\n (1 - prev_p0 - prev_p1) * integrated)\n next_p0 = next_p0[0, ]\n \n next_p0[mask] = prev_p0[mask]\n \n next_p1 = (\n prev_p0 * (\n fit(np.zeros(shape = (1, 1, 1, 1, 1, 1, 1)))\n ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis]\n + prev_p1 * (\n fit(np.ones(shape = (1, 1, 1, 1, 1, 1, 1)))\n ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis])\n\n x = kwargs['grid'][:, np.newaxis, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis, np.newaxis]\n to_int = np.exp(\n self.N[np.newaxis, :, np.newaxis, np.newaxis,\n np.newaxis, np.newaxis] * np.log(fit(x))\n + (cond_alpha - 1) * np.log(x)\n + (cond_beta - 1) * np.log(1 - x)\n - ss.betaln(cond_alpha, cond_beta))\n\n integrated = np.sum(\n (to_int[:-1, ] + to_int[1:, ]) / 2\n * (x[1:, ] - x[:-1, ]),\n axis = 0)\n \n next_p1 += (\n (1 - prev_p0 - prev_p1) * integrated)\n next_p1 = next_p1[0, ]\n \n next_p1[mask] = prev_p1[mask]\n\n \n elif app == 'gauss_numerical':\n pass\n elif app == 'wf_exact':\n pass\n else:\n raise NotImplementedError\n\n prev_p0 = next_p0\n prev_p1 = next_p1\n\n last_gen_fix += 1\n\n if store:\n self.fix_proba[0, last_gen_fix, ] = prev_p0.copy()\n self.fix_proba[1, last_gen_fix, ] = prev_p1.copy()\n\n ret_mat = np.empty(shape = (2, *self.fix_proba.shape[2:]))\n ret_mat[0, ] = prev_p0\n ret_mat[1, ] = prev_p1\n\n return ret_mat", "def run_metropolis(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n self.sam.sample_values(positions)\n\n self.sam.average_values(self.mc_cycles)\n energy = self.sam.local_energy\n d_El = self.sam.derivative_energy\n var = self.sam.variance\n self.print_averages()\n return d_El, energy, var", "def moments(self):", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def run(self, niter, calc_moments=True, save_last_param=None, verbose=True,\n 
return_analytics=False, seed=None):\n if niter < 1:\n if verbose:\n print(\"Nothing to do here as provided arg. `niter` is {}\" \\\n .format(niter))\n # return with desired args\n out = [self.INFO_OK]\n if calc_moments:\n out.append((None, None))\n if return_analytics:\n out.append((None, None, None))\n return out if len(out) > 1 else out[0]\n\n # Get seeds for sampling in workers for each iteration\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n rng = np.random.RandomState(seed=seed)\n seeds = rng.randint(0, pystan_max_uint, size=(niter, self.K))\n\n # Localise some instance variables\n # Mean and cov of the posterior approximation\n S = self.S\n m = self.m\n # Natural parameters of the approximation\n Q = self.Q\n r = self.r\n # Natural site parameters\n Qi = self.Qi\n ri = self.ri\n # Natural site proposal parameters\n Qi2 = self.Qi2\n ri2 = self.ri2\n # Site parameter updates\n dQi = self.dQi\n dri = self.dri\n\n # Array for positive definitness checking of each cavity distribution\n posdefs = np.empty(self.K, dtype=bool)\n\n if calc_moments:\n # Allocate memory for results\n m_phi_s = np.zeros((niter, self.dphi))\n cov_phi_s = np.zeros((niter, self.dphi, self.dphi))\n\n # monitor sampling times, mean stepsizes, and max rhats, and other times\n stimes = np.zeros(niter)\n msteps = np.zeros(niter)\n mrhats = np.zeros(niter)\n othertimes = np.zeros(niter)\n\n # Iterate niter rounds\n for cur_iter in range(niter):\n self.iter += 1\n\n # Tilted distributions (parallelisable)\n # -------------------------------------\n\n if verbose:\n print(\n \"Iter {} starting. Process tilted distributions\"\n .format(self.iter)\n )\n for k in range(self.K):\n if verbose:\n sys.stdout.write(\"\\r site {}\".format(k+1)+' '*10+'\\b'*9)\n # Force flush here as it is not done automatically\n sys.stdout.flush()\n # Process the site\n if save_last_param:\n posdefs[k] = self.workers[k].tilted(\n dQi[:,:,k],\n dri[:,k],\n save_samples = save_last_param,\n seed = seeds[cur_iter, k]\n )\n else:\n posdefs[k] = self.workers[k].tilted(\n dQi[:,:,k],\n dri[:,k],\n seed = seeds[cur_iter, k]\n )\n if verbose and not posdefs[k]:\n sys.stdout.write(\"fail\\n\")\n if verbose:\n if np.all(posdefs):\n print(\"\\rAll sites ok\")\n elif np.any(posdefs):\n print(\"\\rSome sites failed and are not updated\")\n else:\n print(\"\\rEvery site failed\")\n if not np.any(posdefs):\n # all sites failed, return with desired args\n out = [self.INFO_ALL_SITES_FAIL]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n\n # Store max sampling time\n stimes[cur_iter] = max([w.last_time for w in self.workers])\n msteps[cur_iter] = max([w.last_msteps for w in self.workers])\n mrhats[cur_iter] = max([w.last_mrhat for w in self.workers])\n\n if verbose:\n print(\n \"Sampling done, max sampling time {}\"\n .format(stimes[cur_iter])\n )\n\n # measure elapsed time for othertimes\n start_othertime = time.time()\n\n # Update global approx\n # --------------------\n\n # Initial dampig factor\n df = self.df0(self.iter)\n if verbose:\n print(\"Iter {}, starting df {:.3g}\".format(self.iter, df))\n fail_printline_pos = False\n fail_printline_cov = False\n # Fail flag for pos.def enforcing\n failed_force_pos_def = False\n while True:\n # Try to update the global posterior approximation\n\n # These 4 lines could be run in parallel also\n np.add(Qi, np.multiply(df, dQi, out=Qi2), out=Qi2)\n np.add(ri, np.multiply(df, dri, 
out=ri2), out=ri2)\n np.add(Qi2.sum(2, out=Q), self.Q0, out=Q)\n np.add(ri2.sum(1, out=r), self.r0, out=r)\n\n # Check for positive definiteness\n cho_Q = S\n np.copyto(cho_Q, Q)\n try:\n linalg.cho_factor(cho_Q, overwrite_a=True)\n except linalg.LinAlgError:\n # Not positive definite -> reduce damping factor\n df *= self.df_decay\n if verbose:\n fail_printline_pos = True\n sys.stdout.write(\n \"\\rNon pos. def. posterior cov, \" +\n \"reducing df to {:.3}\".format(df) +\n \" \"*5 + \"\\b\"*5\n )\n sys.stdout.flush()\n if self.iter == 1:\n if verbose:\n print(\"\\nInvalid prior.\")\n # return with desired args\n out = [self.INFO_INVALID_PRIOR]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n if df < self.df_treshold:\n if verbose:\n print(\"\\nDamping factor reached minimum.\")\n df = self.df0(self.iter)\n np.add(Qi, np.multiply(df, dQi, out=Qi2), out=Qi2)\n np.add(ri, np.multiply(df, dri, out=ri2), out=ri2)\n if failed_force_pos_def:\n if verbose:\n print(\"Failed to force pos_def global.\")\n # return with desired args\n out = [self.INFO_DF_TRESHOLD_REACHED_CAVITY]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n failed_force_pos_def = True\n # Try to fix by forcing improper sites to proper\n posdefs.fill(0)\n for k in range(self.K):\n # Set min eigenvalue to MIN_EIG by adding to the\n # diagonal if it is smaller than MIN_EIG_TRESHOLD\n min_eig = linalg.eigvalsh(\n Qi2[:,:,k], eigvals=(0,0))[0]\n if min_eig < self.MIN_EIG_TRESHOLD:\n Qi[:,:,k].flat[::self.dphi+1] += (\n self.MIN_EIG - min_eig)\n posdefs[k] = 1\n if verbose:\n print(\"Force sites {} pos_def.\".format(\n np.nonzero(posdefs)[0]))\n continue\n\n # Cavity distributions (parallelisable)\n # -------------------------------------\n # Check positive definitness for each cavity distribution\n for k in range(self.K):\n posdefs[k] = \\\n self.workers[k].cavity(Q, r, Qi2[:,:,k], ri2[:,k])\n # Early stopping criterion (when in serial)\n if not posdefs[k]:\n break\n\n if np.all(posdefs):\n # All cavity distributions are positive definite.\n # Accept step (switch Qi-Qi2 and ri-ri2)\n temp = Qi\n Qi = Qi2\n Qi2 = temp\n temp = ri\n ri = ri2\n ri2 = temp\n self.Qi = Qi\n self.Qi2 = Qi2\n self.ri = ri\n self.ri2 = ri2\n break\n\n else:\n # Not all cavity distributions are positive definite ...\n # reduce the damping factor\n df *= self.df_decay\n if verbose:\n if fail_printline_pos:\n fail_printline_pos = False\n print()\n fail_printline_cov = True\n sys.stdout.write(\n \"\\rNon pos. def. 
cavity, \" +\n \"(first encountered in site {}), \"\n .format(np.nonzero(~posdefs)[0][0]) +\n \"reducing df to {:.3}\".format(df) +\n \" \"*5 + \"\\b\"*5\n )\n sys.stdout.flush()\n if df < self.df_treshold:\n if verbose:\n print(\"\\nDamping factor reached minimum.\")\n df = self.df0(self.iter)\n np.add(Qi, np.multiply(df, dQi, out=Qi2), out=Qi2)\n np.add(ri, np.multiply(df, dri, out=ri2), out=ri2)\n if failed_force_pos_def:\n if verbose:\n print(\"Failed to force pos_def cavities.\")\n # return with desired args\n out = [self.INFO_DF_TRESHOLD_REACHED_CAVITY]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n failed_force_pos_def = True\n # Try to fix by forcing improper sites to proper\n posdefs.fill(0)\n for k in range(self.K):\n # Set min eigenvalue to MIN_EIG by adding to the\n # diagonal if it is smaller than MIN_EIG_TRESHOLD\n min_eig = linalg.eigvalsh(\n Qi2[:,:,k], eigvals=(0,0))[0]\n if min_eig < self.MIN_EIG_TRESHOLD:\n Qi[:,:,k].flat[::self.dphi+1] += (\n self.MIN_EIG - min_eig)\n posdefs[k] = 1\n if verbose:\n print(\"Force sites {} pos_def.\".format(\n np.nonzero(posdefs)[0]))\n if verbose and (fail_printline_pos or fail_printline_cov):\n print()\n\n if calc_moments:\n # Invert Q (chol was already calculated)\n # N.B. The following inversion could be done while\n # parallel jobs are running, thus saving time.\n invert_normal_params(cho_Q, r, out_A='in-place', out_b=m,\n cho_form=True)\n # Store the approximation moments\n np.copyto(m_phi_s[cur_iter], m)\n np.copyto(cov_phi_s[cur_iter], S.T)\n if verbose:\n print(\n \"Mean and std of phi[0]: {:.3}, {:.3}\"\n .format(\n m_phi_s[cur_iter,0],\n np.sqrt(cov_phi_s[cur_iter,0,0])\n )\n )\n\n # measure total time - tilted time\n othertimes[cur_iter] = time.time() - start_othertime\n\n if verbose:\n print(\"Iter {} done.\".format(self.iter))\n\n if verbose:\n print(\n \"{} iterations done\\nTotal limiting sampling time: {}\"\n .format(niter, stimes.sum())\n )\n\n # return with desired args\n out = [self.INFO_OK]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return tuple(out) if len(out) > 1 else out[0]", "def init_moments(self, sample_count):\n # Compute outputs for the input latent noise in X_noise\n X = self.GN.sample_from_model(sample_count)\n # Get the transform to apply prior to moment matching\n P = self.mom_match_proj.get_value(borrow=False)\n # Compute post-transform mean and covariance of the outputs\n mu, sigma = projected_moments(X, P, ary_type='numpy')\n # Initialize the generator network's running moment estimates \n self.GN.dist_cov.set_value(sigma.astype(theano.config.floatX))\n self.GN.dist_mean.set_value(mu.astype(theano.config.floatX))\n return", "def estimate_moment(self):\n # Due to the optimization, we may store more than k elements in the\n # sample. 
The following removes excessive elements if needed.\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n\n # The inclusion threshold (highest seed of element in the sample) is\n # used to compute the inclusion probabilities for the other elements\n # in the sample.\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n\n # Computes and sums the inverse-probability estimator for all keys\n # in the sample.\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n # Warns us if we may run into float precision issues.\n # TODO(ofirg): change this warning to something more robust than\n # a print (and maybe use other approximations of exp() that are\n # better for this case).\n if (count**self.sample_p) * threshold < 2.0**(-24):\n print(\"(count**self.sample_p) * threshold < 2^{-24}\")\n print((count**self.sample_p) * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * (count**self.sample_p) * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n\n return sum_estimator", "def store(self):\n store_moments = self.steps_performed % self.meas_every[0] == 0\n store_coords = self.steps_performed % self.meas_every[1] == 0\n if not (store_moments or store_coords):\n return\n Xp = np.copy(self.bunch.X[:, [1, 3]])\n self.kick(+0.5 * self.ds) # sync positions/slopes\n if store_moments:\n self.history.store_moments(self.s)\n if store_coords:\n self.history.store_coords(self.s)\n self.bunch.X[:, [1, 3]] = Xp", "def estimate_moment(self):\n # Due to the optimization, we may store more than k elements in the\n # sample. The following removes excessive elements if needed.\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n\n # The inclusion threshold (highest seed of element in the sample) is\n # used to compute the inclusion probabilities for the other elements\n # in the sample.\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n\n # Computes and sums the inverse-probability estimator for all keys\n # in the sample.\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n weight = self.func_of_freq(self.advice_obj.predict(key))\n # Warns us if we may run into float precision issues.\n # TODO(ofirg): change this warning to something more robust than\n # a print (and maybe use other approximations of exp() that are\n # better for this case).\n if weight * threshold < 2.0**(-24):\n print(\"weight * threshold < 2^{-24}\")\n print(weight * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * weight * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n\n return sum_estimator", "def calc_moments(freqs, fourier_amps, orders):\n squared_fa = np.square(fourier_amps)\n\n # Use trapzoidal integration to compute the requested moments.\n moments = [\n 2. 
* trapz(freqs, np.power(2 * np.pi * freqs, o) * squared_fa)\n for o in orders\n ]\n\n return moments", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def _compute_instance_moments(x):\n return torch.mean(x, dim=(2, 3), keepdim=True), torch.var(x, dim=(2, 3), keepdim=True)", "def _compute_batch_moments(x):\n return torch.mean(x, dim=(0, 2, 3), keepdim=True), torch.var(x, dim=(0, 2, 3), keepdim=True)", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. + cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals", "def build_magmom(self, list_oxidized_site_indices, list_reduced_site_indices):\n\n MAGMOM = []\n # tabulate how many sites must be reduced from every species in the variable_magnetization_dict.\n\n for i_s, site in enumerate(self.structure):\n\n random_addition = np.round( 0.02*np.random.random(1)[0]-0.01, 6)\n\n if i_s in list_oxidized_site_indices:\n m0 = self.variable_magnetization_dict['Fe']['m_reduced']\n elif i_s in list_reduced_site_indices:\n m0 = self.variable_magnetization_dict['Fe']['m_oxidized']\n else:\n m0 = 0.3\n random_addition = 0.\n\n MAGMOM.append(m0+random_addition)\n\n return MAGMOM", "def _compute_moments(self, u):\n\n # Get the moments from the parent Gaussian Markov Chain\n #u = self.parents[0].get_moments() #message_to_child()\n\n # Send only moments <X(n)> and <X(n)X(n)> but not <X(n-1)X(n)>\n return u[:2]", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. 
/ stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals", "def run(self):\n\n # If this was a tanh model or some such thing, we're already done.\n if self.is_phenom:\n return\n if self.is_complete:\n print(\"Already ran simulation!\")\n return\n\n # Need to generate radiation backgrounds first.\n if self.pf['radiative_transfer']:\n self.medium.field.run()\n self._f_Jc = self.medium.field._f_Jc\n self._f_Ji = self.medium.field._f_Ji\n self._f_Jlw = self.medium.field._f_Jlw\n else:\n self._f_Jc = lambda z: 0.0\n self._f_Ji = lambda z: 0.0\n self._f_Jlw = lambda z: 0.0\n\n # Start timer\n t1 = time.time()\n\n tf = self.medium.tf\n self.medium._insert_inits()\n\n pb = self.pb = ProgressBar(tf, use=self.pf['progress_bar'],\n name='gs-21cm')\n\n # Lists for data in general\n self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm, \\\n self.all_RC_igm, self.all_RC_cgm = \\\n self.medium.all_t, self.medium.all_z, self.medium.all_data_igm, \\\n self.medium.all_data_cgm, self.medium.all_RCs_igm, self.medium.all_RCs_cgm\n\n # Add zeros for Ja\n for element in self.all_data_igm:\n element['Ja'] = np.zeros(self.grid.dims)\n element['Jc'] = np.zeros(self.grid.dims)\n element['Ji'] = np.zeros(self.grid.dims)\n element['Jlw'] = np.zeros(self.grid.dims)\n\n # List for extrema-finding\n self.all_dTb = self._init_dTb()\n for t, z, data_igm, data_cgm, rc_igm, rc_cgm in self.step():\n\n # Occasionally the progress bar breaks if we're not careful\n if z < self.pf['final_redshift']:\n break\n if z < self.pf['kill_redshift']:\n break\n\n # Delaying the initialization prevents progressbar from being\n # interrupted by, e.g., PrintInfo calls\n if not pb.has_pb:\n pb.start()\n\n pb.update(t)\n\n # Save data\n self.all_z.append(z)\n self.all_t.append(t)\n self.all_dTb.append(data_igm['dTb'][0])\n self.all_data_igm.append(data_igm.copy())\n self.all_RC_igm.append(rc_igm.copy())\n\n if self.pf['include_cgm']:\n self.all_data_cgm.append(data_cgm.copy())\n self.all_RC_cgm.append(rc_cgm.copy())\n\n # Automatically find turning points\n if self.pf['track_extrema']:\n if self.track.is_stopping_point(self.all_z, self.all_dTb):\n break\n\n pb.finish()\n\n self.history_igm = _sort_history(self.all_data_igm, prefix='igm_',\n squeeze=True)\n\n if self.pf['include_cgm']:\n self.history_cgm = _sort_history(self.all_data_cgm, prefix='cgm_',\n squeeze=True)\n else:\n self.history_cgm = {}\n\n self.history = self.history_igm.copy()\n self.history.update(self.history_cgm)\n\n ##\n # In the future, could do this better by only calculating Ja at\n # the end, since it a passive quantity (unless we included its\n # very small heating).\n ##\n #if self.pf['secondary_lya']:\n # xe = lambda zz: np.interp(zz, self.history['z'][-1::-1],\n # self.history['igm_e'][-1::-1])\n # self.medium.field.run(xe=xe)\n # self._f_Ja = self.medium.field._f_Ja\n # #self._f_Jlw = self.medium.field._f_Jlw\n #\n # # Fix Ja in history\n\n self.history['dTb'] = self.history['igm_dTb']\n #self.history['dTb_bulk'] = self.history['igm_dTb_bulk']\n\n self.history['Ts'] = self.history['igm_Ts']\n self.history['Jc'] = self.history['igm_Jc']\n self.history['Ji'] = self.history['igm_Ji']\n self.history['Ja'] = self.history['igm_Jc'] + self.history['igm_Ji']\n self.history['Jlw'] = self.history['igm_Jlw']\n\n # Save rate coefficients [optional]\n if self.pf['save_rate_coefficients']:\n self.rates_igm = \\\n _sort_history(self.all_RC_igm, prefix='igm_', squeeze=True)\n self.rates_cgm = \\\n _sort_history(self.all_RC_cgm, prefix='cgm_', 
squeeze=True)\n\n self.history.update(self.rates_igm)\n self.history.update(self.rates_cgm)\n\n self.history['t'] = np.array(self.all_t)\n self.history['z'] = np.array(self.all_z)\n\n ##\n # Optional extra radio background\n ##\n Tr = np.zeros_like(self.history['z'])\n for popid, pop in enumerate(self.pops):\n if not pop.is_src_radio:\n continue\n\n z, E, flux = self.field.get_history(popid, flatten=True)\n\n E21cm = h_p * nu_0_mhz * 1e6 / erg_per_ev\n f21 = interp1d(E, flux, axis=1, bounds_error=False,\n fill_value=0.0, force_scipy=True)\n flux_21cm = f21(E21cm)\n\n Tr += np.interp(self.history['z'], z, flux_21cm) \\\n * E21cm * erg_per_ev * c**2 / k_B / 2. / (nu_0_mhz * 1e6)**2\n\n if not np.all(Tr == 0):\n assert self.medium.parcel_igm.grid.hydr.Tbg is None\n elif self.medium.parcel_igm.grid.hydr.Tbg is not None:\n Tr = self.medium.parcel_igm.grid.hydr.Tbg(self.history['z'])\n\n self.history['Tr'] = Tr\n\n # Correct the brightness temperature if there are non-CMB backgrounds\n if not np.all(Tr == 0):\n zall = self.history['z']\n n_H = self.medium.parcel_igm.grid.cosm.nH(zall)\n Ts = self.medium.parcel_igm.grid.hydr.Ts(zall,\n self.history['igm_Tk'], self.history['Ja'],\n self.history['igm_h_2'], self.history['igm_e'] * n_H, Tr)\n\n if self.pf['floor_Ts']:\n Ts = max(Ts, self.medium.parcel_igm.grid.hydr.Ts_floor(z=zall))\n\n # Compute volume-averaged ionized fraction\n xavg = self.history['cgm_h_2'] \\\n + (1. - self.history['cgm_h_2']) * self.history['igm_h_2']\n\n # Derive brightness temperature\n dTb = self.medium.parcel_igm.grid.hydr.get_21cm_dTb(zall, Ts,\n xavg=xavg, Tr=Tr)\n\n self.history['dTb_no_radio'] = self.history['dTb'].copy()\n self.history['dTb'] = dTb\n\n #self.history['dTb_bulk'] = \\\n # self.medium.parcel_igm.grid.hydr.dTb(zall, 0.0, Ts, Tr)\n\n t2 = time.time()\n\n self.timer = t2 - t1\n\n self.is_complete = True", "def _compute_pooled_moments(x, alpha, batch_mean, batch_var, augment_moment_fn):\n augment_mean, augment_var = augment_moment_fn(x)\n pooled_mean = alpha * batch_mean + (1.0 - alpha) * augment_mean\n batch_mean_diff = batch_mean - pooled_mean\n augment_mean_diff = augment_mean - pooled_mean\n pooled_var = alpha * (batch_var + (batch_mean_diff * batch_mean_diff)) +\\\n (1.0 - alpha) * (augment_var + (augment_mean_diff * augment_mean_diff))\n return pooled_mean, pooled_var", "def do_mf_updates(self, num_steps, report = False):\n \n output_vars =[]\n \n if self.num_hidden == 0:\n \n if report:\n \n output_vars.append(T.mean(T.log(self.mf_vis_p)))\n \n update_funct = theano.function(inputs =[],\n outputs = output_vars,\n updates = [(self.mf_vis_p,\\\n self.mf_updates)])\n \n for step in range(num_steps):\n if report:\n avg_log_mf = update_funct() \n print(\"Step %d: average value of MF parameter --- %f\"%\n (step, avg_log_mf[0]))\n else:\n update_funct()\n \n elif self.num_hidden > 0: \n \n if report:\n \n output_vars.append(T.mean(T.log(self.mf_vis_p)))\n \n output_vars.append(T.mean(T.log(self.mf_hid_p)))\n \n updates = OrderedDict([(self.mf_vis_p, self.mf_vis_updates),\n (self.mf_hid_p, self.mf_hid_updates)])\n \n update_funct = theano.function(inputs = [],\n outputs = output_vars,\n updates = updates)\n \n for step in range(num_steps):\n if report:\n avg_log_vis, avg_log_hid = update_funct() \n print(\"Step %d: average value of visible MF parameter --- %f\"%\n (step, avg_log_vis))\n print(\"Step %d: average value of hidden MF parameter --- %f\"%\n (step, avg_log_hid))\n else:\n update_funct()", "def compute_and_store(self, waveforms, *args, **kwargs):\n\n 
samplerate = kwargs.pop(\"samplerate\", 16000)\n temporal_offset = kwargs.pop(\"temporal_offset\", 0)\n # append = kwargs.pop(\"append\", False)\n append = False\n start = 0\n with h5py.File(self.filename, \"a\") as hf:\n logger.info(\"Starting loop through %d waveforms\" % len(waveforms))\n for ii, waveform in enumerate(waveforms):\n waveform, fs, filename = self.load_waveform(waveform, samplerate)\n kwargs[\"samplerate\"] = fs\n logger.info(\"%d) Computing transform\" % ii)\n outputs = self.compute(waveform, *args, **kwargs)\n\n if isinstance(outputs, tuple):\n outputs = list(outputs)\n else:\n outputs = [outputs]\n filename = [filename]\n\n self.store_data(outputs, filenames=filename)", "def build_magmom(self,list_oxidizable_site_indices):\n\n MAGMOM = []\n # tabulate how many sites must be reduced from every species in the variable_magnetization_dict.\n reduction_counter = {}\n for key in self.variable_magnetization_dict:\n reduction_counter[key] = self.variable_magnetization_dict[key]['n_reduced']\n\n dict_reduction = {}\n #reduce according to proximity\n for i_s in list_oxidizable_site_indices:\n symbol = self.structure.sites[i_s].specie.symbol\n \n if reduction_counter[symbol] > 0:\n dict_reduction[i_s] = self.variable_magnetization_dict[symbol]['m_reduced']\n reduction_counter[symbol] -= 1\n elif reduction_counter[symbol] == 0:\n dict_reduction[i_s] = self.variable_magnetization_dict[symbol]['m_oxidized']\n\n else:\n print(\"SOMETHING IS WRONG. REVIEW CODE!\")\n sys.exit()\n\n for i_s, site in enumerate(self.structure):\n if i_s in dict_reduction:\n # add a bit of randomness to not get trapped in metastable solution.\n # It is quite useless to have a random number with 16 decimals, and it \n # makes the INCAR ugly; let's round.\n random_addition = np.round( 0.2*np.random.random(1)[0]-0.1, 6)\n MAGMOM.append(dict_reduction[i_s]+random_addition)\n else:\n MAGMOM.append(0.6)\n\n return MAGMOM", "def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):\n\n # flag\n init_mult = 1000\n\n T = self.num_time_slices\n\n mean = self.mean[word]\n variance = self.variance[word]\n\n # only used for DIM mode\n # w_phi_l = self.w_phi_l[word]\n # m_update_coeff = self.m_update_coeff[word]\n\n # temp_vector holds temporary zeta values\n self.temp_vect = np.zeros(T)\n\n for u in range(T):\n self.temp_vect[u] = np.exp(mean[u + 1] + variance[u + 1] / 2)\n\n for t in range(T):\n mean_deriv = mean_deriv_mtx[t]\n term1 = 0\n term2 = 0\n term3 = 0\n term4 = 0\n\n for u in range(1, T + 1):\n mean_u = mean[u]\n mean_u_prev = mean[u - 1]\n dmean_u = mean_deriv[u]\n dmean_u_prev = mean_deriv[u - 1]\n\n term1 += (mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)\n term2 += (word_counts[u - 1] - (totals[u - 1] * self.temp_vect[u - 1] / self.zeta[u - 1])) * dmean_u\n\n model = \"DTM\"\n if model == \"DIM\":\n # do some stuff\n pass\n\n if self.chain_variance:\n term1 = - (term1 / self.chain_variance)\n term1 = term1 - (mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance)\n else:\n term1 = 0.0\n\n deriv[t] = term1 + term2 + term3 + term4\n\n return deriv", "def compute_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # loop over all the complexes in the database\n first = True\n for comp in tqdm(self.index_complexes):\n fname, molname = comp[0], comp[1]\n\n # get the feature/target\n if self.mapfly:\n feature, target = self.map_one_molecule(\n fname, mol=molname)\n else:\n feature, target = self.load_one_molecule(\n fname, mol=molname)\n\n # create the norm isntances at 
the first passage\n if first:\n self.param_norm = {'features': [], 'targets': None}\n for ifeat in range(feature.shape[0]):\n self.param_norm['features'].append(NormParam())\n self.param_norm['targets'] = MinMaxParam()\n first = False\n\n # update the norm instances\n for ifeat, mat in enumerate(feature):\n self.param_norm['features'][ifeat].add(\n np.mean(mat), np.var(mat))\n self.param_norm['targets'].update(target)\n\n # process the std of the features and make array for fast access\n nfeat, ncomplex = len(\n self.param_norm['features']), len(self.index_complexes)\n self.feature_mean, self.feature_std = [], []\n for ifeat in range(nfeat):\n\n # process the std and check\n self.param_norm['features'][ifeat].process(ncomplex)\n if self.param_norm['features'][ifeat].std == 0:\n logger.info(' Final STD Null. Changed it to 1')\n self.param_norm['features'][ifeat].std = 1\n\n # store as array for fast access\n self.feature_mean.append(\n self.param_norm['features'][ifeat].mean)\n self.feature_std.append(\n self.param_norm['features'][ifeat].std)\n\n self.target_min = self.param_norm['targets'].min[0]\n self.target_max = self.param_norm['targets'].max[0]\n\n logger.info(f'{self.target_min}, {self.target_max}')", "def simulate(self):\n observations = [\n np.random.normal(\n self.mu + absolute_effect\n ,self.sigma\n ,int(self.sample_size * self.test_splits[i])\n )\n for i, absolute_effect in enumerate(self.absolute_effects)\n ]\n\n effect_point_estimates = [\n round(test_observations.mean()-self.mu, 4)\n for test_observations in observations\n ]\n f_stat, p_value = f_oneway(*observations)\n return f_stat, p_value, effect_point_estimates" ]
[ "0.5578582", "0.5424871", "0.5365875", "0.5365875", "0.53259766", "0.5321996", "0.5301589", "0.5287845", "0.5287845", "0.52732825", "0.5256292", "0.5183499", "0.5171409", "0.51407486", "0.5116536", "0.5102273", "0.50714815", "0.5045087", "0.5026461", "0.49676678", "0.4946218", "0.49283117", "0.49125037", "0.49045932", "0.4884918", "0.48695886", "0.48687363", "0.48407674", "0.48229927", "0.4814907" ]
0.7055621
0
This method computes the approximated fixation probabilities of a Wright-Fisher process after gen generations. app is a string indicating how to approximate transitions and integrate them in these computations. If store is true, all probabilities used in the computations are stored in the fix_proba attribute such that self.fix_proba[i, j, k, l, m, n, o, p] is the probability for the Wright-Fisher process to be fixed (in 0 for i = 0 or in 1 for i = 1) after j generations from an initial frequency
def compute_fixations(self, gen, app = 'beta_tataru', store = True, **kwargs):
    # setting the approximation recursion
    self.app = app
    # the last fixation probability computed is
    last_gen_fix = self.fix_proba.shape[1] - 1
    # if the probability is already computed, return it
    if last_gen_fix >= gen:
        return self.fix_proba[:, gen, ]
    # the last moment generation already computed is
    last_gen_mom = self.moments.shape[1] - 1
    # approximated moments until this time are needed
    if gen > last_gen_mom:
        self.compute_moments(gen = gen, store = True)
    # if store is true, increase the self.fix_proba matrix size to store
    # the new computations
    if store:
        fix_proba_t = np.full(
            shape = (2, gen + 1, *self.fix_proba.shape[2:]),
            fill_value = np.nan)
        fix_proba_t[:, :(last_gen_fix + 1), ] = self.fix_proba
        self.fix_proba = fix_proba_t
    # getting the fitness function and it's derivatives
    fit = self.fitness[0]
    fit1 = self.fitness[1]
    fit2 = self.fitness[2]
    prev_p0 = self.fix_proba[0, last_gen_fix].copy()
    prev_p1 = self.fix_proba[1, last_gen_fix].copy()
    # do the recursion until this time
    while last_gen_fix < gen:
        if app == 'beta_tataru':
            with np.errstate(invalid = 'ignore', divide = 'ignore'):
                scaling = (1 - self.fix_proba[0, last_gen_fix]
                           - self.fix_proba[1, last_gen_fix])
                cond_mean = (
                    (self.moments[0, last_gen_fix, ]
                     - self.fix_proba[1, last_gen_fix, ]) / scaling)
                cond_var = (
                    (self.moments[1, last_gen_fix, ]
                     + self.moments[0, last_gen_fix, ] ** 2
                     - self.fix_proba[1, last_gen_fix, ]) / scaling
                    - cond_mean ** 2)
                const = cond_mean * (1 - cond_mean) / cond_var - 1
            const[scaling <= 0] = 0
            const[cond_var == 0] = 0
            cond_mean[scaling <= 0] = 0
            cond_alpha = cond_mean * const
            cond_beta = (1 - cond_mean) * const
            # this mask capture values where ss.beta is either nan or 0
            mask = (cond_alpha <= 0) | (cond_beta <= 0) | (
                ss.beta(cond_alpha, cond_beta) == 0)
            # p0n+1 = p0n * (1 - v) ** N + p1n * u ** N
            #     + (1 - p0n - p1n) * (1 - u - v) ** N
            #     * Beta(cond_alpha, cond_beta + N) / Beta(cond_alpha,
            #     cond_beta)
            next_p0 = (prev_p0 * (
                1 - self.v[np.newaxis, np.newaxis, np.newaxis, np.newaxis,
                           np.newaxis, :])
                ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                          np.newaxis, np.newaxis]
                + prev_p1 * (
                    self.u[np.newaxis, np.newaxis, np.newaxis, np.newaxis,
                           :, np.newaxis]
                    ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                              np.newaxis, np.newaxis]))
            with np.errstate(invalid = 'ignore', divide = 'ignore'):
                next_p0 += (
                    (1 - prev_p1 - prev_p0)
                    * (1 - self.u[np.newaxis, np.newaxis, np.newaxis,
                                  np.newaxis, :, np.newaxis]
                       - self.v[np.newaxis, np.newaxis, np.newaxis,
                                np.newaxis, np.newaxis, :])
                    ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                              np.newaxis, np.newaxis]
                    * ss.beta(cond_alpha, cond_beta + self.N[
                        np.newaxis, :, np.newaxis, np.newaxis,
                        np.newaxis, np.newaxis])
                    / ss.beta(cond_alpha, cond_beta))
            next_p0[mask] = prev_p0[mask]
            next_p1 = (prev_p0 * (
                self.v[np.newaxis, np.newaxis, np.newaxis, np.newaxis,
                       np.newaxis, :])
                ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                          np.newaxis, np.newaxis]
                + prev_p1 * (
                    1 - self.u[np.newaxis, np.newaxis, np.newaxis,
                               np.newaxis, :, np.newaxis]
                    ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                              np.newaxis, np.newaxis]))
            with np.errstate(invalid = 'ignore', divide = 'ignore'):
                next_p1 += (
                    (1 - prev_p1 - prev_p0)
                    * (1 - self.u[np.newaxis, np.newaxis, np.newaxis,
                                  np.newaxis, :, np.newaxis]
                       - self.v[np.newaxis, np.newaxis, np.newaxis,
                                np.newaxis, np.newaxis, :])
                    ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                              np.newaxis, np.newaxis]
                    * ss.beta(cond_alpha + self.N[np.newaxis, :, np.newaxis,
                                                  np.newaxis, np.newaxis,
                                                  np.newaxis],
                              cond_beta)
                    / ss.beta(cond_alpha, cond_beta))
            next_p1[mask] = prev_p1[mask]
        elif app == 'beta_custom':
            pass
        elif app == 'beta_numerical':
            scaling = (1 - self.fix_proba[0, last_gen_fix]
                       - self.fix_proba[1, last_gen_fix])
            with np.errstate(invalid = 'ignore', divide = 'ignore'):
                cond_mean = (
                    (self.moments[0, last_gen_fix, ]
                     - self.fix_proba[1, last_gen_fix, ]) / scaling)
                cond_var = (
                    (self.moments[1, last_gen_fix, ]
                     + self.moments[0, last_gen_fix, ] ** 2
                     - self.fix_proba[1, last_gen_fix, ]) / scaling
                    - cond_mean ** 2)
            cond_mean[scaling <= 0] = 0.5
            cond_var[scaling <= 0] = 0
            cond_var[cond_var < 0] = 0
            with np.errstate(divide = 'ignore', invalid = 'ignore'):
                const = cond_mean * (1 - cond_mean) / cond_var - 1
            const[scaling <= 0] = 0
            const[cond_var == 0] = 0
            cond_alpha = cond_mean * const
            cond_beta = (1 - cond_mean) * const
            mask = (cond_alpha <= 0) | (cond_beta <= 0) | (
                ss.beta(cond_alpha, cond_beta) == 0)
            next_p0 = (
                prev_p0 * (
                    1 - fit(np.zeros(shape = (1, 1, 1, 1, 1, 1, 1)))
                ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                            np.newaxis, np.newaxis]
                + prev_p1 * (
                    1 - fit(np.ones(shape = (1, 1, 1, 1, 1, 1, 1)))
                ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                            np.newaxis, np.newaxis])
            x = kwargs['grid'][:, np.newaxis, np.newaxis, np.newaxis,
                               np.newaxis, np.newaxis, np.newaxis]
            with np.errstate(over = 'ignore'):
                to_int = np.exp(
                    self.N[np.newaxis, :, np.newaxis, np.newaxis,
                           np.newaxis, np.newaxis] * np.log(1 - fit(x))
                    + (cond_alpha - 1) * np.log(x)
                    + (cond_beta - 1) * np.log(1 - x)
                    - ss.betaln(cond_alpha, cond_beta))
            assert np.all((1 - fit(x) < 1))
            assert np.all((1 - fit(x) > 0))
            # replacing inf values by the upper bound in numpy float
            to_int[to_int == np.inf] = np.finfo(np.float64).max
            integrated = np.sum(
                (to_int[:-1, ] + to_int[1:, ]) / 2 * (x[1:, ] - x[:-1, ]),
                axis = 0)
            next_p0 += (
                (1 - prev_p0 - prev_p1) * integrated)
            next_p0 = next_p0[0, ]
            next_p0[mask] = prev_p0[mask]
            next_p1 = (
                prev_p0 * (
                    fit(np.zeros(shape = (1, 1, 1, 1, 1, 1, 1)))
                ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                            np.newaxis, np.newaxis]
                + prev_p1 * (
                    fit(np.ones(shape = (1, 1, 1, 1, 1, 1, 1)))
                ) ** self.N[np.newaxis, :, np.newaxis, np.newaxis,
                            np.newaxis, np.newaxis])
            x = kwargs['grid'][:, np.newaxis, np.newaxis, np.newaxis,
                               np.newaxis, np.newaxis, np.newaxis]
            to_int = np.exp(
                self.N[np.newaxis, :, np.newaxis, np.newaxis,
                       np.newaxis, np.newaxis] * np.log(fit(x))
                + (cond_alpha - 1) * np.log(x)
                + (cond_beta - 1) * np.log(1 - x)
                - ss.betaln(cond_alpha, cond_beta))
            integrated = np.sum(
                (to_int[:-1, ] + to_int[1:, ]) / 2 * (x[1:, ] - x[:-1, ]),
                axis = 0)
            next_p1 += (
                (1 - prev_p0 - prev_p1) * integrated)
            next_p1 = next_p1[0, ]
            next_p1[mask] = prev_p1[mask]
        elif app == 'gauss_numerical':
            pass
        elif app == 'wf_exact':
            pass
        else:
            raise NotImplementedError
        prev_p0 = next_p0
        prev_p1 = next_p1
        last_gen_fix += 1
        if store:
            self.fix_proba[0, last_gen_fix, ] = prev_p0.copy()
            self.fix_proba[1, last_gen_fix, ] = prev_p1.copy()
    ret_mat = np.empty(shape = (2, *self.fix_proba.shape[2:]))
    ret_mat[0, ] = prev_p0
    ret_mat[1, ] = prev_p1
    return ret_mat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def propose(self):\n\n\n fb = 0.0\n changed_any = False\n\n while not changed_any:\n new = copy(self) ## Now we just copy the whole thing\n\n for w in self.all_words():\n if flip(self.propose_p):\n try:\n xp, xfb = self.get_word(w).propose()\n\n changed_any = True\n new.set_word(w, xp)\n fb += xfb\n\n except ProposalFailedException:\n pass\n\n\n return new, fb", "def update(self):\n\n # get states, actions, rewards and total timesteps from memory\n states, actions, R, T = self.memory.get()\n n_ep = len(R)\n\n # compute value estimates for the states\n v = self.critic(states)\n\n # compute advantages (using GAE) and rewards to go\n A, rtg = utils.gae_rtg((R, v, T), self.gam, self.lam)\n\n # store the initial version of both the policy and the log probs of the\n # actions for later comparison with the future versions (needed for PPO)\n policy_old = copy.deepcopy(self.policy)\n log_probs_old = policy_old(states).log_prob(actions)\n\n # sample from a batch of experiences\n # (\"_\" subscript indicates \"sampled from\")\n for (v_, A_, rtg_, log_probs_old_), i in utils.sample_batch((v, A, rtg, log_probs_old), self.batch_size, self.policy_updates):\n log_probs_ = self.policy(states).log_prob(actions)[i]\n\n # estimate ratio between the new log probs and the old ones\n r_ = torch.exp(log_probs_ - log_probs_old_)\n\n l_1 = r_ * A_\n l_2 = torch.clamp(r_, 1-self.eps, 1+self.eps) * A_\n\n # TODO: implement entropy\n # TODO: merge policy and critic\n\n # surragate loss function for PPO\n l_clip = -torch.mean(torch.min(l_1, l_2))\n\n # update the policy\n self.policy_optimizer.zero_grad()\n l_clip.backward(retain_graph=True)\n self.policy_optimizer.step()\n\n # sample a batch of value estimates and the corresponding rewards to go\n # to update the value function.\n for (v_, rtg_), _ in utils.sample_batch((v, rtg), self.batch_size, self.v_updates):\n # compute the loss\n critic_loss = F.mse_loss(v_, rtg_)\n\n # update the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward(retain_graph=True)\n self.critic_optimizer.step()\n\n # clear the memory. 
PPO is an On-Policy method so we don't need these\n # memories anymore\n self.memory.clear()\n\n # return the loss of the value function for display\n return F.mse_loss(v, rtg)", "def enablePileUpCorrection( process, postfix, sequence='patPF2PATSequence'):\n\n enablePileUpCorrectionInPF2PAT( process, postfix, sequence)\n enablePileUpCorrectionInPAT( process, postfix, sequence)", "def doPopEvaluation(self, is_train):\n if is_train:\n my_type = \"TRAINING\"\n else:\n my_type = \"TESTING\"\n no_match = 0 # How often does the population fail to have a classifier that matches an instance in the data.\n tie = 0 # How often can the algorithm not make a decision between classes due to a tie.\n cons.env.resetDataRef( is_train ) # Go to the first instance in dataset\n phenotype_list = cons.env.format_data.action_list\n #----------------------------------------------\n class_accuracies = {}\n for each in phenotype_list:\n class_accuracies[each] = ClassAccuracy()\n #----------------------------------------------\n if is_train:\n instances = cons.env.format_data.numb_train_instances\n else:\n instances = cons.env.format_data.numb_test_instances\n #----------------------------------------------------------------------------------------------\n for _ in range(instances):\n if is_train:\n state_action = cons.env.getTrainInstance()\n else:\n state_action = cons.env.getTestInstance()\n #-----------------------------------------------------------------------------\n self.population.makeEvalMatchSet(state_action[0])\n prediction = Prediction(self.population, True)\n selected_action = prediction.getDecision()\n #-----------------------------------------------------------------------------\n\n if selected_action == None:\n no_match += 1\n elif selected_action == 'Tie':\n tie += 1\n else: #Instances which failed to be covered are excluded from the accuracy calculation\n for each in phenotype_list:\n is_correct = False\n accurate_action = False\n right_action = state_action[1]\n if each == right_action:\n is_correct = True\n if selected_action == right_action:\n accurate_action = True\n class_accuracies[each].updateAccuracy(is_correct, accurate_action)\n\n self.population.clearSets()\n #----------------------------------------------------------------------------------------------\n #Calculate Standard Accuracy--------------------------------------------\n correct_cases = class_accuracies[phenotype_list[0]].T_myClass + class_accuracies[phenotype_list[0]].T_otherClass\n incorrect_cases = class_accuracies[phenotype_list[0]].F_myClass + class_accuracies[phenotype_list[0]].F_otherClass\n accuracy = float(correct_cases) / float(correct_cases + incorrect_cases)\n\n #Calculate Balanced Accuracy---------------------------------------------\n T_mySum = 0\n T_otherSum = 0\n F_mySum = 0\n F_otherSum = 0\n for each in phenotype_list:\n T_mySum += class_accuracies[each].T_myClass\n T_otherSum += class_accuracies[each].T_otherClass\n F_mySum += class_accuracies[each].F_myClass\n F_otherSum += class_accuracies[each].F_otherClass\n balanced_accuracy = ((0.5*T_mySum / (float(T_mySum + F_otherSum)) + 0.5*T_otherSum / (float(T_otherSum + F_mySum)))) # BalancedAccuracy = (Specificity + Sensitivity)/2\n\n #Adjustment for uncovered instances - to avoid positive or negative bias we incorporate the probability of guessing a phenotype by chance (e.g. 
50% if two phenotypes)\n prediction_fail = float(no_match)/float(instances)\n prediction_ties = float(tie)/float(instances)\n covered_instances = 1.0 - prediction_fail\n prediction_made = 1.0 - (prediction_fail + prediction_ties)\n\n adjusted_accuracy = (accuracy * prediction_made) + ((1.0 - prediction_made) * (1.0 / float(len(phenotype_list))))\n adjusted_balanced_accuracy = (balanced_accuracy * prediction_made) + ((1.0 - prediction_made) * (1.0 / float(len(phenotype_list))))\n\n #Adjusted Balanced Accuracy is calculated such that instances that did not match have a consistent probability of being correctly classified in the reported accuracy.\n print(\"-----------------------------------------------\")\n print(str(my_type)+\" Accuracy Results:-------------\")\n print(\"Instance Coverage = \"+ str(covered_instances*100.0)+ '%')\n print(\"Prediction Ties = \"+ str(prediction_ties*100.0)+ '%')\n print(str(correct_cases) + ' out of ' + str(instances) + ' instances covered and correctly classified.')\n print(\"Standard Accuracy (Adjusted) = \" + str(adjusted_accuracy))\n print(\"Balanced Accuracy (Adjusted) = \" + str(adjusted_balanced_accuracy))\n #Balanced and Standard Accuracies will only be the same when there are equal instances representative of each phenotype AND there is 100% covering.\n result = [adjusted_balanced_accuracy, covered_instances]\n return result", "def compute_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n probabilities[possible_target_location] = integrate.quad(\n integral_function,\n -np.inf, np.inf,\n args=(possible_target_location,Dprime_map[fixation]),\n epsabs=0,\n limit=100,\n full_output=1\n )[0] #MOD Dprime_map deleted\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def policy_eval():\r\n \r\n action_prob = [0.125, 0.625, 0.125, 0.125]# actions with probabilities\r\n data = grid_world()\r\n state_axis = np.zeros((9, 9))#initialize states\r\n threshold = .1\r\n prior_state = np.ones((9, 9))\r\n \r\n while np.abs(state_axis - prior_state).max() > threshold:\r\n for x, y in product(range(9), repeat=2):\r\n prior_state = state_axis.copy()\r\n if data.array[x, y] == 'X':\r\n continue\r\n updated_values = [data.next_direction(np.array([x, y]), next_move)\r\n for next_move in data.directions]#Updating states with directions\r\n Sum_Expectation = np.dot(action_prob,\r\n [points_val + 0.9 * state_axis[position[0], position[1]]\r\n for position, points_val in updated_values])\r\n state_axis[x, y] = Sum_Expectation\r\n print(\"\\nExercise 3.1 Shows Value functions for the policy\\n\")\r\n print(state_axis)\r\n build_grid(state_axis, \"Shows Value functions for the policy\")", "def boost_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n Lib_c.set_target(possible_target_location)\n probabilities[possible_target_location] = integrate.quad(\n Lib_c.function,\n -np.inf, np.inf,\n epsabs=0,\n limit=50,\n full_output=1\n )[0]\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def predict_proba_image(self, array_data, w_x, w_y):\n\t\tall_proba = []\n\n\t\tif(self.use_geodesic):\n\t\t\tpan = array_data[0]\n\t\t\tself.geodesic_cost = nd.gaussian_gradient_magnitude(pan, self.geodesic_sigma)\n\n\n\t\tif(self.n_steps_simple is None and self.n_steps_proba is 
None):\n\t\t\tfor i in range(self.n_forests):\n\t\t\t\tif ((i != 0) and self.add_previous_prob):\n\t\t\t\t\tif(self.use_geodesic):\n\t\t\t\t\t\tproba = self.geodesic(proba)\n\t\t\t\t\tarray_data = numpy.concatenate((array_data,proba))\n\t\t\t\tproba = self.forests_[i].predict_proba_image(array_data, w_x, w_y)\n\t\t\t\tall_proba.append(proba)\n\t\telse:\n\t\t\ti = 0\n\t\t\tdone = True\n\t\t\tfor step_proba in range(self.n_steps_proba):\n\t\t\t\tfor step_simple in range(self.n_steps_simple):\n\t\t\t\t\tif (step_proba != 0) and (step_simple == 0):\n\t\t\t\t\t\tproba = self.forests_[i].predict_proba_image(array_data, w_x, w_y)\n\t\t\t\t\t\tif(self.use_geodesic):\n\t\t\t\t\t\t\tproba = self.geodesic(proba)\n\t\t\t\t\t\t#if use_geodesic\n\t\t\t\t\t\tarray_data = numpy.concatenate((array_data,proba))\n\t\t\t\t\t#if (step_proba != 0) and (step_simple=0):\n\t\t\t\t\tproba = self.forests_[i].predict_proba_image(array_data, w_x, w_y)\n\t\t\t\t\ti +=1\n\t\t\t\t#for step_simple\n\t\t\t#for step_proba\n\n\t\tif(self.fusion == \"mean\"):\n\t\t\tfor j in range(1, len(all_proba)):\n\t\t\t\tproba += all_proba[j]\n\t\t\treturn proba / self.n_forests\n\t\telse: # (fusion ==\"last\"):\n\t\t\treturn proba", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) 
where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def create_program(fe: FitnessEvaluator, max_len: int) -> str:\n\n # mut_prob = {\"<\": 0.8, \">\": 0.8, \"+\": 0.6, \"-\": 0.6, \"[\": 0.1, \"]\": 0.1}\n\n # new_population: List[Program] = []\n\n # k = 1000\n # N = 0.5 # N is top percentile for selection process\n\n converges = True\n gen_no = 0\n\n while 1:\n k = 1000 # k represents the initial population size\n gen_no = gen_no + 1\n print(gen_no)\n if gen_no == 100:\n converges = True\n gen_no = 0\n\n # generate initial random, score initial random, add to population\n if converges:\n converges = False\n population: List[Program] = []\n res = generate_random(fe, max_len, k, population)\n if res != \"\":\n # print(\"from RANDOM\")\n return res\n\n new_population: List[Program] = []\n ct = [0]\n\n while ct[0] != k:\n weights = populate_weights(k, population)\n\n population.sort(key=lambda program: program.score)\n\n selected = random.choices(population, weights=weights, k=k//2)\n selected.sort(key=lambda program: program.score)\n\n if bad_average(selected):\n k = 0\n converges = True\n gen_no = False\n break\n\n res = select(new_population, selected, fe, k//2, ct)\n if res != \"\":\n return res\n\n for i in range(k):\n population[i] = 
new_population[i]", "def edit_probs(result):\n for i in range(TOP_E):\n p = result.data[i][1]\n p = round(p, 4)\n # p_str = str(p)[1:]\n result.data[i][1] = p\n\n return result", "def render(self, wfe=True):\n # optimization (or not):\n # actuators is small, say 40x40\n # while poke_arr is ~= 10x the resolution (400x400)\n #\n # it is most optimal to set the values of poke_arr based on the mask\n # however, for such small arrays it makes little difference and the\n # code appears much less expressive\n # what is here is ~99.1% of the speed with better legibility\n\n # potential \"bug\" - it is assumed the content of actuators_work\n # where the actuators are masked off is zero, or whatever the desired\n # sticking value is. If the expected behavior for masked actuators\n # changes over the life of this instance, the user may be surprised\n # OTOH, it may be a \"feature\" that stuck actuators, etc, may be\n # adjusted in this way rather elegantly\n self.poke_arr[self.iyy, self.ixx] = self.actuators\n\n # self.dx is unused inside apply tf, but :shrug:\n sfe = apply_transfer_functions(self.poke_arr, None, self.tf, shift=False)\n if self.needs_rot:\n warped = warp(sfe, self.projx, self.projy)\n else:\n warped = sfe\n if wfe:\n warped *= (2*self.obliquity)\n\n if self.upsample != 1:\n warped = fourier_resample(warped, self.upsample)\n\n self.Nintermediate = warped.shape\n\n if warped.shape[0] < self.Nout[0]:\n # need to pad\n warped = pad2d(warped, out_shape=self.Nout)\n elif warped.shape[0] > self.Nout[1]:\n warped = crop_center(warped, out_shape=self.Nout)\n\n return warped", "def optimize_weights(self, generations):\n for gen in range(generations):\n print(\" Generation: %s\" % gen)\n self._pop_f1 = 0\n self._queue_search(self.population)\n self._queue.join()\n self._scores = {}\n while not self._results.empty():\n (index, f1) = self._results.get()\n self._scores[index] = f1\n self._pop_f1 += f1\n ranks = sorted(range(self.population_size), key=lambda s: (self._scores.get(s)))\n self._report(ranks)\n self._next_generation(ranks)", "def process(inpt, num_gens, display=False):\n # first line is initial state\n r = re.compile(\"([.#]{5}) => ([.#])\")\n \n state = inpt[0][15:].rstrip(\"\\n\")\n first_n = 0\n rules = dict(tuple(r.match(row).groups()) for row in inpt[2:])\n gen = 0\n print(rules)\n\n print(gen,first_n,state)\n\n states = [(first_n, state, gen)]\n min_n = -20\n s = None\n \n for i in tqdm.tqdm(range(num_gens)):\n state, first_n = generation(state, first_n, rules)\n gen += 1\n min_n = min(min_n, first_n)\n s = score(state, first_n)\n if display:\n print (s, ('.' 
* (first_n - min_n)) + state)\n else:\n print(s)\n\n return s", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def cal_shap_process(period, config):\n print(\"Shapley calculation begins in periods: {} >>> pid={}, ppid={}\".format(period, os.getpid(), os.getppid()))\n save_path = gen_path(config.shapley_path, filename=str(period) + '.npy')\n print('saving to', save_path)\n data_dict = generate_dataloader(config, period, val_length=config.val_length, train_length=config.train_length)\n if config.network == 'MLPwithGAN':\n model = MLPwithGAN(config)\n elif config.network == 'MLP':\n model = MLP(config)\n else:\n raise Exception('Unknown model type:{}'.format(config.network))\n model.load_state_dict(\n torch.load(gen_path(config.all_factor_path, str(period), 'model', filename=config.network + '.pkl')))\n model.to(config.device)\n model.eval()\n\n dataloader_train = data_dict['train'][0]\n train_iterator = iter(dataloader_train)\n dataloader_val = data_dict['val'][0]\n val_iterator = iter(dataloader_val)\n (_train_X, _, _) = next(train_iterator)\n background = torch.Tensor(_train_X).to(config.device)\n (_val_X, _, _) = next(val_iterator)\n sample = torch.Tensor(_val_X).to(config.device)\n e = shap.DeepExplainer(model, background)\n shap_values = e.shap_values(sample)\n features_shap = shap_values.mean(axis=0)\n sort_index = np.argsort(-np.abs(features_shap)).astype(int)\n\n save_txt = gen_path(config.shapley_path, filename=str(period) + '.txt')\n print('saving to ', save_txt)\n np.savetxt(save_txt, sort_index, fmt='%d')\n np.savetxt(save_txt.replace('.txt', '.npy'), sort_index, fmt='%d')\n\n del data_dict\n del model\n torch.cuda.empty_cache()\n print('--------Period {:d} shapley values calculation finished--------'.format(period))\n print(\"Shapley calculation process ends in periods: {}>>>\".format(period))", "def generate_and_store_game_by_policy(self, policy):\n while self.ended != True:\n actions = self.search_possible_steps()\n next_states = self.get_next_states()\n action = policy.pai(self.board)\n # next_state = self.play(action)\n # next_value = db.find_value(next_state)\n print(self.board)", "def compute_pg_loss(asv_predictions, cm_predictions, targets, is_spoof, args):\n\n # Sample actions\n asv_actions = None\n cm_actions = None\n\n if args.deterministic:\n # Threshold at 0.5\n asv_actions = asv_predictions > 0.5\n cm_actions = cm_predictions > 0.5\n else:\n # Stochastic actions\n asv_actions = torch.rand_like(asv_predictions) < asv_predictions\n cm_actions = torch.rand_like(cm_predictions) < cm_predictions\n\n # Only pass user when both are good with passing the user\n joint_action = asv_actions & cm_actions\n\n # Check if actions were TA/TR/FA/FR, and reward accordingly\n reward_model = REWARD_MODELS[args.reward_model]\n targets = targets.bool()\n is_spoof = is_spoof.bool()\n rewards = torch.zeros_like(targets).float()\n\n # True accept (both are one)\n rewards[joint_action & targets] = reward_model.ta\n # True reject (both are zero)\n rewards[(~joint_action) & (~targets)] = reward_model.tr\n # False accept (action is true but target is 
zero), non-targets\n rewards[((joint_action & (~targets)) & (~is_spoof))] = reward_model.fa\n # False accept (action is true but target is zero), spoofed samples\n rewards[((joint_action & (~targets)) & is_spoof)] = reward_model.fa_spoof\n # False reject (action is neg but target is one)\n rewards[(~joint_action) & targets] = reward_model.fr\n\n # Apply priors\n if args.priors:\n rewards[targets] *= reward_model.p_target\n rewards[(~targets) & (~is_spoof)] *= reward_model.p_nontarget\n rewards[is_spoof] *= reward_model.p_spoof\n\n # Compute PG loss:\n # E[log pi * R]\n # We only have one step in our \"MDP\", so no need to\n # discount or anything here.\n # Assumption: ASV and CM actions are independent\n\n # Case 1: joint_action is true\n # Joint probability is asv_prediction * cm_prediction,\n # as these values are directly \"probability of selecting true\"\n # Case 2: joint_action is false\n # Either asv or cm action was false or both were false.\n # => not_asv * cm + asv * not_cm + not_asv * not_cm\n\n # First calculate Case 2 for all and then replace\n # correct parts with Case 1 calculation\n joint_action_p = (\n (1 - asv_predictions) * cm_predictions +\n asv_predictions * (1 - cm_predictions) +\n (1 - asv_predictions) * (1 - cm_predictions)\n )\n # Replace with Case 1\n joint_action_p[joint_action] = (\n (asv_predictions * cm_predictions)[joint_action]\n )\n\n # Clip probabilities\n if args.clip_probability:\n # Where rewards are positive, clip from above\n high_p_idx = (joint_action_p > PROBABILITY_CLIP) & (rewards > 0)\n low_p_idx = (joint_action_p < (1 - PROBABILITY_CLIP)) & (rewards < 0)\n joint_action_p[high_p_idx] = PROBABILITY_CLIP\n joint_action_p[low_p_idx] = (1 - PROBABILITY_CLIP)\n\n # Standardize rewards\n if args.standardize_rewards:\n rewards = (rewards - torch.mean(rewards)) / torch.std(rewards)\n\n # PG objective. Note that this is supposed to be maximized\n pg_objective = torch.log(joint_action_p + 1e-5) * rewards\n # Optimizer outside this function will try to minimized\n # returned value. 
Maximizing objective == minimizing\n # negative of it.\n pg_loss = -pg_objective\n\n # Mean over batch\n pg_loss = torch.mean(pg_loss)\n\n return pg_loss", "def interpolation_trigram_model(list_of_words, unigram_count, bigram_count, trigram_count, N=count_token(), lambda1=None, lambda2=None, lambda3=None):\n\n # A modifier\n assert 0 < lambda3 <= 1, \"wrong value\"\n assert 0 < lambda2 <= 1, \"wrong value\"\n assert 0 < lambda1 <= 1, \"wrong value\"\n assert 0 < lambda1 + lambda2 + lambda3 <= 1, \"wrong value\"\n c_start = list_of_words.count(start_phrase)\n c_end = list_of_words.count(end_phrase)\n if c_start == 0:\n list_of_words.insert(0, start_phrase)\n list_of_words.insert(0, start_phrase)\n if c_start == 1:\n list_of_words.insert(0, start_phrase)\n if c_end == 0:\n list_of_words.append(end_phrase)\n list_of_words.append(end_phrase)\n if c_end == 1:\n list_of_words.append(end_phrase)\n uni_count = pd.read_csv(unigram_count)\n bigram_count = pd.read_csv(bigram_count)\n trigram_count = pd.read_csv(trigram_count)\n\n proba_dict = {list_of_words[i] + \" \" + list_of_words[i + 1] + \" \" + list_of_words[i + 2]:\n lambda1 * ((trigram_count[list_of_words[i] + \" \" + list_of_words[i + 1] + \" \" + list_of_words[i + 2]].values[0]) / float(bigram_count[list_of_words[i] + \" \" + list_of_words[i + 1]].values[0])) +\n lambda2 * (bigram_count[list_of_words[i] + \" \" + list_of_words[i + 1]].values[0] / float(uni_count[list_of_words[i]].values[0])) +\n lambda3 * (uni_count[list_of_words[i]].values[0] / float(N))\n if list_of_words[i] + \" \" + list_of_words[i + 1] + \" \" + list_of_words[i + 2] in trigram_count.columns.values\n else lambda2 * (bigram_count[list_of_words[i] + \" \" + list_of_words[i + 1]].values[0] / float(uni_count[list_of_words[i]].values[0])) +\n lambda3 * (uni_count[list_of_words[i]].values[0] / float(N))\n if list_of_words[i] + \" \" + list_of_words[i + 1] in bigram_count.columns.values\n else lambda3 * (uni_count[list_of_words[i]].values[0] / float(N)) for i in xrange(len(list_of_words) - 2)}\n return proba_dict", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - 
self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def handlePileup(self):\n # find out local site SE name\n siteConfig = loadSiteLocalConfig()\n PhEDExNodeName = siteConfig.localStageOut[\"phedex-node\"]\n self.logger.info(\"Running on site '%s', local PNN: '%s'\", siteConfig.siteName, PhEDExNodeName)\n\n pileupDict = self._getPileupConfigFromJson()\n\n # 2011-02-03 according to the most recent version of instructions, we do\n # want to differentiate between \"MixingModule\" and \"DataMixingModule\"\n mixModules, dataMixModules = self._getPileupMixingModules()\n\n # 2011-02-03\n # on the contrary to the initial instructions (wave), there are\n # going to be only two types of pileup input datasets: \"data\" or \"mc\"\n # unlike all previous places where pileupType handled in a flexible\n # way as specified in the configuration passed by the user, here are\n # the 
two pileupTypes hardcoded: and we are going to add the \"mc\"\n # datasets to \"MixingModule\"s and only add the \"data\" datasets to the\n # \"DataMixingModule\"s\n\n # if the user in the configuration specifies different pileup types\n # than \"data\" or \"mc\", the following call will not modify anything\n self._processPileupMixingModules(pileupDict, PhEDExNodeName, dataMixModules, \"data\")\n self._processPileupMixingModules(pileupDict, PhEDExNodeName, mixModules, \"mc\")\n\n return", "def center_of_gravity_evaluation(F_PERC, P_PERC, afg, awg, mw, ed, ui, bi):\n\n max_seg_n = np.max([np.amax(afg.fuse_seg_nb), np.amax(awg.wing_seg_nb)])\n t_nb = afg.fus_nb + awg.w_nb # Number of parts not counting symmetry\n tot_nb = afg.fuse_nb + awg.wing_nb # Number of parts counting symmetry\n segments_nb = []\n fuse_fuel_vol = 0\n pass_vol = 0\n\n for i in range(1, afg.fus_nb + 1):\n segments_nb.append(afg.fuse_seg_nb[i - 1])\n if ui.F_FUEL[i - 1]:\n fuse_fuel_vol += afg.fuse_fuel_vol[i - 1]\n if np.all(afg.cabin_seg[:, i - 1]) == 1:\n pass_vol += afg.fuse_vol[i - 1]\n else:\n pass_vol += afg.fuse_cabin_vol[i - 1]\n\n htw = 0\n x0 = 0\n s = 0\n for i in range(1, awg.w_nb + 1):\n segments_nb.append(awg.wing_seg_nb[i - 1])\n if awg.wing_sym[i - 1] != 0:\n segments_nb.append(awg.wing_seg_nb[i - 1])\n s += 1\n if awg.is_horiz[i - 1 + s]:\n if i != awg.main_wing_index:\n htw = i\n else:\n x = np.amax(awg.wing_center_seg_point[:, i + s - 1, 0])\n if x > x0:\n tw = i\n x0 = x\n\n mass_seg_i = np.zeros((max_seg_n, tot_nb))\n oem_vol = (awg.wing_tot_vol - awg.wing_fuel_vol) + (np.sum(afg.fuse_vol) - fuse_fuel_vol)\n\n # Evaluating oem density, fuel density, passenger density\n if bi.USER_EN_PLACEMENT:\n oem_par = (mw.operating_empty_mass - mw.mass_engines) / oem_vol\n en = mw.mass_engines\n else:\n oem_par = mw.operating_empty_mass / oem_vol\n en = 0\n\n mpass_par = (mw.mass_payload * (P_PERC / 100.0)) / pass_vol\n\n mfuel_par = (mw.mass_fuel_tot * (F_PERC / 100.0)) / (awg.wing_fuel_vol + fuse_fuel_vol)\n\n mtom = (\n mw.operating_empty_mass\n + mw.mass_payload * (P_PERC / 100)\n + mw.mass_fuel_tot * (F_PERC / 100)\n - en\n )\n\n # Definition of the mass of each segment\n ex = False\n wg = []\n for i in range(1, afg.fus_nb + 1):\n if ui.F_FUEL[i - 1]:\n for j in range(1, afg.fuse_seg_nb[i - 1] + 1):\n mass_seg_i[j - 1][i - 1] = (\n oem_par + (mfuel_par * ui.F_FUEL[i - 1] / 100)\n ) * afg.fuse_seg_vol[j - 1][i - 1]\n else:\n for j in range(1, afg.fuse_seg_nb[i - 1] + 1):\n if int(afg.cabin_seg[j - 1][i - 1]) == 1:\n mass_seg_i[j - 1][i - 1] = (oem_par + mpass_par) * afg.fuse_seg_vol[j - 1][\n i - 1\n ]\n else:\n mass_seg_i[j - 1][i - 1] = oem_par * afg.fuse_seg_vol[j - 1][i - 1]\n w = 0\n for i in range(afg.fus_nb + 1, t_nb + 1):\n for j in range(1, awg.wing_seg_nb[i - 1 - afg.fus_nb] + 1):\n if awg.is_horiz[i + w - 1 - afg.fus_nb]:\n mass_seg_i[j - 1][i - 1 + w] = oem_par * (\n awg.wing_seg_vol[j - 1][i - 1 - afg.fus_nb]\n - awg.wing_fuel_seg_vol[j - 1][i - 1 - afg.fus_nb]\n ) + mfuel_par * (awg.wing_fuel_seg_vol[j - 1][i - 1 - afg.fus_nb])\n else:\n mass_seg_i[j - 1][i - 1 + w] = (\n oem_par * awg.wing_seg_vol[j - 1][i - 1 - afg.fus_nb]\n )\n wg.append(i - afg.fus_nb)\n if awg.wing_sym[i - 1 - afg.fus_nb] != 0:\n w += 1\n mass_seg_i[:, i - 1 + w] = mass_seg_i[:, i - 2 + w]\n wg.append(i - afg.fus_nb)\n if i + w == tot_nb:\n break\n # Mass check\n while not ex:\n if abs(round(mtom, 3) - round(np.sum(mass_seg_i), 3)) < 0.0001:\n ex = True\n else:\n mass = (round(mtom, 3) - round(np.sum(mass_seg_i), 3)) / 2\n 
if not ed.WING_MOUNTED:\n if htw != 0:\n a = wg.index(htw)\n else:\n a = wg.index(tw)\n else:\n a = wg.index(awg.main_wing_index)\n mass_seg_i[0][afg.fuse_nb + a] = mass_seg_i[0][afg.fuse_nb + a] + mass\n if awg.is_horiz[a]:\n mass_seg_i[0][afg.fuse_nb + a + 1] = mass_seg_i[0][afg.fuse_nb + a + 1] + mass\n else:\n mass_seg_i[0][afg.fuse_nb + a] = mass_seg_i[0][afg.fuse_nb + a] + mass\n\n awg.wing_center_seg_point.resize(max_seg_n, awg.wing_nb, 3)\n afg.fuse_center_seg_point.resize(max_seg_n, afg.fuse_nb, 3)\n\n airplane_centers_segs = np.concatenate(\n (afg.fuse_center_seg_point, awg.wing_center_seg_point), 1\n )\n\n # CoG evalution\n if bi.USER_EN_PLACEMENT:\n cog_enx = np.sum(ed.EN_PLACEMENT[:, 0] * ed.en_mass)\n cog_eny = np.sum(ed.EN_PLACEMENT[:, 1] * ed.en_mass)\n cog_enz = np.sum(ed.EN_PLACEMENT[:, 2] * ed.en_mass)\n else:\n cog_enx = 0.0\n cog_eny = 0.0\n cog_enz = 0.0\n\n center_of_gravity = []\n center_of_gravity.append(\n round((np.sum(airplane_centers_segs[:, :, 0] * mass_seg_i) + cog_enx) / mtom, 3)\n )\n center_of_gravity.append(\n round((np.sum(airplane_centers_segs[:, :, 1] * mass_seg_i) + cog_eny) / mtom, 3)\n )\n center_of_gravity.append(\n round((np.sum(airplane_centers_segs[:, :, 2] * mass_seg_i) + cog_enz) / mtom, 3)\n )\n\n for i in range(1, 4):\n if abs(center_of_gravity[i - 1]) < 10 ** (-5):\n center_of_gravity[i - 1] = 0.0\n\n return (center_of_gravity, mass_seg_i, airplane_centers_segs)", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def update(self, sample, oppo_target_policy, oppo_policy, parallel=False, logger=None,iter=5):\n obs, acs, rews, next_obs, dones = sample\n\n self.critic_optimizer.zero_grad()\n # if self.alg_types[agent_i] == 'MADDPG':\n if self.discrete_action: # one-hot encode action\n if self.agent_i ==0:\n all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n zip([oppo_target_policy,self.target_policy], next_obs)]\n # all_trgt_acs = [onehot_from_logits(pi(nobs)) for pi, nobs in\n # zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n if self.agent_i ==0:\n all_trgt_acs = [pi(nobs) for pi, nobs in\n zip([self.target_policy,oppo_target_policy], next_obs)]\n else:\n all_trgt_acs = [pi(nobs) for pi, nobs in\n zip([oppo_target_policy,self.target_policy], next_obs)]\n # all_trgt_acs = [pi(nobs) for pi, nobs in zip(self.target_policy,\n # next_obs)]\n trgt_vf_in = torch.cat((*next_obs, *all_trgt_acs), dim=1)\n\n if self.discrete_action:\n target_value = (rews[self.agent_i].view(-1, 1) + self.gamma *\n self.target_critic(trgt_vf_in) *\n (1 - dones[self.agent_i].view(-1, 1))) #change after\n else:\n target_value = (rews[self.agent_i].view(-1, 1) + self.gamma *self.target_critic(trgt_vf_in)*(dones.view(-1, 1)))\n\n 
vf_in = torch.cat((*obs, *acs), dim=1)\n actual_value = self.critic(vf_in)\n vf_loss = MSELoss(actual_value, target_value.detach())\n vf_loss.backward()\n\n torch.nn.utils.clip_grad_norm(self.critic.parameters(), 0.5)\n self.critic_optimizer.step()\n\n self.policy_optimizer.zero_grad()\n\n if self.discrete_action:\n curr_pol_out = self.policy(obs[self.agent_i])\n curr_pol_vf_in = gumbel_softmax(curr_pol_out, hard=True)\n else:\n curr_pol_out = self.policy(obs[self.agent_i])\n curr_pol_vf_in = curr_pol_out\n\n all_pol_acs = []\n if self.discrete_action:\n if self.agent_i == 0:\n all_pol_acs.append(curr_pol_vf_in)\n all_pol_acs.append(onehot_from_logits(oppo_policy(obs[1])))\n else:\n all_pol_acs.append(onehot_from_logits(oppo_policy(obs[0])))\n all_pol_acs.append(curr_pol_vf_in)\n else:\n if self.agent_i == 0:\n all_pol_acs.append(curr_pol_vf_in)\n all_pol_acs.append(oppo_policy(obs[1]))\n else:\n all_pol_acs.append(oppo_policy(obs[0]))\n all_pol_acs.append(curr_pol_vf_in)\n\n #\n # for i, ob in zip(range(self.nagents), obs):\n # if i == self.agent_i-1:\n # all_pol_acs.append(curr_pol_vf_in)\n # elif self.discrete_action:\n # all_pol_acs.append(onehot_from_logits(self.policy(ob)))\n # else:\n # all_pol_acs.append(self.policy(ob))\n\n vf_in = torch.cat((*obs, *all_pol_acs), dim=1)\n\n pol_loss = -self.critic(vf_in).mean()\n pol_loss += (curr_pol_out**2).mean() * 1e-3\n pol_loss.backward()\n total_norm=0\n for p in self.policy.parameters():\n param_norm = p.grad.data.norm(2)\n total_norm += param_norm.item() ** 2\n total_norm = total_norm ** (1. / 2)\n torch.nn.utils.clip_grad_norm(self.policy.parameters(), 0.5)\n self.policy_optimizer.step()", "def do_instance_pruning(self):\n\n # retrieve the probability of predicting fraud for each model (K models)\n # size: K x ChunkSize x 2 (2 for binary labels)\n predict_proba_fraud = [-1] * self.K\n\n # for each instance in the data chunk\n for i, instance in enumerate(self.y_chunk):\n sum_weight = 0\n current_F = 0\n F_vect = np.zeros(self.K) # Fk at each stage\n\n # compute F_k(y) for k = 1...K - the classifiers are sorted in DESCENDING order of weights\n k = -1\n for model in self.models.islice(start=0, stop=self.K, reverse=True):\n k += 1\n clf = model.clf\n sum_weight += model.weight\n\n # compute the current probability\n # if the probability is not initialized we call the `predict_proba` method\n if (type(predict_proba_fraud[k]) is int and predict_proba_fraud[k] == -1) \\\n or (predict_proba_fraud[k].shape[0] != self.S):\n predict_proba_fraud[k] = clf.predict_proba(self.X_chunk)\n\n # check if we have the probabilities of 2 labels (because we're working with BINARY classification)\n # if we don't have the probability of predicting fraud it will be 0 so we don't do anything\n if len(predict_proba_fraud[k][i]) == 2:\n current_F += model.weight * predict_proba_fraud[k][i][1]\n\n # (2) compute the Fk for each example seen at each stage\n F_k = current_F / sum_weight\n F_vect[k] = F_k\n\n # (3) compute the error\n err_x = F_vect - F_vect[-1]\n\n # (4) update the mean and the variance of the error of these training examples for each bin (i,k)\n # we look at the error at each step for the given example\n for k, err in enumerate(err_x):\n # 1 --> we assign Fk to the corresponding bin (i,k) or (j,k)here because we used i index before\n eps = len(self.bins)\n\n for j in range(0, eps):\n if (j / eps) <= F_vect[k] < ((j + 1) / eps):\n self.bins[j][k]['num'] += 1\n\n # 2--> we compute the mean error in this bin\n self.bins[j][k]['mean'] += err\n\n # 2--> we 
compute the variance of the error in this bin\n # (basically we will just compute the squared error and do the division later)\n self.bins[j][k]['var'] += err ** 2\n\n # if we've assigned it to a bin, break and go to the next stage\n break\n\n # after computing everything we do the division by the total number assigned to a bin\n for i in range(0, len(self.bins)):\n # a bit tricky because sometimes we have bins that don't have any input example --> remains at 0\n for k in range(self.K):\n if self.bins[i][k]['num'] > 0:\n # divide the sum of error by the number of examples in the bin\n self.bins[i][k]['mean'] = self.bins[i][k]['mean'] / self.bins[i][k]['num']\n\n # compute the variance\n self.bins[i][k]['var'] = (self.bins[i][k]['var'] / self.bins[i][k]['num']) - \\\n (self.bins[i][k]['mean']) ** 2", "def ask(self, number=None, xmean=None, sigma_fac=1):\r\n pop_geno = self.ask_geno(number, xmean, sigma_fac)\r\n\r\n\r\n # N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!\r\n # new data: 11.5s vs 9.5s == 20%\r\n # TODO: check here, whether this is necessary?\r\n # return [self.gp.pheno(x, copy=False, bounds=self.gp.bounds) for x in pop] # probably fine\r\n # return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved\r\n # pop_pheno = [Solution(self.gp.pheno(x, copy=False), copy=False).repair(self.gp.bounds) for x in pop_geno]\r\n pop_pheno = [self.gp.pheno(x, copy=True, bounds=self.gp.bounds) for x in pop_geno]\r\n\r\n if not self.gp.isidentity or use_sent_solutions: # costs 25% in CPU performance with N,lambda=20,200\r\n # archive returned solutions, first clean up archive\r\n if self.countiter % 30/self.popsize**0.5 < 1:\r\n self.sent_solutions.truncate(0, self.countiter - 1 - 3 * self.N/self.popsize**0.5)\r\n # insert solutions\r\n for i in xrange(len(pop_geno)):\r\n self.sent_solutions[pop_pheno[i]] = {'geno': pop_geno[i],\r\n 'pheno': pop_pheno[i],\r\n 'iteration': self.countiter}\r\n return pop_pheno", "def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):\r\n assert vis or not test_data.shuffle\r\n data_names = [k[0] for k in test_data.provide_data]\r\n\r\n i = 0\r\n t = time.time()\r\n imdb_boxes = list()\r\n original_boxes = list()\r\n for im_info, data_batch in test_data:\r\n t1 = time.time() - t\r\n t = time.time()\r\n\r\n scale = im_info[0, 2]\r\n scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)\r\n t2 = time.time() - t\r\n t = time.time()\r\n\r\n # assemble proposals\r\n dets = np.hstack((boxes, scores))\r\n original_boxes.append(dets)\r\n\r\n # filter proposals\r\n keep = np.where(dets[:, 4:] > thresh)[0]\r\n dets = dets[keep, :]\r\n imdb_boxes.append(dets)\r\n\r\n if vis:\r\n vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)\r\n\r\n logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +\r\n 'proposal %d ' % (dets.shape[0]) +\r\n 'data %.4fs net %.4fs' % (t1, t2))\r\n i += 1\r\n\r\n assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'\r\n\r\n # save results\r\n rpn_folder = os.path.join(imdb.root_path, 'rpn_data')\r\n if not os.path.exists(rpn_folder):\r\n os.mkdir(rpn_folder)\r\n\r\n rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')\r\n with open(rpn_file, 'wb') as f:\r\n cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)\r\n\r\n if thresh > 0:\r\n full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')\r\n with open(full_rpn_file, 'wb') as f:\r\n cPickle.dump(original_boxes, 
f, cPickle.HIGHEST_PROTOCOL)\r\n\r\n logger.info('wrote rpn proposals to %s' % rpn_file)\r\n return imdb_boxes", "def calc_probabilities(applications):\n sum_advantage = sum(app.get_advantage() for app in applications)\n return [app.get_advantage() / sum_advantage for app in applications]", "def evolve(self, evolutions_to_run, evolution_count = 0):\n\n\n for i in range(evolutions_to_run):\n self.population = self.mating.mate(self.population)\n self.population = mutate.mutate(self.population)\n\n self.population = self.computeFitness.compute(self.population)\n self._sort()\n self.population = self.optimize_best(\n self.population,\n evolution_count\n )\n self._sort()\n self._save_all_chromosomes(evolution_count)\n self.display_population(evolution_count)\n evolution_count += 1" ]
[ "0.48199168", "0.47175395", "0.47008735", "0.46337798", "0.46286178", "0.45170808", "0.45046583", "0.4494042", "0.44697076", "0.44640133", "0.44437954", "0.44316712", "0.44096234", "0.44018438", "0.43755022", "0.43748388", "0.43502957", "0.4346338", "0.43419045", "0.43342885", "0.4333669", "0.4330165", "0.43293446", "0.43179604", "0.43147725", "0.4311653", "0.43077552", "0.4304145", "0.42965823", "0.42872447" ]
0.70409745
0
Sort np.array by column.
def sort_by_col(array, idx=1): order = np.argsort(array[:, idx])[::-1] return array[order]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(headers, data): # extension\n\tcolumn_matrix=data.get_data(headers) # get raw matrix data for numeric values\n\tprint \"\\n before sorting \\n \"\n\tprint column_matrix\n\t\n\tcolumn_matrix=column_matrix.tolist()\n\tcolumn_array=np.asarray(column_matrix)\n\t\n\tcolumn_array.sort(axis=0)\n\t\n\tprint \"\\n \\n done sorting here is your matrix \\n\"\n\t\n\treturn column_array", "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order != Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order == Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "def sort_by_rows(arr):\n return arr[np.lexsort(arr.T[::-1])]", "def sortByColumn(data_file, column_to_sort=1):\n cmp = lambda qvar: float(qvar.split()[column_to_sort-1]) # used in \"sorted\" method\n in_file = open(data_file, \"r\")\n out_file = open(data_file+\".TEMP\", \"w\")\n for a_line in sorted(in_file.readlines(),key=cmp):\n out_file.write(a_line)\n in_file.close()\n out_file.close()\n shutil.copy(data_file+\".TEMP\", data_file)\n os.remove(data_file+\".TEMP\")", "def sort_re_analysis_grid_column(self, column_name, descending_order):\n self.sort_grid_column(self.re_analysis_grid_div_id, column_name, descending_order)", "def sort(self, column_or_label, descending=False, distinct=False):\n column = self._get_column(column_or_label)\n if distinct:\n _, row_numbers = np.unique(column, return_index=True)\n else:\n row_numbers = np.argsort(column, axis=0)\n assert (row_numbers < self.num_rows).all(), row_numbers\n if descending:\n row_numbers = np.array(row_numbers[::-1])\n return self.take(row_numbers)", "def sort_vendors_grid_column(self, column_name, descending_order):\n self.sort_grid_column(self.vendors_div_id, column_name, descending_order)", "def sort_col(col):\n return (col[0], sorted(col[1], key=lambda pair: pair[0]))", "def _sort_dataframe(self, dataframe):\r\n columns = list(dataframe.columns)\r\n columns.sort()\r\n dataframe = dataframe[columns]\r\n return dataframe", "def sort(df, cols):\n if not df:\n return []\n\n return sorted(df, key=lambda row: [row[col] for col in cols])", "def sortby(tree, col, descending):\r\n # grab values to sort\r\n data = [(tree.set(child, col), child) for child in tree.get_children('')]\r\n \r\n #Figure out if this is a float column. 
If it is,\r\n #transform to float so the ordering will be meaningful\r\n try:\r\n \r\n data = [( float(x[0]), x[1]) for x in data]\r\n \r\n except:\r\n \r\n #Nope!\r\n pass\r\n \r\n # reorder data\r\n data.sort(reverse=descending)\r\n for indx, item in enumerate(data):\r\n tree.move(item[1], '', indx)\r\n\r\n # switch the heading so that it will sort in the opposite direction\r\n tree.heading(col,\r\n command=lambda col=col: sortby(tree, col, int(not descending)))", "def sort_col_by_sim(col):\n sims = sorted(col[1], key=lambda pair: (pair[0], pair[1]), reverse=True)\n return (col[0], sims[0:k])", "def sort_column(self, column, reverse):\n data = [(self.treeview.set(child, column), child) for child in self.treeview.get_children(\"\")]\n data.sort(reverse=reverse)\n\n # set sort symbol on column heading\n if reverse:\n self.treeview.heading(column, text=f\"{column} ▼\")\n else:\n self.treeview.heading(column, text=f\"{column} ▲\")\n\n # rearrange items in sorted positions\n for index, (value, child) in enumerate(data):\n self.treeview.move(child, \"\", index)\n\n # reverse sort direction\n self.treeview.heading(column, command=lambda: self.sort_column(column, not reverse))", "def mysort(arr):\n arr.sort(key=int)\n\n return arr", "def sort_by_column(self, columns, ascending=False):\n columns = to_list(columns)\n\n # Create sort criteria list, with each row as tuple of column values\n values = (self.get_column(column, as_list=True) for column in columns)\n values = list(zip(*values))\n assert len(values) == self.size\n\n def sorter(row):\n \"\"\"Sort table by given values, while allowing for disparate types.\n Order priority:\n - Values by typename\n - Numeric types\n - None values\n \"\"\"\n criteria = []\n for value in row[1]: # Ignore enumeration\n criteria.append(\n (\n value is not None,\n \"\" if isinstance(value, Number) else type(value).__name__,\n value,\n )\n )\n return criteria\n\n # Store original index order using enumerate() before sort,\n # and use it to sort data later\n values = sorted(enumerate(values), key=sorter, reverse=not ascending)\n idxs = [value[0] for value in values]\n\n # Re-order data\n self._data = [self._data[idx] for idx in idxs]", "def _sort_column(self, column, reverse):\n if tk.DISABLED in self.state():\n return\n # get list of (value, item) tuple where value is the value in column for the item\n l = [(self.set(child, column), child) for child in self.get_children('')]\n # sort list using the column type\n l.sort(reverse=reverse, key=lambda x: self._column_types[column](x[0]))\n # reorder items\n for index, (val, child) in enumerate(l):\n self.move(child, \"\", index)\n # reverse sorting direction for the next time\n self.heading(column, command=lambda: self._sort_column(column, not reverse))", "def custom_sort(arr):\n pass", "def sort_country_groups_grid_column(self, column_name, descending_order):\n self.sort_grid_column(self.country_groups_grid_div_id, column_name, descending_order)", "def sortby(self, col, descending):\n\t\t# grab values to sort\n\t\ttree = self.albumList\n\t\tdata = [(tree.set(child, col), child) for child in tree.get_children('')]\n\n\t\t# reorder data\n\t\tdata.sort(reverse=descending)\n\t\tfor indx, item in enumerate(data):\n\t\t tree.move(item[1], '', indx)\n\n\t\t# switch the heading so that it will sort in the opposite direction\n\t\ttree.heading(col,\n\t\t command=lambda col=col: self.sortby(col, int(not descending)))", "def sort_re_analysis_detail_grid_column(self, column_name, descending_order):\n 
self.sort_grid_column(self.re_analysis_detail_grid_div_id, column_name, descending_order)", "def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))", "def argsort(self, axis=-1, kind=None, order=None):\n return self.__array__().argsort(axis, kind, order)", "def sort_by(rows: [dict], col_name: str, rev: bool) -> [dict]:\n def get_col(row):\n return row[col_name]\n return sorted(rows, key=get_col, reverse=rev)", "def sortTable(self, table, cols):\n #productive\n profprint()\n for col in reversed(cols):\n table = sorted(table, key=operator.itemgetter(col))\n return table", "def sort(self, column: int, order: Qt.SortOrder = ...) -> None:\n col = self._dataframe.columns[column]\n # Temporary column with display value of column\n self._dataframe['_FOR_SORT'] = self._dataframe.apply(\n lambda row: self._get_cell_value(self._dataframe.index.get_loc(row.name), column), axis=1)\n # Sort by temp column\n self._dataframe.sort_values(by=['_FOR_SORT'], ascending=True if order == 0 else False,\n inplace=True, axis=0)\n # drop temp column\n self._dataframe.drop('_FOR_SORT', axis=1, inplace=True)\n self.layoutChanged.emit()\n super().sort(column, order)", "def cols_sorted(self, cmp=None, key=None, reverse=False):\n return self.select(*sorted(self.names, cmp, key, reverse))", "def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == QtCore.Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))", "def order_columns(dataset, col_names, feature_to_predict):\n col_names.remove(feature_to_predict)\n col_names.sort()\n\n sorted_column_list = [feature_to_predict]\n for col_name in col_names:\n if col_name != feature_to_predict:\n sorted_column_list.append(col_name)\n\n dataset = dataset.reindex(sorted_column_list, axis=1)\n return dataset", "def sort(self, col, order):\n self.layoutAboutToBeChanged.emit()\n self.mylist = sorted(self.mylist,\n key=operator.itemgetter(col))\n if order == Qt.DescendingOrder:\n self.mylist.reverse()\n self.layoutChanged.emit()" ]
[ "0.72792816", "0.68471915", "0.6842164", "0.66743934", "0.6556128", "0.6435283", "0.6383058", "0.63643396", "0.6342741", "0.62759876", "0.6222461", "0.6124097", "0.606763", "0.6065433", "0.60635644", "0.6037835", "0.6035828", "0.5991915", "0.59912336", "0.59626365", "0.5951189", "0.59252566", "0.58844155", "0.5877926", "0.5871428", "0.58580434", "0.5852717", "0.5851886", "0.58474255", "0.58456695" ]
0.7863806
0
Convert row of pd.DataFrame to variables.
def row_to_vars(row): img_id = row["img_id"] conf = row["confidence"] iou = np.array(row["iou"]) difficult = np.array(row["difficult"]) crowd = np.array(row["crowd"]) order = np.argsort(iou)[::-1] return img_id, conf, iou, difficult, crowd, order
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_row(self, row) :\n\n self.row_id += 1\n data = [self.row_id]\n\n if type(row) == type({}) :\n data.extend(row.get(col, None) for col in self.cols[1:])\n elif type(row) in [type([]), type(())] :\n data.extend(row)\n elif type(row) == RowReference :\n data.extend(row.values())\n else :\n raise Exception(\n 'Don''t know how to add row from: %s ' % str(row)\n )\n\n if len(data) != len(self.cols) :\n raise Exception(\n 'Wrong number of values for new row with cols %s: %s' % \n (str(self.cols), str(data))\n \n )\n\n return data", "def convert_data(df):\n print(\"Converting history...\")\n return [ dict(row) for i, row in df.iterrows() ]", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def convert_series(self, column: str, dtype: str) -> TYPE_ROW:\n\n series = self.df[column]\n\n if dtype != \"float\":\n series = series.fillna(NULL)\n\n values = self.force_dtype(series, dtype)\n\n return values", "def data_column_conversion(data:pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n data = data.assign(W = (data.label == 'W') + 0,D = (data.label == 'D') + 0,L = (data.label == 'L') + 0)\n data = data.drop(\"label\",axis=1)\n return data", "def _compute_variables(df: EDAFrame, cfg: Config) -> Dict[str, Any]:\n data: Dict[str, Any] = {}\n # variables\n if cfg.variables.enable:\n for col in df.columns:\n try:\n dtype = df.get_eda_dtype(col)\n # Since it will throw error if a numerical column is all-nan,\n # we transform it to categorical column.\n # We also transform to categorical for small cardinality numerical column.\n if df.get_missing_cnt(col) == df.shape[0]:\n srs = df.get_col_as_str(col, na_as_str=True)\n data[col] = nom_comps(srs, cfg)\n elif isinstance(dtype, (Nominal, GeoGraphy, GeoPoint)):\n data[col] = nom_comps(df.frame[col], cfg)\n elif isinstance(dtype, SmallCardNum):\n srs = df.get_col_as_str(col, na_as_str=False)\n data[col] = nom_comps(srs, cfg)\n elif isinstance(dtype, Continuous):\n data[col] = cont_comps(df.frame[col], cfg)\n # elif isinstance(dtype, DateTime):\n # data[col] = {}\n # data[col][\"stats\"] = calc_stats_dt(df.frame[col])\n # data[col][\"line\"] = dask.delayed(_calc_line_dt)(df.frame[[col]], \"auto\")\n else:\n raise ValueError(f\"unprocessed type in column{col}:{dtype}\")\n except:\n print(f\"error happended in column:{col}\", file=sys.stderr)\n raise\n return data", "def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df", "def preprocess(self, df: pd.DataFrame) -> pd.DataFrame:\n for transform in self.transforms:\n cols = transform(df)\n\n if isinstance(cols, pd.Series):\n df.loc[:, transform.name] = cols\n\n elif isinstance(cols, list):\n for i, col in enumerate(cols):\n\n if not isinstance(col, pd.Series):\n raise ValueError('Invalid Transform return type:', type(cols))\n\n df.loc[:, f'{transform.name}_{i}'] = col\n\n else:\n raise ValueError('Invalid Transform return type:', type(cols))\n return df", "def _extract(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return\n\n if 'extract' not in self._state:\n self._state['extract'] = self._replace_fields(self._args.extract)\n\n r = list(map(self._convert, row))\n return eval(self._state['extract'])", "def numerical(df):\r\n numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()\r\n return numerical_var", "def parse_df(data):\n\n # First column should be the ids\n ids = list(data.iloc[:, 0])\n\n # Second column should hold the labels\n labels = list(data.iloc[:, 
1])\n\n # From third columns, we should have the features\n features = list(data.iloc[:, 2:].values)\n\n return ids, labels, features", "def df_to_inputs(self, df: pd.DataFrame) -> Tuple:\n title_input = self.process_titles(df[\"title\"])\n body_input = self.process_bodies(df[\"body\"])\n feat = df[self.FEATURES].copy()\n return title_input, body_input, feat", "def ConvertRow(self, row):\n i = 0\n data = []\n for entry in row['f']:\n data.append(self.Convert(entry['v'], self.schema[i]))\n i += 1\n return tuple(data)", "def from_dataframe(df):\n X = sm.add_constant(np.array(df['x']))\n y = np.array(df['y']).reshape(-1,1)\n return y, X", "def _df_meta_to_arr(df):\n\n if len(df.columns):\n if isinstance(df.columns[0], str):\n columns = df.columns.values.astype(\"S\")\n else:\n columns = df.columns.values\n else:\n columns = []\n\n if len(df.index):\n if isinstance(df.index[0], str):\n index = df.index.values.astype(\"S\")\n else:\n index = df.index.values\n else:\n index = []\n\n return columns, index", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]", "def preprocess(self, df: pd.DataFrame) -> np.ndarray:\n assert_subset(self.feature_columns, df.columns)\n return df[self.feature_columns].fillna(IMPUTATION_VALUE).values", "def _get_xls_row_vals(self, row):\n return [v.value for v in row]", "def prepare_data(df):\n X = df.drop(\"y\",axis=1)\n\n y=df[\"y\"]\n\n return X, y", "def preprocess_feature(df):", "def convert_series_to_proto_values(row: pd.Series):\n\n feature_row = FeatureRowProto.FeatureRow(\n event_timestamp=_pd_datetime_to_timestamp_proto(\n dataframe[DATETIME_COLUMN].dtype, row[DATETIME_COLUMN]\n ),\n feature_set=feature_set.project + \"/\" + feature_set.name,\n )\n\n for field_name, field in feature_set.fields.items():\n feature_row.fields.extend(\n [\n FieldProto.Field(\n name=field.name,\n value=_python_value_to_proto_value(\n field.dtype, row[field.name]\n ),\n )\n ]\n )\n return feature_row", "def preprocess_dataframe(self, dataframe):\n return dataframe", "def get_values(df):\n return df.columns.values.tolist()", "def tsvRowToDict(row):\n return {col: getattr(row, col) for col in row._columns_}", "def changeVar(self, df):\n\n # \"ArrDelay\" and \"DepDelay\" have string type. 
We cast them to Integer\n df = df.withColumn(\"ArrDelay\", df[\"ArrDelay\"].cast(IntegerType()))\n df = df.withColumn(\"DepDelay\", df[\"DepDelay\"].cast(IntegerType()))\n df = df.withColumn(\"CRSDepTime\", df[\"CRSDepTime\"].cast(IntegerType()))\n df = df.withColumn(\"CRSArrTime\", df[\"CRSArrTime\"].cast(IntegerType()))\n df = df.withColumn(\"DepTime\", df[\"DepTime\"].cast(IntegerType()))\n df = df.withColumn(\"DayOfWeek\", df[\"DayOfWeek\"].cast(IntegerType()))\n\n return df", "def make_numeric(\n df,\n vars_=[\n 'emp',\n 'empszfi',\n 'firmpdemp',\n 'payann',\n 'rcppdemp',\n 'eth_group',\n 'geotype',\n 'rcpszfi',\n 'sex',\n 'vet_group']):\n df[vars_] = df[vars_].apply(pd.to_numeric)\n return df", "def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr", "def pandas2R(df):\n with localconverter(robjects.default_converter + pandas2ri.converter):\n data = robjects.conversion.py2rpy(df)\n return data" ]
[ "0.59126335", "0.56355083", "0.5585188", "0.5584195", "0.5535919", "0.55313474", "0.550168", "0.5469533", "0.54181707", "0.53959996", "0.5389221", "0.5388655", "0.53874075", "0.5379879", "0.53376657", "0.5313978", "0.52934957", "0.5256191", "0.5234749", "0.52314323", "0.52254885", "0.5217036", "0.5204419", "0.5181821", "0.51793724", "0.51741266", "0.5163767", "0.5136989", "0.513658", "0.5126091" ]
0.60037434
0
Check the repos before the backup commences.
def pre_backup_check(repos): for repo in 'local', 'remote': repos[repo].check() # TODO: Check the ordering of this is deterministic most_recent_archive = repos[repo].list_archives()[-1] repos[repo].check_archive(most_recent_archive)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkGit(directory):", "def check_repo(self):\n if not os.path.exists(self.path):\n log.error(\"no dots repository found at '{}'\".format(self.path))\n if not os.path.exists(self.files_path):\n log.error(\"corrupted repository, the 'files' subfolder is missing\")\n if not os.path.exists(self.enc_files_path):\n log.error(\"corrupted repository, the 'encrypted' subfolder is missing\")\n if not os.path.exists(os.path.join(self.path, '.git')):\n log.error(\"corrupted repository, folder exists but is not versioned\")\n self.git_repo = Repo(self.path)", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def init_hook(conduit):\n repos = conduit.getRepos()\n for repo in repos.listEnabled():\n if len(repo.baseurl) == 0:\n continue\n bucket, path = parse_url(repo.baseurl[0])\n if bucket and isinstance(repo, YumRepository):\n check_base_url(repo.baseurl)\n replace_repo(repos, repo)", "def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())", "def precommit(exit=True):\n tmpdir = tempfile.mkdtemp()\n\n try:\n copy_index(tmpdir)\n\n modified = check_output(['git', 'diff', '--cached', '--name-only',\n '--diff-filter=ACMRT'])\n modified = [name.strip() for name in modified.splitlines()]\n path = os.environ['PATH']\n with pushd(tmpdir) as prevdir:\n conf = load_conf()\n # Activate the virtualenv before running checks\n if 'env' in conf:\n binpath = os.path.abspath(os.path.join(prevdir,\n conf['env']['path'],\n 'bin'))\n if binpath not in path.split(os.pathsep):\n path = binpath + os.pathsep + path\n retcode = run_checks(conf.get('hooks_all', []),\n conf.get('hooks_modified', []), modified,\n path)\n\n if exit:\n sys.exit(retcode)\n else:\n return retcode\n finally:\n shutil.rmtree(tmpdir)", "def test_correct_repo(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(SUCCESS_REPO)\n\n assert result.status == Status.SUCCESS\n assert (\n _output.test_result_header(\n \"FiboTest\",\n NUM_FIBO_TESTS,\n NUM_FIBO_TESTS,\n _output.SUCCESS_COLOR,\n )\n in result.msg\n )", "def check_heads(repo, their_heads, context):\n heads = repo.heads()\n heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()\n if not (\n their_heads == [b'force']\n or their_heads == heads\n or their_heads == [b'hashed', heads_hash]\n ):\n # someone else committed/pushed/unbundled while we\n # were transferring data\n raise error.PushRaced(\n b'repository changed while %s - please try again' % context\n )", "def ensure_clean_repo(ctx):\n ctx.runprocess(['git', 'status', '--porcelain'],\n check_stdout='',\n check_stderr='',\n fail_message='Repository %s not clean' % os.getcwd())", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') for it in local_pkgs])\n\n pkgdb_info = pkgdb_pkg_branch()\n\n pkgdb_pkgs = set(pkgdb_info.keys())\n\n ## Commented out as we keep the git of retired packages while they won't\n ## show up in the information retrieved from pkgdb.\n\n #if (local_pkgs - pkgdb_pkgs):\n #print 'Some packages are present locally but not on pkgdb:'\n #print ', '.join(sorted(local_pkgs - pkgdb_pkgs))\n\n if (pkgdb_pkgs - local_pkgs):\n print 'Some packages are present in pkgdb but not locally:'\n print ', '.join(sorted(pkgdb_pkgs - local_pkgs))\n\n tofix = set()\n for pkg in sorted(pkgdb_info):\n pkgdb_branches = pkgdb_info[pkg]\n git_branches = get_git_branch(pkg)\n diff = 
(pkgdb_branches - git_branches)\n if diff:\n print '%s missing: %s' % (pkg, ','.join(sorted(diff)))\n tofix.add(pkg)\n branch_package(pkg, diff)\n\n if tofix:\n print 'Packages fixed (%s): %s' % (\n len(tofix), ', '.join(sorted(tofix)))", "def test_missing_repos(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n fail_fast = true\n \"\"\"\n ).pre_commit(\n \"\"\"\n grepos:\n - hooks:\n - id: whatever\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 331, \" doesn't have the 'repos' root key\")\n )", "def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)", "def _check_repository(self):\n if not os.path.exists(\"%s/.git\" % self._repository_path):\n Repo.clone_from(self.REPOSITORY_ADDRESS, self._repository_path)\n\n self._repo = Repo(self._repository_path)\n self._pull()", "def check_workspace ():\n\n try:\n ex (\"cd $DOC_ROOT/ACE_TAO && git pull -p\")\n print (\"Successfully updated ACE/TAO working copy\")\n except:\n print (\"Unable to update ACE/TAO workspace at \" + doc_root)\n raise\n\n try:\n ex (\"cd $DOC_ROOT/MPC && git pull -p\")\n print (\"Successfully updated MPC working copy to revision \")\n except:\n print (\"Unable to update the MPC workspace at \" + doc_root + \"/ACE/MPC\")\n raise\n\n vprint (\"Repos root URL = \" + opts.repo_root + \"\\n\")\n vprint (\"Repos MPC root URL = \" + opts.mpc_root + \"\\n\")", "def postCheckDeps(self):\n if( self.mode == \"install\" ):\n\n # check for make\n if( not isinPath( \"make\" )):\n self.abort( \"make not found on your system!!\" )\n\n # check for tee\n if( not isinPath( \"tee\" )):\n self.abort( \"tee not found on your system!!\" )", "def initializeBuildArea(self):\r\n\r\n repo_map = self.getRepoStatus()\r\n\r\n for obj in self.config[\"repos\"]:\r\n if obj[\"name\"] not in repo_map:\r\n if \"url\" in obj:\r\n print(\"Checking out code to {} for {}\".format(obj[\"path\"], obj[\"name\"]))\r\n if \"branch\" in obj:\r\n self.cloneGitRepo(obj[\"url\"], obj[\"path\"], obj[\"branch\"])\r\n else:\r\n self.cloneGitRepo(obj[\"url\"], obj[\"path\"])\r\n\r\n else:\r\n print(\"Creating directory : {} for repo : {}\".format(obj[\"path\"], obj[\"name\"]))\r\n makedirs(obj[\"path\"])\r\n\r\n else:\r\n if self.verbose:\r\n print(\"Repo : {}, already exists skipping!!\".format(obj[\"name\"]))", "def test_runs_with_multiple_packages(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(MULTIPLE_PACKAGES_REPO)\n\n assert result.status == Status.SUCCESS", "def run(self):\n USER.info('%s: Checking For Updates', self.recipe.name)\n cur_hash = pakit.conf.IDB[self.recipe.name]['hash']\n if cur_hash == self.recipe.repo.src_hash:\n return\n\n try:\n self.save_old_install()\n InstallTask(self.recipe).run()\n USER.info('%s: Deleting Old Install', self.recipe.name)\n Command('rm -rf ' + self.back_dir).wait()\n except Exception as exc: # pylint: disable=broad-except\n logging.error(exc)\n self.restore_old_install()", "def test_default_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check dev\", exitcode=None)\n self.assertIn(\"Target: ywangd:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)", "def sync ( self, fail_greedy=False ):\n 
all_success = True\n self.logger.debug ( \"Syncing repos ...\" )\n for repo in self.repos:\n self.repo_stats.sync_time.begin ( repo.name )\n if repo.sync ( sync_enabled=self.sync_enabled ):\n self.repo_stats.sync_time.end ( repo.name )\n elif fail_greedy:\n self.repo_stats.sync_time.end ( repo.name )\n return False\n else:\n self.repo_stats.sync_time.end ( repo.name )\n all_success = False\n # -- end for\n return all_success", "def __gitCheckPatches(self):\n self.vcs.gitApplyCheckPatches(self.project.getProjectPath(),\n check=True)", "def patch_repos(self):", "def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())", "def test_check_no_download(self):\n output = self.run_command(\"selfupdate --check\", exitcode=0)\n contains_latest_version = (\"Already at latest version\" in output)\n contains_new_version = (\"New version available\" in output)\n assert (contains_latest_version or contains_new_version)\n self.assertNotIn(\"Url: \", output)\n self.assertNotIn(\"Update completed.\", output)\n self.assertNotIn(\"Failed to update. Please try again.\", output)", "def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty", "def test_statusClean(self):\n reposDir = self.makeRepository(self.tmpDir)\n self.assertTrue(self.createCommand.isStatusClean(reposDir))", "def flight_check():\n for command in ['git']:\n if shutil.which(command) is None:\n raise RuntimeError('command not found: {}'.format(command))\n\n git_rev_parse('HEAD')", "def _checksubrepostate(pushop):\n for n in pushop.outgoing.missing:\n ctx = pushop.repo[n]\n\n if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():\n for subpath in sorted(ctx.substate):\n sub = ctx.sub(subpath)\n sub.verify(onpush=True)", "def test_fail_repo(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(FAIL_REPO)\n\n assert result.status == Status.WARNING\n assert (\n _output.test_result_header(\n \"PrimeCheckerTest\",\n NUM_PRIME_CHECKER_TESTS,\n NUM_PRIME_CHECKER_TESTS - 2,\n _output.FAILURE_COLOR,\n )\n in result.msg\n )", "def updatecheck(self):\n self.comp('packmanager').updatecheck_allpacks()" ]
[ "0.64440554", "0.63667125", "0.63261837", "0.6066444", "0.6035908", "0.60099196", "0.5995399", "0.59861237", "0.5983702", "0.59335965", "0.5928428", "0.5857033", "0.5840177", "0.57988983", "0.5776676", "0.57754505", "0.5773697", "0.57731676", "0.577156", "0.57604474", "0.5744213", "0.5738286", "0.57261056", "0.5714965", "0.5701617", "0.56956965", "0.5692534", "0.5692455", "0.56915945", "0.56730545" ]
0.82011116
0
Perform a backup to the specified repo, and validate the files.
def perform_backup(repo, archive_name, config, logger): repo.backup(archive_name, config['backup_source_paths']) integrity_failure = False for check in config.get('check_files', []): check_command = [os.path.join('check_commands', check['command'])] path = os.path.join( config['working_directory'], check['path'].lstrip('/') ) check_command.extend(check['arguments']) check_command.append(path) repo.restore_file_from_archive(archive_name, check['path']) proc = subprocess.Popen( check_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = proc.communicate() for line in stdout.splitlines(): logger.info(line) if proc.returncode != 0: logger.error('Backup integrity check failed!') output = logger.error integrity_failure = True else: output = logger.warning for line in stderr.splitlines(): output(line) if os.path.isdir(path): shutil.rmtree(path) else: os.unlink(path) # Make sure we fail noisily if for whatever reason the archive has become # corrupted. repo.check() repo.check_archive(archive_name) if integrity_failure: raise CheckFailure('Backup file checks failed.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_backup_check(repos):\n for repo in 'local', 'remote':\n repos[repo].check()\n\n # TODO: Check the ordering of this is deterministic\n most_recent_archive = repos[repo].list_archives()[-1]\n repos[repo].check_archive(most_recent_archive)", "def test_backup_only(self):\n # Check that by default a backup is performed and a snapshot is created.\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--backup', '--no-sudo',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure no snapshot was created.\n assert len(find_snapshots(destination)) == 0", "def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')", "def _backup(self, parsed_args):\n if self.backup:\n dep_sys = self.document['deploymentSystem']\n dep_path = self.document['deploymentPath']\n backup_dep_path = dep_path + '.' + str(seconds())\n\n print_stderr('Backing up agave://{}/{}'.format(dep_sys, dep_path))\n start_time = milliseconds()\n self.messages.append(\n ('backup', 'src: agave://{}/{}'.format(dep_sys, dep_path)))\n self.messages.append(\n ('backup', 'dst: agave://{}/{}'.format(dep_sys,\n backup_dep_path)))\n\n try:\n # TODO - only do this if dep_path exists, otherwise an Exception will be raised\n manage.move(dep_path,\n system_id=dep_sys,\n destination=backup_dep_path,\n agave=self.tapis_client)\n print_stderr('Finished ({} msec)'.format(milliseconds() -\n start_time))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('backup', str(exc)))\n print_stderr('Failed ({} msec)'.format(milliseconds() -\n start_time))\n return False\n else:\n raise\n\n return True", "def run_backup():\n\n from common.models import InvenTreeSetting\n\n if not InvenTreeSetting.get_setting('INVENTREE_BACKUP_ENABLE', False, cache=False):\n # Backups are not enabled - exit early\n return\n\n interval = int(InvenTreeSetting.get_setting('INVENTREE_BACKUP_DAYS', 1, cache=False))\n\n # Check if should run this task *today*\n if not check_daily_holdoff('run_backup', interval):\n return\n\n logger.info(\"Performing automated database backup task\")\n\n call_command(\"dbbackup\", noinput=True, clean=True, compress=True, interactive=False)\n call_command(\"mediabackup\", noinput=True, clean=True, compress=True, interactive=False)\n\n # Record that this task was successful\n record_task_success('run_backup')", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def verify_backups(config, logger):\n verifier = SupraVerify(config, logger)\n verifier.verify()", "def test_simple_backup(self):\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n 
destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--no-sudo', '--ionice=idle',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure a snapshot was created.\n assert len(find_snapshots(destination)) == 1", "def backup(self):\n\n\t\twith temp_dir(self.path):\n\t\t\t# only if changes made\n\t\t\tcheck = sp.check_output(['git', 'status', '--porcelain'])\n\t\t\t# check if untracked files\n\t\t\tuntracked = sp.check_output(['git', 'ls-files', '--others', '--exclude-standard'])\n\n\t\t\tif check:\n\t\t\t\tif untracked:\n\t\t\t\t\t# just add them all ... probably a better/safer/more direct way to do this\n\t\t\t\t\t_ = sp.check_output(['git', 'add', '.'])\n\t\t\t\t_ = sp.check_output([\n\t\t\t\t\t\t\"git\", \"commit\", \"-am\", f\"AUTO update on {dt.date.today().isoformat()}\"])\n\n\t\t\t# presumes that there is a remote!\n\t\t\toutput = sp.check_output([\n\t\t\t\t\t\"git\", \"push\"],\n\t\t\t\t\tstderr=sp.STDOUT\n\t\t\t\t\t)\n\n\t\t\treturn output.decode()\n\t\t\t# else:\n\t\t\t# \treturn 'No changes to commit'", "def backup (self, source, destination, archive = None, excludeList = None, debug = False):\n dateTime = time.strftime (\"%d%m%Y-%H%M%S\")\n if (archive is not None):\n thisArchive = os.path.join (archive, dateTime[4:8], dateTime[2:4], dateTime)\n else:\n thisArchive = None\n\n cmnd = \"%s --archive\" % self.rsync\n if (thisArchive is not None):\n cmnd = \"%s --backup --backup-dir=%s\" % (cmnd, thisArchive)\n cmnd = \"%s --delete\" % cmnd\n if (excludeList is not None):\n for exclude in excludeList:\n cmnd = '%s --exclude=\"%s\"' % (cmnd, exclude)\n cmnd = \"%s '%s' '%s'\" % (cmnd, source, destination)\n if (self.testRun):\n pass\n else:\n result = subprocess.getstatusoutput (cmnd)\n if (result[0] != 0):\n return 0\n self.logger.info(\"RSync Output:\\n {} \\n\".format(result[1]))\n return 1", "def backup(self):\n logging.info('Executing NCBI Blast backup')\n backup_folder = self.create_backup_dir()\n if not backup_folder:\n logging.error('Failed to create backup folder.')\n return False\n # Copy only README files for future reference\n app_readme_file = self.config['readme_file']\n ncbi_readme_file = self.info_file_name\n try:\n shutil.copy2(app_readme_file, backup_folder)\n shutil.copy2(ncbi_readme_file, backup_folder)\n except Exception as e:\n logging.exception('NCBI Blast Backup did not succeed. 
Error: {}'\n .format(e))\n return False\n return True", "def test_backup_merge(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=self.backupset.number_of_backups)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n backup_count = 0\n \"\"\" remove last 6 chars of offset time in backup name\"\"\"\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n backup_name = bk_info[\"backups\"][i][\"date\"]\n if self.debug_logs:\n print(\"backup name \", backup_name)\n print(\"backup set \", self.backups)\n if backup_name in self.backups:\n backup_count += 1\n self.log.info(\"{0} matched in info command output\".format(backup_name))\n self.assertEqual(backup_count, len(self.backups), \"Initial number of backups did not match\")\n self.log.info(\"Initial number of backups matched\")\n self.backupset.start = randrange(1, self.backupset.number_of_backups)\n self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)\n status, output, message = self.backup_merge(check_for_panic=True)\n if not status:\n self.fail(message)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n backup_count = 0\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n backup_name = bk_info[\"backups\"][i][\"date\"]\n if self.debug_logs:\n print(\"backup name \", backup_name)\n print(\"backup set \", self.backups)\n backup_count += 1\n if backup_name in self.backups:\n self.log.info(\"{0} matched in info command output\".format(backup_name))\n else:\n self.fail(\"Didn't expect backup date {0} from the info command output\" \\\n \" to be in self.backups (the list of exepected backup dates\" \\\n \" after a merge)\".format(backup_name))\n\n self.assertEqual(backup_count, len(self.backups), \"Merged number of backups did not match\")\n self.log.info(\"Merged number of backups matched\")", "def backup(backupName, full, verify, verifyIncrementally = False, doTheBackup = True):\n testRestoreDir = localenv.backups.testRestoreDir\n backupDetails = localenv.backups.backups[backupName]\n backupMap = getBackupMap(backupName)\n BackupOperations.doBackup (backupDetails.source, backupMap, testRestoreDir, full = full, \n verify = verify, verifyIncrementally = verifyIncrementally, \n doTheBackup = doTheBackup, \n recordTrigger = localenv.backups.recordTrigger)", "def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")", "def 
backup_project():\n _require_environment()\n\n # Unless explicitly provided, uses local Django settings to\n # extract username/password to access remote database\n database = env.project.get('database', None)\n if not database:\n django.settings_module(env.project['settings'])\n database = django_settings.DATABASES['default']\n\n # Remote side\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n # Creates dir to store backup, avoiding existing similar names\n dirname = '../backup/%s_%s' % (datetime.date.today().strftime('%Y%m%d'), env.environment)\n path = dirname\n index = 0\n while files.exists(path) or files.exists('%s.tar.gz' % path):\n index += 1\n path = '%s.%s' % (dirname, index)\n run('mkdir -p %s' % path)\n\n # Backup MySQL\n run('mysqldump %s -u %s -p%s %s > %s/%s.sql' % (\n '-h %s' % database['HOST'] if database.get('HOST', None) else '',\n database['USER'],\n database['PASSWORD'],\n database['NAME'],\n path,\n env.project['project'],\n ))\n\n # Backup extra files\n extra_backup_files = env.project.get('extra_backup_files', [])\n for file in extra_backup_files:\n run('cp -R %s %s/' % (file, path))\n\n # Create .tar.gz and removes uncompressed files\n with hide('stdout'):\n run('tar -czvf %s.tar.gz %s/' % (path, path))\n run('rm -rf %s/' % path)\n\n # Download backup?\n if console.confirm('Download backup?'):\n return get('%s.tar.gz' % path, '../backup')", "def backup(cfg: 'Settings', server: str, specific_path: str | None = None):\n\n try:\n server_path = find_server(cfg.parent_directory, server)\n except (ParentDirMissing, ServerNotFound) as e:\n e.log_this()\n return\n except NoInvocation:\n pass\n\n if specific_path:\n _source = server_path / specific_path\n if _source.exists():\n specification = {\n 'include': [_source],\n 'exclude': [],\n }\n else:\n try:\n Path(specific_path).relative_to(server_path)\n except ValueError:\n log.error(f'{specific_path} is not a subpath of {server_path}!')\n return\n else:\n specification = {\n 'include': [specific_path],\n 'exclude': [],\n }\n else:\n try:\n specification = get_backup(server_path)\n except NothingToBackup as e:\n e.log_this()\n return\n else:\n if not specification['include']:\n log.warning(f'Back up job for {server} failed, nothing to back up!')\n return\n\n log.info(f'Starting backup for {server}...')\n if isUp(server):\n log.info(f'{server} is running, announcing backup and toggling save!')\n screenCmd(server, 'Starting Backup!', 'save-off', 'save-all')\n sleep(10)\n\n now = time()\n now_str = datetime.now().strftime('%Y.%m.%d_%H_%M_%S')\n\n backup_location = cfg.backup_directory / server\n backup_location.mkdir(parents=True, exist_ok=True)\n\n log.info('Cleaning up backups...')\n\n for d in backup_location.iterdir():\n if d.is_dir() and not d.name.startswith('.'):\n if d.stat().st_mtime < now - (cfg.backup_maxAge * 60):\n for e in d.iterdir():\n if e.is_file():\n e.unlink()\n log.info(f'Deleted \\'{e}\\'')\n if e.is_dir():\n log.warning(f'Found directory {e.name} in {d} during cleanup!')\n log.warning(\n f'Please remove {e} manually if it is no longer needed!'\n )\n try:\n d.rmdir()\n except OSError:\n log.warning(\n f'Outdated backup directory {d} could not be fully removed!'\n )\n log.warning(\n 'This is likely because an unpacked backup still exists within.'\n )\n else:\n log.info(f'Cleaned up outdated backup directory \\'{d}\\'')\n\n log.info(f'Creating backup(s) specified for {server}...')\n\n target_path = backup_location / f'{now_str}'\n target_path.mkdir(exist_ok=True)\n\n 
os.chdir(target_path)\n\n for source_path in specification['include']:\n log.info(f'Backing up \\'{source_path}\\'...')\n try:\n filename = source_path.relative_to(server_path)\n except ValueError:\n log.critical(f'\\'{source_path}\\' is not a subpath of the specified server!')\n log.error(\n 'This should not be possible. Backup aborted! Please contact someone!'\n )\n return\n else:\n filename = '.'.join(filename.parts)\n\n exclusions = [\n f'{p.relative_to(source_path)}'\n for p in specification['exclude']\n if p.is_relative_to(source_path)\n ]\n\n def _filter(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo | None:\n if any(tarinfo.name.startswith(ex) for ex in exclusions):\n return None\n else:\n return tarinfo\n\n with tarfile.open(f'{filename}.tar.gz', 'w:gz') as tf:\n if exclusions:\n tf.add(source_path, source_path.name, filter=_filter)\n else:\n tf.add(source_path, source_path.name)\n log.info(f'\\'{source_path}\\' backed up!')\n\n log.info(f'Backup(s) created for {server}!')\n\n if isUp(server):\n log.info(f'{server} is running, re-enabling save!')\n screenCmd(server, 'save-on', 'Backup complete!')", "def backup_files(files=[]):\n\n if not File.backup_text(self.get_title()): return\n if Settings.get_destination() == \"remote\":\n Remote.upload_files(files)\n elif Settings.get_destination() == \"google\":\n Google.upload_files(files)\n else:\n for file in files:\n file.backup()\n return True", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def backup(ctx, project, origin, force):\n\n if not check_main_conf(ctx):\n return\n\n if origin is not None and project is None:\n click.echo(\"--project option is required when --origin is set.\")\n return\n\n bkp = ctx.obj[\"bkp\"]\n\n if not os.path.exists(ctx.obj[\"PROJECTS_DIR\"]):\n click.echo(\"Projects directory doesn't exists at %s\" % ctx.obj[\"PROJECTS_DIR\"])\n return\n\n if project is not None:\n bkp.project_load(project_name=project)\n bkp.backup(origin=origin, force=force)\n else:\n for file in os.listdir(ctx.obj[\"PROJECTS_DIR\"]):\n if file.endswith(\".conf\"):\n project_name = file.replace(\".conf\", \"\")\n bkp.project_load(project_name=project_name)\n bkp.backup(origin=origin, force=force)", "def archive_writeup(syn, evaluation, stat=\"VALIDATED\", reArchive=False):\n if type(evaluation) != synapseclient.Evaluation:\n evaluation = syn.getEvaluation(evaluation)\n\n print(\"\\n\\nArchiving\", evaluation.id, evaluation.name)\n print(\"-\" * 60)\n\n for sub, status in syn.getSubmissionBundles(evaluation, status=stat):\n # retrieve file into cache and copy it to destination\n checkIfArchived = filter(\n lambda x: x.get(\"key\") == \"archived\",\n status.annotations['stringAnnos'])\n if len(list(checkIfArchived)) == 0 or reArchive:\n projectEntity = synapseclient.Project(\n 'Archived {} {} {} {}'.format(\n sub.name.replace(\"&\", \"+\").replace(\"'\", \"\"),\n int(round(time.time() * 1000)),\n sub.id,\n sub.entityId))\n entity = syn.store(projectEntity)\n adminPriv = [\n 'DELETE', 'DOWNLOAD', 'CREATE', 'READ', 'CHANGE_PERMISSIONS',\n 'UPDATE', 'MODERATE', 'CHANGE_SETTINGS']\n syn.setPermissions(entity, \"3324230\", adminPriv)\n synapseutils.copy(syn, sub.entityId, entity.id)\n archived = {\"archived\": entity.id}\n status = utils.update_single_submission_status(status, archived)\n syn.store(status)", 
"def test_backup_failure(self):\n program = RsyncSystemBackup(\n destination='0.0.0.0::module/directory',\n sudo_enabled=False,\n )\n self.assertRaises(ExternalCommandFailed, program.execute)", "def backup(self):\n if self.url is not None:\n\n # zip backup folder\n zipapp.create_archive(self.logs_directory, self.send_zip)\n\n # then send zipped folder to the URL\n try:\n requests.post(self.url, files={\n 'uploaded_file': (os.path.basename(self.send_zip), open(self.send_zip, 'rb')),\n })\n except requests.exceptions.ConnectionError as error:\n print(error)", "def test_buildAllTarballsEnsuresCleanCheckout(self):\n repositoryPath = self.mktemp()\n repository = FilePath(repositoryPath)\n checkoutPath = self.mktemp()\n checkout = FilePath(checkoutPath)\n\n runCommand([\"svnadmin\", \"create\", repositoryPath])\n runCommand([\"svn\", \"checkout\", \"file://\" + repository.path,\n checkout.path])\n\n checkout.child(\"foo\").setContent(\"whatever\")\n self.assertRaises(UncleanWorkingDirectory,\n buildAllTarballs, checkout, FilePath(self.mktemp()))", "def compress(repo, location):\r\n os.chdir(location)\r\n debug(\"Compressing repositories in [%s]...\" % (location), True)\r\n exec_cmd(\"tar -zcvf bitbucket-backup-%s-%s.tar.gz `ls -d *`\" % (repo.get('owner'), datetime.datetime.now().strftime('%Y%m%d%H%m%s')))\r\n debug(\"Cleaning up...\", True)\r\n for d in os.listdir(location):\r\n path = os.path.join(location, d)\r\n if os.path.isdir(path):\r\n exec_cmd(\"rm -rfv %s\" % path)", "def seafile_backup():\n global jobIds\n\n updateRcloneJobStatus()\n\n if any(jobId != None for _, jobId in jobIds.items()):\n abort(423, \"A job is already running\")\n\n # First, backup the databases (per Seafile documentation)\n # https://manual.seafile.com/maintain/backup_recovery/#backup-order-database-first-or-data-directory-first\n # requests.post(\"http://seafile-db:34770/seafile-backup\", headers={\n # Authorization: f'Bearer {authSecret}'\n # })\n\n # Second, queue all the rclone jobs\n fprint(\"Queue'ing up rclone jobs\")\n jobsData = {\n # Backup all the seafile files to remote (backblze B2)\n \"remote\": {\n \"srcFs\": 'battoseafile:',\n \"dstFs\": 'battob2:b4tto-seafile-backup-2',\n },\n # Backup all the seafile files to local backup\n \"local\": {\n \"srcFs\": 'battoseafile:',\n \"dstFs\": '/backup-local-dest/files',\n },\n # TODO: Readd the backups for the db and db data\n # Probably need their own bucket\n # Backup all the seafile db and config files to remote (backblaze B2)\n # \"dbRemote\": {\n # \"srcFs\": 'battoseafile:',\n # \"dstFs\": 'battob2:b4tto-seafile-backup-2',\n # },\n # # Backup all the seafile db and config files to local\n # \"dbLocal\": {\n # \"srcFs\": 'battoseafile:',\n # \"dstFs\": 'battob2:b4tto-seafile-backup-2',\n # }\n # ... 
and the data ones\n }\n for jobName, jobData in jobsData.items():\n fprint(f\"Queue'ing up rclone job '{jobName}'\")\n resp = requests.post(\"http://test:test@seafile-backups:5572/sync/sync\", data={\n **jobsData,\n \"_async\": True\n }, headers={\n 'Authorization': 'Basic dGVzdDp0ZXN0'\n })\n json = resp.json()\n fprint(json)\n jobIds[jobName] = json[\"jobid\"]\n fprint(f\"Rclone job '{jobName}' got id '{jobIds[jobName]}'\")\n\n return \"success\"", "def submit(self, root=None, force=False, repo=None):\n import ambry.util as du\n \n if repo:\n self.repo_name = repo\n self.set_api()\n \n import os\n from os.path import basename\n \n ckb = self.remote.update_or_new_bundle_extract(self.bundle)\n \n sent = set()\n \n self.remote.put_package(ckb)\n \n for doc in self.bundle.config.group('about').get('documents',[]):\n self.store_document(ckb, doc)\n\n zip_inputs = {}\n\n for extract_data in self.generate_extracts(root=root):\n\n zip = extract_data.get('zip', False)\n will_zip = False\n \n if zip == 'dir':\n zip_inputs[os.path.dirname(extract_data['path'])] = extract_data\n will_zip = True\n elif zip == 'file':\n zip_inputs[extract_data['path']] = extract_data\n will_zip = True\n\n file_ = self._do_extract(extract_data, force=force)\n \n if will_zip:\n self.bundle.log(\"{} will get submitted as a zip\".format(file_))\n elif file_ not in sent:\n r = self._send(ckb, extract_data,file_)\n sent.add(file_)\n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(file_), url))\n else:\n self.bundle.log(\"Already processed {}, not sending.\".format(basename(file_)))\n \n \n zip_outputs = self.zip(zip_inputs.keys() )\n \n \n print zip_outputs\n \n for in_zf, out_zf in zip_outputs.items():\n extract_data = zip_inputs[in_zf]\n extract_data['name'] = extract_data['zipname'] if 'zipname' in extract_data else extract_data['name']\n r = self._send(ckb, extract_data,out_zf)\n \n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(out_zf), url))\n \n \n return True", "def test_restore_backup():", "def test_backup_merge_negative_args(self):\n # This error message is thrown when an invalid date range format is supplied to cbbackupmgr.\n invalid_range_format_error = \"Error merging data: invalid range format, expected two indexes or two dates; the keywords [start, oldest, end, latest] are also valid\"\n\n remote_client = RemoteMachineShellConnection(self.backupset.backup_host)\n self.backup_create()\n cmd = \"merge\"\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"cbbackupmgr merge [<args>]\", \"Expected error message not thrown\")\n cmd = \"merge --archive -c http://localhost:8091 -u Administrator -p password -r aa\"\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --archive\", \"Expected error message not thrown\")\n cmd = \"merge --archive {0}\".format(self.backupset.directory)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Flag required, but not specified: -r/--repo\", \"Expected error message not thrown\")\n cmd = \"merge --archive {0} 
--repo\".format(self.backupset.directory)\n command = \"{0}/cbbackupmgr {1} -r\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --repo\", \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1}\".format(self.backupset.directory, self.backupset.name)\n command = \"{0}/cbbackupmgr {1} --start start --end end\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Error merging data: Repository 'backup' doesn't contain any backups\",\n \"Expected error message not thrown\")\n self._take_n_backups(n=2)\n cmd = \"merge --archive {0} --repo {1}\".format(self.backupset.directory, self.backupset.name)\n command = \"{0}/cbbackupmgr {1} --start bbb --end end\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], invalid_range_format_error, \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1} --start\".format(self.backupset.directory, self.backupset.name)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --start\", \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1} --start {2}\".format(self.backupset.directory,\n self.backupset.name, self.backups[0])\n command = \"{0}/cbbackupmgr {1} --end aa\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], invalid_range_format_error, \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1} --start {2} --end\".format(self.backupset.directory,\n self.backupset.name, self.backups[0])\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --end\", \"Expected error message not thrown\")\n cmd = \"merge --archive xyz --repo {0} --start {1} --end {2}\".format(self.backupset.name,\n self.backups[0], self.backups[1])\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(\"Error merging data: archive '{0}xyz' does not exist\".format(self.root_path) in output[-1],\n \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo abc --start {1} --end {2}\".format(self.backupset.directory,\n self.backups[0], self.backups[1])\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(\"Error merging data: Backup Repository `abc` not found\" in output[-1],\n \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1} --start abc --end {2}\".format(self.backupset.directory,\n self.backupset.name, self.backups[1])\n command = \"{0}/cbbackupmgr 
{1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(invalid_range_format_error in output[-1], \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1} --start {2} --end abc\".format(self.backupset.directory,\n self.backupset.name, self.backups[0])\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(invalid_range_format_error in output[-1], \"Expected error message not thrown\")\n cmd = \"merge --archive {0} --repo {1} --start {2} --end {3}\".format(self.backupset.directory,\n self.backupset.name,\n self.backups[1], self.backups[0])\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n remote_client.disconnect()\n self.assertTrue(\"Error merging data: invalid range start cannot be before end\" in output[-1], \"Expected error message not thrown\")", "def main():\n\n # get all repos a user has access to\n gh = Github(options.username, options.pat)\n user = gh.get_user()\n # filter for those under the user account\n userrepos = {\n repo.name : repo.git_url for repo in user.get_repos() \\\n if repo.git_url.startswith(\"git://github.com/\" + options.username)\n }\n # create a backup dir\n dirname = datetime.today().strftime(\"%Y%m%d-%H%M%S\")\n os.makedirs(\"./backup/\" + dirname)\n # clone all user repos\n for k, v in userrepos.items():\n url = \"https://\" + options.pat + \"@\" + v.removeprefix(\"git://\")\n subprocess.check_call([\n \"git\",\n \"clone\",\n url,\n \"./backup/\" + dirname + \"/\" + k\n ])", "def backup_files(self):\n backup_path = os.path.join(self.backupdir, self.get_timestamp().replace(':', '-'))\n try:\n if not os.path.exists(backup_path):\n self.make_path(backup_path)\n if not os.path.exists(backup_path):\n raise IOError('Path was not made correctly')\n else:\n self.print_to_log('Backup path: %s' % backup_path)\n for item in self.file_list:\n try:\n self.print_to_log('Backing up file: %s' % item)\n shutil.copy(item, backup_path)\n except IOError, why:\n self.error = 2\n self.print_to_log(str(why))\n self.print_to_log('Unable to archive file: %s continuing' % item)\n except IOError, why:\n self.print_to_log(str(why))\n self.print_to_log('Quiting with out archiving')\n self.error = 1" ]
[ "0.62302047", "0.5959753", "0.5957811", "0.5861498", "0.57466793", "0.5734922", "0.5635272", "0.5594395", "0.5587171", "0.55635685", "0.54897755", "0.5474992", "0.5465246", "0.5372894", "0.5349092", "0.5341547", "0.53170043", "0.53014684", "0.52790445", "0.52441865", "0.5213991", "0.5196122", "0.5187348", "0.5182774", "0.51714236", "0.51448965", "0.5140829", "0.5139741", "0.5138605", "0.51233107" ]
0.7684872
0
Retrieve the stop words for vectorization Feel free to modify this function
def stop_words(): return get_stop_words('es') + get_stop_words('ca') + get_stop_words('en')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStopWords(self):\n\t\treturn self.stop_words", "def get_stop_words(self):\n self._normalize_params()\n return self.stop_words", "def getStopWords():\n import os\n cur_dir = os.getcwd()\n\n dk_addition = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','danish_stopwords.txt'), encoding=\"utf-8\")] # danish stopword list\n\n customer_specific_words = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_lda_customer_specific.txt'), encoding=\"utf-8\")] # customer specific\n dk_addition.extend(customer_specific_words)\n\n stopwords_1gram = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_1gram.txt'), encoding=\"utf-8\")] # stopwords 1grams\n dk_addition.extend(stopwords_1gram)\n\n stopwords_2gram = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_2gram.txt'), encoding=\"utf-8\")] # stopwords 2grams\n dk_addition.extend(stopwords_2gram)\n \n stopwords_3gram = [line.rstrip('\\n') for line in open(os.path.join(cur_dir,'utils','stopwords_3gram.txt'), encoding=\"utf-8\")] # stopwords 3grams\n dk_addition.extend(stopwords_3gram)\n \n # nltk\n stopwords = nltk.corpus.stopwords.words('danish')\n stopwords.extend(dk_addition)\n stopwords = list(set(stopwords))\n return stopwords", "def getStopWords(self):\n return self.getOrDefault(self.stopWords)", "def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\tif load_stopwords():\r\n\t\treturn\r\n\r\n\tglobal stopwords\r\n\tstopwords = nltk.corpus.stopwords.words('english')\r\n\tfor f in os.listdir(paths.path_data_stopwords):\r\n\t\tpath_stopwords = paths.path_data_stopwords + '/' + f\r\n\t\twith open(path_stopwords,'r') as f:\r\n\t\t\tfor l in f:\r\n\t\t\t\tw = l.strip()\r\n\t\t\t\tw = re.sub(r\"[\\x80-\\xff]\",\" \",w)\r\n\t\t\t\tif (w not in stopwords):\r\n\t\t\t\t\tstopwords.append(w)\r\n\t\r\n\t# wip improve with POS and remove numbers\r\n\twith open(paths.path_data_stopwords_txt,'w') as outf:\r\n\t\toutf.write('\\n'.join(stopwords))\r\n\t\r\n\tprint('\\nstopword count : ' + str(len(stopwords)))", "def remove_stop_words(tweet):\n tokens_without_sw = \"\"\n for word in tweet.split():\n if not word.lower() in STOPWORDS:\n tokens_without_sw += word.lower() + \" \"\n return tokens_without_sw", "def __get_stopwords():\n\n try:\n stopwords = nltk.corpus.stopwords.words('english')\n except LookupError:\n nltk.download('stopwords')\n stopwords = nltk.corpus.stopwords.words('english')\n\n return stopwords", "def test_stopwords():\n assert TextNormalizer().transform([[\"a b\"]])[\"corpus\"][0] == [\"b\"]", "def remove_stopwords(data):\n stop_words = stopwords.words('english')\n words = word_tokenize(str(data))\n new = \"\"\n for word in words:\n if word not in stop_words and len(word) > 1:\n new = new + \" \" + word\n return new", "def remove_stopwords(words):\n new_words = []\n for word in words:\n # print(word)\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def construct_stop_words():\n stop_words_list = [\"uk\", \"ceo\", \"apple\", \"wal\", \"st\", \"q1\", \"q2\", \"q3\", \"q4\",\n \"bp\", \"wednesday\", \"tuesday\", \"monday\", \"thursday\", \"friday\", \"sept\", \"johnson\", \"inc\",\n \"david\", \"amazon.com\"]\n\n for words in stop_words_list:\n STOP_WORDS.add(words)\n\n return STOP_WORDS", "def get_words_in(body):\n prepared_text = _prepare_text(body)\n tokens = nltk.word_tokenize(prepared_text)\n tags = nltk.pos_tag(tokens)\n lemmatized = [_lemmatize(tag) for tag in tags]\n no_stop = [word 
for word in lemmatized if word not in _stopwords]\n return no_stop", "def _get_stopwords():\n all_stopwords = many_stop_words.get_stop_words('ru')\n all_stopwords.update(many_stop_words.get_stop_words('en'))\n\n more_stopwords = set(stopwords.words(['russian', 'english']))\n all_stopwords.update(more_stopwords)\n\n return all_stopwords", "def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)", "def clean_stopwords(text):\n tokens = tokenize(text)\n tokens = stopwordsRem(tokens)\n return tokens", "def stopword_filter(words):\n new_words = []\n for w in words:\n if w in stopwords.words(\"german\"): continue\n else: new_words += [w]\n return new_words", "def stopwordsRem(tokens):\n no_sw = [t for t in tokens if not t in stopwords.words('english')]\n return no_sw", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopword_list:\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\r\n new_words = []\r\n for word in words:\r\n if word not in stopwords.words('english'):\r\n new_words.append(word)\r\n return new_words", "def getTerms(s): \n cleaned = [clean.sub('', t.lower()) for t in s.split()]\n return [t for t in cleaned if t not in stop_words]", "def clean_stopwords_lemmatize(text):\n tokens = clean_stopwords(text)\n tokens = lemmatize_tokens(tokens)\n # count = Counter(tokens)\n # c = count.most_common(15)\n # b = [str(i[0]) for i in c]\n # keywords = [t for t in tokens if t in b]\n news = ['ESPN', 'espn', 'foxsports', 'fox', 'cnn', 'yahoo', '•', '-', '●']\n keywords = [k for k in tokens if not k in news]\n return keywords", "def stop_word(w): # local feature\n return (w in swl)", "def removeStopwords(self, words):\n\t\twordList = [w.strip() for w in words.split(' ')]\n\t\trtnWords = []\n\t\tfor word in wordList:\n\t\t\tif word.lower() not in self._stopwords:\n\t\t\t\trtnWords.append(word)\n\t\treturn \" \".join(rtnWords)", "def remove_stopwords(self,text):\n return \" \".join([word for word in str(text).split() if word not in self.STOPWORDS])", "def _remove_stopwords(self, words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def _remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words" ]
[ "0.76995164", "0.7694002", "0.7495476", "0.7411517", "0.7385416", "0.7310153", "0.7292719", "0.7272324", "0.720781", "0.72043383", "0.71629584", "0.71625566", "0.7147034", "0.71461827", "0.7064063", "0.69907856", "0.6974882", "0.6970869", "0.6969426", "0.6963172", "0.696132", "0.69536525", "0.6952992", "0.6948023", "0.69453406", "0.69290924", "0.6919243", "0.6919243", "0.6919243", "0.6919243" ]
0.7706719
0
Sets the app_id of this SharedSecretsStore.
def app_id(self, app_id): self._app_id = app_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_id(self, app_id):\n self._app_id = app_id", "def setAppID(self, appid):\n\t\tself.config.APP_ID = appid", "def application_id(self, application_id):\n\n self._application_id = application_id", "async def slashtagset_appid(self, ctx: commands.Context, id: int = None):\n app_id = id or self.bot.user.id\n await self.config.application_id.set(app_id)\n self.application_id = app_id\n await ctx.send(f\"Application ID set to `{id}`.\")", "def app_version_id(self, app_version_id):\n\n self._app_version_id = app_version_id", "def with_application_id(self, application_id):\n if not isinstance(application_id, str):\n raise TypeError('Application Id must be a string')\n\n self.application_id = application_id\n\n return self", "def set_application(self, app_id):\n if self._use_channel_info:\n self._channel = \"\"\n self._channel_name = app_id\n self._is_forced_val = True\n self._forced_count = 0", "def set_application(self, app):\n \n self.app = app", "def apple_id(self, apple_id):\n\n self._apple_id = apple_id", "def app_id(self):\n return self._app_id", "def app_id(self) -> str:\n return self._app_id", "def app(self, app):\n\n self._app = app", "def appid(self):\n return self._item[\"appid\"]", "def set_app(self, app):\n self._app = app\n\n # Let the subclass choose the authentication method.\n self._authenticator = self._set_authenticator()", "def site_id(self, site_id):\n\n self._site_id = site_id", "def site_id(self, site_id):\n\n self._site_id = site_id", "def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])", "def game_id(self, game_id):\n\n self._game_id = game_id", "def game_id(self, game_id):\n\n self._game_id = game_id", "def game_id(self, game_id):\n\n self._game_id = game_id", "async def update_app_oauth(self, app_id: str, data: dict) -> dict:\r\n return await self.put(API_APP_OAUTH.format(app_id=app_id), data)", "def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "async def statset_appkey(self, key):\n self._set_app_key(key)\n await self.bot.say(\"APP key successfully set.\")", "def app_name(self, value):\n self._app_name = value", "def settings_app_password(self, settings_app_password):\n\n self._settings_app_password = settings_app_password", "def set_app_key(self, app_key: bytes) -> None:\n\n if len(app_key) != 16:\n raise ValueError('app key must be exactly 16 bytes long')\n\n cmd = b'\\x43\\x10' + app_key[::-1]\n try:\n self._serial.transmit(cmd)\n self._get_reply(0x43, 0, 2)\n finally:\n self._gpio.sleep()\n\n return" ]
[ "0.8030798", "0.7907268", "0.72013015", "0.711682", "0.6553479", "0.65487945", "0.63896734", "0.6226906", "0.6072402", "0.59553057", "0.5932596", "0.58469903", "0.5838617", "0.58236474", "0.5753086", "0.5753086", "0.5690268", "0.5654728", "0.5654728", "0.5654728", "0.55475783", "0.5521897", "0.55078864", "0.55078864", "0.55078864", "0.55078864", "0.5504932", "0.5490411", "0.5481947", "0.5460556" ]
0.79765457
1
Sets the ca_cert of this SharedSecretsStore.
def ca_cert(self, ca_cert): self._ca_cert = ca_cert
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ca_cert_path(self, ca_cert_path: str):\n\n self._ca_cert_path = ca_cert_path", "def console_ca_cert(self, console_ca_cert):\n\n self._console_ca_cert = console_ca_cert", "def cert(self, value):\n self._cert = value", "def client_cert(self, client_cert):\n\n self._client_cert = client_cert", "def save_ca():\n cert_file = os.environ.get('HOME') + '/.cat_installer/ca.pem'\n debug(\"saving cert\")\n with open(cert_file, 'w') as cert:\n cert.write(Config.CA + \"\\n\")", "def token_cert(self, token_cert):\n\n self._token_cert = token_cert", "def certificate(self, certificate):\n\n self._certificate = certificate", "def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")", "def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")", "def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):\n\n raise NotImplementedError()", "def ca_cert_path(self) -> str:\n return self._ca_cert_path", "def ca_certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ca_certificate\")", "def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)", "def client_x509_cert_url(self, client_x509_cert_url):\n\n self._client_x509_cert_url = client_x509_cert_url", "def setCertfile(self, certfile):\r\n if not os.access(certfile, os.R_OK):\r\n raise IOError('No such certfile found: %s' % (certfile))\r\n self.certfile = certfile", "def org_apache_felix_https_clientcertificate(self, org_apache_felix_https_clientcertificate: ConfigNodePropertyDropDown):\n\n self._org_apache_felix_https_clientcertificate = org_apache_felix_https_clientcertificate", "def test_set_and_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def mixed_set_add_ca(ctx):\n ctx.set_client_ca_list([cadesc, sedesc])\n ctx.add_client_ca(clcert)\n return [cadesc, sedesc, cldesc]\n\n self._check_client_ca_list(mixed_set_add_ca)", "def client_certificate_id(self, client_certificate_id):\n\n self._client_certificate_id = client_certificate_id", "def test_set_after_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n\n def set_replaces_add_ca(ctx):\n ctx.add_client_ca(clcert)\n ctx.set_client_ca_list([cadesc])\n ctx.add_client_ca(secert)\n return [cadesc, sedesc]\n\n self._check_client_ca_list(set_replaces_add_ca)", "def console_custom_cert(self, console_custom_cert):\n\n self._console_custom_cert = console_custom_cert", "def set_cid(self, cid):\n self.__cid = cid", "def set_cid(self, cid):\n self.__cid = cid", "def test_set_one_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n 
self._check_client_ca_list(single_ca)", "def set_ssl_context(self, ssl_verify, ssl_cafile):\n if not ssl_verify:\n self.ssl_context = ssl.create_default_context()\n self.ssl_context.check_hostname = False\n self.ssl_context.verify_mode = ssl.CERT_NONE\n elif ssl_cafile:\n self.ssl_context = ssl.create_default_context(cafile=ssl_cafile)\n else:\n self.ssl_context = ssl.create_default_context()", "def certificate_issuer_id(self, certificate_issuer_id):\n\n self._certificate_issuer_id = certificate_issuer_id", "def install_ca():\n require_root()\n\n config.proxy.install_ca_cert()\n log.info('OK')", "def initca(ca_dir):\n click.echo('Initiliasing new CA in %s' % ca_dir)\n sca = SimpleCA(ca_dir)\n try:\n sca.init_ca()\n except FileExistsError as err:\n click.echo('The CA directory (%s) exists, not doing anything' %\n err.filename)\n exit(1)", "def server_auth_ca_ids(self, server_auth_ca_ids):\n\n self._server_auth_ca_ids = server_auth_ca_ids", "def mdm_signing_certificate(self, mdm_signing_certificate):\n\n self._mdm_signing_certificate = mdm_signing_certificate", "def tls_ca_certificate_pem_path(ca):\n with ca.cert_pem.tempfile() as ca_cert_pem:\n yield ca_cert_pem" ]
[ "0.70366275", "0.69069916", "0.66452664", "0.64642173", "0.615436", "0.60478634", "0.58376104", "0.5587908", "0.5587908", "0.5510349", "0.53918797", "0.53680134", "0.53367555", "0.5236249", "0.5180108", "0.51549375", "0.5105433", "0.508126", "0.5059504", "0.5050953", "0.50126463", "0.50126463", "0.49863905", "0.49333575", "0.49240828", "0.49167705", "0.48861372", "0.48711774", "0.4795537", "0.47795328" ]
0.8026556
0
Sets the client_cert of this SharedSecretsStore.
def client_cert(self, client_cert): self._client_cert = client_cert
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_x509_cert_url(self, client_x509_cert_url):\n\n self._client_x509_cert_url = client_x509_cert_url", "def client_certificate_id(self, client_certificate_id):\n\n self._client_certificate_id = client_certificate_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def set(self, client):\n if not client:\n raise SurvoxAPIMissingParameter('client')\n c = self.get()\n if not c:\n raise SurvoxAPIRuntime('No client available named: {name}'.format(name=self.name))\n return self.api_put(endpoint=self.url, data=client)", "def cert(self, value):\n self._cert = value", "def org_apache_felix_https_clientcertificate(self, org_apache_felix_https_clientcertificate: ConfigNodePropertyDropDown):\n\n self._org_apache_felix_https_clientcertificate = org_apache_felix_https_clientcertificate", "def token_cert(self, token_cert):\n\n self._token_cert = token_cert", "def set_credentials(self, client_id=None, client_secret=None):\n self._client_id = client_id\n self._client_secret = client_secret\n\n # make sure to reset session due to credential change\n self._session = None", "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id", "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id", "def store_client_credentials(self, client_id, credentials):\n if self._dry_run:\n return\n if type(client_id) == unicode:\n client_id = client_id.encode('ascii')\n store = self._load_credential_store()\n store[client_id] = credentials\n store.close()", "def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert", "def setCooperationClient(self, client):\n self.__cooperationClient = client", "def init_client(self, client):\n self.client = client", "def client_email(self, client_email):\n\n self._client_email = client_email", "def server_side_encryption_key(self, server_side_encryption_key):\n\n self._server_side_encryption_key = server_side_encryption_key", "def client_addresses(self, client_addresses):\n\n self._client_addresses = client_addresses", "def org_apache_felix_https_clientcertificate(self) -> ConfigNodePropertyDropDown:\n return self._org_apache_felix_https_clientcertificate", "def client_certificate(self) -> str:\n return pulumi.get(self, \"client_certificate\")", "def client_certificate(self) -> str:\n return pulumi.get(self, \"client_certificate\")", "def setCertfile(self, certfile):\r\n if not os.access(certfile, os.R_OK):\r\n raise IOError('No such certfile found: %s' % (certfile))\r\n self.certfile = certfile", "def setClientClockDrift(self, drift):\n self.clientClockDrift = drift", "def signed_session(self, session=None):\n\n if session:\n session = super(ClientCertAuthentication, self).signed_session(session)\n else:\n session = super(ClientCertAuthentication, self).signed_session()\n\n if self.cert is not None:\n session.cert = self.cert\n if self.ca_cert is not None:\n session.verify = self.ca_cert\n if self.no_verify:\n session.verify = False\n\n return session", "def cert_id(self, cert_id):\n if (\n self.local_vars_configuration.client_side_validation and cert_id is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `cert_id`, 
must not be `None`\"\n ) # noqa: E501\n\n self._cert_id = cert_id", "def ca_cert_path(self, ca_cert_path: str):\n\n self._ca_cert_path = ca_cert_path", "def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None, nx=False):\r\n if client is None:\r\n key = self.make_key(key, version=version)\r\n client = self.get_server(key)\r\n\r\n return super(ShardClient, self).set(key=key, value=value,\r\n timeout=timeout, version=version,\r\n client=client, nx=nx)", "def shopkeeper(self, shopkeeper):\n\n self._shopkeeper = shopkeeper" ]
[ "0.6916911", "0.6538688", "0.6388087", "0.6388087", "0.6388087", "0.6388087", "0.6272879", "0.61620885", "0.6154874", "0.59240377", "0.5855223", "0.5651909", "0.5651909", "0.5648323", "0.5564449", "0.55489093", "0.5483793", "0.5316402", "0.523795", "0.52187634", "0.5173961", "0.50862724", "0.50862724", "0.50285864", "0.5003786", "0.50008196", "0.49994457", "0.49686053", "0.4933964", "0.49217743" ]
0.81401896
0
Sets the credential_id of this SharedSecretsStore.
def credential_id(self, credential_id): self._credential_id = credential_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def credential(self, credential):\n\n self._credential = credential", "def credential(self, credential):\n\n self._credential = credential", "def store_client_credentials(self, client_id, credentials):\n if self._dry_run:\n return\n if type(client_id) == unicode:\n client_id = client_id.encode('ascii')\n store = self._load_credential_store()\n store[client_id] = credentials\n store.close()", "def credentials(self, credentials):\n\n self._credentials = credentials", "async def store_credential(\n self, cred_ex_record: V20CredExRecord, cred_id: str = None\n ) -> None:", "def update_credential(self, context, id, credential):\n c = credential['credential']\n cred = update_credential(id,\n c['user_name'],\n c['password'])\n return self._make_credential_dict(cred)", "def add_credential(self, authenticator_id, credential):\n pass", "def set_credentials(self, client_id=None, client_secret=None):\n self._client_id = client_id\n self._client_secret = client_secret\n\n # make sure to reset session due to credential change\n self._session = None", "def save_credential(self):\n Credentials.credentials_list.append(self)", "def credential(self, value):\n credential = self.organization.get_credential_by_name_with_type_id(value,\n self.credential._data.get('credential_type'))\n if not credential:\n raise InvalidCredential(value)\n self._update_values('credential', credential.id)", "def _update_credential(self, key, cred):\n self._data[key] = cred\n self._write()", "def set_credentials_helper(cls, cred_helper):\n cls.credentials_helper = cred_helper", "def SetCredentials(self, credentials):\n self._session[_CREDENTIAL_KEY] = credentials\n self._ReCreateUserInfo(credentials)", "def remove_credential(self, authenticator_id, credential_id):\n pass", "def put(self, credential):\n pass", "def set_cred(self, cred):\n self.cred = cred\n self.dirty = False", "def save_credentials(self):\n Credentials.credential_list.append(self)", "def account_id(self, account_id):\n self._account_id = account_id", "def save_credential(self):\n\n Credential.credential_list.append(self)", "def entity_id(self, entity_id: str):\n\n self._entity_id = entity_id", "def save_credentials(self):\n Credentials.credentials_list.append(self)", "def save_credential(self):\n Credential.credential_list.append(self)", "def credentials(self, credentials):\n if credentials is None:\n raise ValueError(\"Invalid value for `credentials`, must not be `None`\") # noqa: E501\n\n self._credentials = credentials", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def set_google_id(self, google_id):\n self._google_id = google_id" ]
[ "0.65154755", "0.65154755", "0.6216953", "0.61650056", "0.5938336", "0.58274084", "0.57338697", "0.57093775", "0.568957", "0.56819266", "0.56121373", "0.5577215", "0.55734926", "0.55087495", "0.5478372", "0.5451043", "0.54476315", "0.5434649", "0.53920776", "0.5381632", "0.53796893", "0.53764987", "0.5375691", "0.53720576", "0.53720576", "0.53720576", "0.53720576", "0.53720576", "0.53720576", "0.53638995" ]
0.80363697
0
Get elements in request and inject them in args_view or kwargs_view.
def inject(self, request: BaseRequest, args_view: list, kwargs_view: dict):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def setup_view(view, request=None, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def request_vars(self):", "def _get_view_and_args(path, request):\n # Let's use urlconf from request object, if available:\n urlconf = getattr(request, \"urlconf\", settings.ROOT_URLCONF)\n resolver = RegexURLResolver(r\"^/\", urlconf)\n return resolver.resolve(path)", "def dispatch_request(self, *args, **kwargs):\n args = (request,) + args\n return super(View, self).dispatch_request(*args, **kwargs)", "def request_args(args):\n\n def _decorator(fn):\n fn.args = args\n return fn\n\n return _decorator", "def initialize(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def inject_get_param(request, injectionstring):\r\n requests = []\r\n return requests", "def __init__(self, request):\n self.arguments = {}\n for k, v in request.GET.items():\n self.arguments.setdefault(k, []).append(v)\n\n self.full_url = lambda: request.url\n self.host = request.host\n self.path = request.path", "def _wrapped_view(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)", "def extend_with_args(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if request.get_json():\n kwargs.update(request.get_json())\n\n # WARNING: This may get us into trouble when it comes to\n # list-style \"get\" args.\n kwargs.update({k: v for k, v in request.args.iteritems()})\n\n # \"auth\" is special -- it's an authentication token, not an\n # argument.\n if \"auth\" in kwargs:\n kwargs.pop(\"auth\")\n\n # This is JUST for the 1.0 sidebar app.\n if \"access_token\" in kwargs:\n kwargs.pop(\"access_token\")\n\n return f(*args, **kwargs)\n return wrapper", "def test_passes_on_args(self):\n record = []\n\n @self.actions(\"ctx_name\", [])\n def myview(request, *args, **kwargs):\n record.extend([args, kwargs])\n\n myview(self.req(\"get\", \"/\"), \"a\", b=2)\n\n self.assertEqual(record, [(\"a\",), {\"b\": 2}])", "def from_request(request=None) -> dict:\n\n request = request if request else flask_request\n\n try:\n json_args = request.get_json(silent=True)\n except Exception:\n json_args = None\n\n try:\n get_args = request.values\n except Exception:\n get_args = None\n\n arg_sources = list(filter(\n lambda arg: arg is not None,\n [json_args, get_args, {}]\n ))\n\n return arg_sources[0]", "def _view(self, request, **kwargs):\n return self._dispatch(request, **kwargs)", "def __call__(request):", "def _request(self, *args, **kwargs):\n request = self._make_request(*args, **kwargs)\n\n return self._collect_request(request)", "def urlfor( request, *args, **kwargs ):", "def add_view( *args, **kwargs ):", "def _get_method_args(self, method, request, params):\n idx = 0\n\n if method.__servicemethod__['store_arg']:\n params.insert(idx, method.__servicemethod__['store'])\n idx += 1\n\n if method.__servicemethod__['request_arg']:\n params.insert(idx, request)\n\n return params", "def _get_url_params_as_dict(_request):\n return _multi_dict_to_dict(_request.args)", "def get_args():\n # Strip anything other than characters listed\n starting_view = pattern.sub(\"\", request.form.get(\"starting_view\"))\n envelope_id = \"envelope_id\" in session and 
session[\"envelope_id\"]\n args = {\n \"envelope_id\": envelope_id,\n \"starting_view\": starting_view,\n \"account_id\": session[\"ds_account_id\"],\n \"base_path\": session[\"ds_base_path\"],\n \"access_token\": session[\"ds_access_token\"],\n \"ds_return_url\": url_for(\"ds.ds_return\", _external=True),\n }\n\n return args", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n setattr(request, \"session\", \"session\")\n messages = FallbackStorage(request)\n setattr(request, \"_messages\", messages)\n return view", "def inject_post_param(request, injectionstring):\r\n requests = []\r\n return requests", "def _get_request_args(self):\n str_args = False\n request_args = {}\n if request.method == \"POST\" or request.method == \"PUT\":\n # Use only body args and ignore any args from query string\n if request.headers.get(\"content-type\", \"\").startswith(CONT_TYPE_JSON):\n # JSON body request\n if request.data:\n request_args = json_loads(request.data)\n if GATEWAY_ARG_PARAMS not in request_args:\n # Magic fallback: Directly use JSON first level as args if params key not present\n request_args = {GATEWAY_ARG_PARAMS: request_args}\n elif request.form:\n # Form encoded payload\n if GATEWAY_ARG_JSON in request.form:\n payload = request.form[GATEWAY_ARG_JSON]\n request_args = json_loads(payload)\n if GATEWAY_ARG_PARAMS not in request_args:\n # Magic fallback: Directly use JSON first level as args if params key not present\n request_args = {GATEWAY_ARG_PARAMS: request_args}\n else:\n # Fallback: Directly use form values\n str_args = True\n request_args = {GATEWAY_ARG_PARAMS: request.form.to_dict(flat=True)}\n else:\n # No args found in body\n request_args = {GATEWAY_ARG_PARAMS: {}}\n\n # Extract file args\n for file_arg in request.files:\n try:\n file_handle = request.files[file_arg]\n arg_val = file_handle.read()\n request_args[GATEWAY_ARG_PARAMS][file_arg] = arg_val\n except Exception as ex:\n log.exception(\"Error reading request file argument %s\", file_arg)\n\n elif request.method == \"GET\":\n str_args = True\n REQ_ARGS_SPECIAL = {\"authtoken\", \"timeout\", \"headers\"}\n args_dict = request.args.to_dict(flat=True)\n request_args = {k: request.args[k] for k in args_dict if k in REQ_ARGS_SPECIAL}\n req_params = {k: request.args[k] for k in args_dict if k not in REQ_ARGS_SPECIAL}\n request_args[GATEWAY_ARG_PARAMS] = req_params\n\n request_args[\"str_args\"] = str_args # Indicate downstream that args are str (GET or form encoded)\n #log.info(\"Request args: %s\" % request_args)\n return request_args", "def get_dict_from_request(request):\n if request.method == 'GET':\n return request.GET\n elif request.method == 'POST':\n return request.POST\n else:\n raise NotImplemented", "def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)", "def mock_as_view(view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def get_view(self, request=None, args=None, kwargs=None, **initkwargs):\n view = self.view_class(**initkwargs)\n view.setup(request, *(args or ()), **(kwargs or {}))\n return view", "def do(request, **kwargs):\n logger.debug(request.raw_post_data)\n if 
request.method == 'POST':\n method = request.POST.get('_method', 'POST')\n else:\n method = request.method\n return {\n 'get': _show,\n 'post': _create,\n 'put': _update,\n 'delete': _destroy,\n }[method.lower()](request, **kwargs)" ]
[ "0.64548486", "0.63946515", "0.6385206", "0.62623554", "0.6136606", "0.6119369", "0.59940606", "0.59673434", "0.589182", "0.581749", "0.5788581", "0.5761854", "0.5758536", "0.57168823", "0.5716371", "0.5703287", "0.5641552", "0.5633383", "0.56281084", "0.5625707", "0.56136554", "0.5602854", "0.5578837", "0.55727136", "0.55683166", "0.55424315", "0.5503774", "0.54859054", "0.54516345", "0.5444718" ]
0.79954726
0
Returns a hash of attributes which have changed from the old_version to the new_version. Restricts the attributes compared to the list of interesting_attrs passed in.
def changed_attrs(old_version, new_version, interesting_attrs): # Use an OrderedDict so that we preserve the order from interesting_attrs changed = OrderedDict() for attr in interesting_attrs: if attr in old_version and attr not in new_version: changed[attr] = [old_version[attr], None] elif attr in new_version and attr not in old_version: changed[attr] = [None, new_version[attr]] elif old_version[attr] != new_version[attr]: changed[attr] = [old_version[attr], new_version[attr]] return changed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changed_attrs_by_version(model, interesting_attrs):\n changed = OrderedDict()\n history = reversion.get_for_object(model).order_by(\"revision__date_created\")\n for index, version in enumerate(history):\n # We're only interested in changes\n if index > 0:\n try:\n old = history[index - 1].field_dict\n new = version.field_dict\n changed[version] = changed_attrs(old, new, interesting_attrs)\n except DeserializationError:\n # Django's deserialisation framework gets upset if it tries to get\n # a model instance from some json or xml and the instance has fields\n # which are no longer in the Model - eg: because you just deleted them\n # in a South migration.\n # In Django 1.5 you can tell it to ignorenonexistent and it'll just work\n # which django-reversion helpfully does since:\n # https://github.com/etianen/django-reversion/issues/221\n # In Django 1.4 there is not this option, and thus django-reversion\n # hits a block when trying to get its' historical versions of the model\n # and passes on this error to us. At which point we can nothing useful\n # with it, and so we just ignore that point in history.\n pass\n return changed", "def diff(self, old_model_sig):\n if not isinstance(old_model_sig, ModelSignature):\n raise TypeError('Must provide a ModelSignature to diff against, '\n 'not a %s.' % type(old_model_sig))\n\n # Go through all the fields, looking for changed and deleted fields.\n changed_fields = OrderedDict()\n deleted_fields = []\n\n for old_field_sig in old_model_sig.field_sigs:\n field_name = old_field_sig.field_name\n new_field_sig = self.get_field_sig(field_name)\n\n if new_field_sig:\n # Go through all the attributes on the field, looking for\n # changes.\n changed_field_attrs = new_field_sig.diff(old_field_sig)\n\n if changed_field_attrs:\n # There were attribute changes. Store those with the field.\n changed_fields[field_name] = changed_field_attrs\n else:\n # The field has been deleted.\n deleted_fields.append(field_name)\n\n # Go through the list of added fields and add any that don't\n # exist in the original field list.\n added_fields = [\n field_sig.field_name\n for field_sig in self.field_sigs\n if not old_model_sig.get_field_sig(field_sig.field_name)\n ]\n\n # Build a list of changes to Model.Meta attributes.\n meta_changed = []\n\n if self.has_unique_together_changed(old_model_sig):\n meta_changed.append('unique_together')\n\n if self.index_together != old_model_sig.index_together:\n meta_changed.append('index_together')\n\n if list(self.index_sigs) != list(old_model_sig.index_sigs):\n meta_changed.append('indexes')\n\n if list(self.constraint_sigs) != list(old_model_sig.constraint_sigs):\n meta_changed.append('constraints')\n\n return OrderedDict(\n (key, value)\n for key, value in (('added', added_fields),\n ('changed', changed_fields),\n ('deleted', deleted_fields),\n ('meta_changed', meta_changed))\n if value\n )", "def diff(self, old_model_sig):\n if not isinstance(old_model_sig, ModelSignature):\n raise TypeError('Must provide a ModelSignature to diff against, '\n 'not a %s.' 
% type(old_model_sig))\n\n # Go through all the fields, looking for changed and deleted fields.\n changed_fields = OrderedDict()\n deleted_fields = []\n\n for old_field_sig in old_model_sig.field_sigs:\n field_name = old_field_sig.field_name\n new_field_sig = self.get_field_sig(field_name)\n\n if new_field_sig:\n # Go through all the attributes on the field, looking for\n # changes.\n changed_field_attrs = new_field_sig.diff(old_field_sig)\n\n if changed_field_attrs:\n # There were attribute changes. Store those with the field.\n changed_fields[field_name] = changed_field_attrs\n else:\n # The field has been deleted.\n deleted_fields.append(field_name)\n\n # Go through the list of added fields and add any that don't\n # exist in the original field list.\n added_fields = [\n field_sig.field_name\n for field_sig in self.field_sigs\n if not old_model_sig.get_field_sig(field_sig.field_name)\n ]\n\n # Build a list of changes to Model.Meta attributes.\n meta_changed = []\n\n if self.has_unique_together_changed(old_model_sig):\n meta_changed.append('unique_together')\n\n if self.index_together != old_model_sig.index_together:\n meta_changed.append('index_together')\n\n if list(self.index_sigs) != list(old_model_sig.index_sigs):\n meta_changed.append('indexes')\n\n return OrderedDict(\n (key, value)\n for key, value in (('added', added_fields),\n ('changed', changed_fields),\n ('deleted', deleted_fields),\n ('meta_changed', meta_changed))\n if value\n )", "def diff(self, old_field_sig):\n if not isinstance(old_field_sig, FieldSignature):\n raise TypeError('Must provide a FieldSignature to diff against, '\n 'not a %s.' % type(old_field_sig))\n\n changed_attrs = [\n attr\n for attr in (set(old_field_sig.field_attrs) |\n set(self.field_attrs))\n if self.get_attr_value(attr) != old_field_sig.get_attr_value(attr)\n ]\n\n # See if the field type has changed.\n old_field_type = old_field_sig.field_type\n new_field_type = self.field_type\n\n if old_field_type is not new_field_type:\n try:\n field_type_changed = (old_field_type().get_internal_type() !=\n new_field_type().get_internal_type())\n except TypeError:\n # We can't instantiate those, so assume the field\n # type has indeed changed.\n field_type_changed = True\n\n if field_type_changed:\n changed_attrs.append('field_type')\n\n # FieldSignature.related_model is not a field attribute,\n # but we do need to track its changes.\n if old_field_sig.related_model != self.related_model:\n changed_attrs.append('related_model')\n\n return sorted(changed_attrs)", "def attr_comparison(obj1,obj2,attrs):\n return [Difference(f\"{obj1.__class__.__name__}.{attr}\",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))]", "def _difference_update_attributes(self, data):\n old_attributes = {}\n \n # avatar\n self._update_avatar(data, old_attributes)\n \n # boosts_since\n boosts_since = parse_boosts_since(data)\n if self.boosts_since != boosts_since:\n old_attributes['boosts_since'] = self.boosts_since\n self.boosts_since = boosts_since\n \n flags = parse_flags(data)\n if self.flags != flags:\n old_attributes['flags'] = self.flags\n self.flags = flags\n \n # nick\n nick = parse_nick(data)\n if self.nick != nick:\n old_attributes['nick'] = self.nick\n self.nick = nick\n \n # pending\n pending = parse_pending(data)\n if pending != self.pending:\n old_attributes['pending'] = self.pending\n self.pending = pending\n \n # role_ids\n role_ids = parse_role_ids(data)\n if role_ids != self.role_ids:\n old_attributes['role_ids'] 
= self.role_ids\n self.role_ids = role_ids\n \n # timed_out_until\n timed_out_until = parse_timed_out_until(data)\n if self.timed_out_until != timed_out_until:\n old_attributes['timed_out_until'] = self.timed_out_until\n self.timed_out_until = timed_out_until\n \n return old_attributes", "def diff(self, old_field_sig):\n if not isinstance(old_field_sig, FieldSignature):\n raise TypeError('Must provide a FieldSignature to diff against, '\n 'not a %s.' % type(old_field_sig))\n\n changed_attrs = [\n attr\n for attr in (set(old_field_sig.field_attrs) |\n set(self.field_attrs))\n if self.get_attr_value(attr) != old_field_sig.get_attr_value(attr)\n ]\n\n # See if the field type has changed.\n old_field_type = old_field_sig.field_type\n new_field_type = self.field_type\n\n if old_field_type is not new_field_type:\n try:\n old_field = old_field_type(**old_field_sig.field_attrs)\n new_field = new_field_type(**self.field_attrs)\n\n field_type_changed = (old_field.get_internal_type() !=\n new_field.get_internal_type())\n except TypeError:\n # We can't instantiate those, so assume the field\n # type has indeed changed.\n field_type_changed = True\n\n if field_type_changed:\n changed_attrs.append('field_type')\n\n # FieldSignature.related_model is not a field attribute,\n # but we do need to track its changes.\n if old_field_sig.related_model != self.related_model:\n changed_attrs.append('related_model')\n\n return sorted(changed_attrs)", "def _state_diff(\n old_state: State, new_state: State\n) -> dict[str, dict[str, dict[str, dict[str, str | list[str]]]]]:\n additions: dict[str, Any] = {}\n diff: dict[str, dict[str, Any]] = {STATE_DIFF_ADDITIONS: additions}\n new_state_context = new_state.context\n old_state_context = old_state.context\n if old_state.state != new_state.state:\n additions[COMPRESSED_STATE_STATE] = new_state.state\n if old_state.last_changed != new_state.last_changed:\n additions[COMPRESSED_STATE_LAST_CHANGED] = new_state.last_changed.timestamp()\n elif old_state.last_updated != new_state.last_updated:\n additions[COMPRESSED_STATE_LAST_UPDATED] = new_state.last_updated.timestamp()\n if old_state_context.parent_id != new_state_context.parent_id:\n additions[COMPRESSED_STATE_CONTEXT] = {\"parent_id\": new_state_context.parent_id}\n if old_state_context.user_id != new_state_context.user_id:\n if COMPRESSED_STATE_CONTEXT in additions:\n additions[COMPRESSED_STATE_CONTEXT][\"user_id\"] = new_state_context.user_id\n else:\n additions[COMPRESSED_STATE_CONTEXT] = {\"user_id\": new_state_context.user_id}\n if old_state_context.id != new_state_context.id:\n if COMPRESSED_STATE_CONTEXT in additions:\n additions[COMPRESSED_STATE_CONTEXT][\"id\"] = new_state_context.id\n else:\n additions[COMPRESSED_STATE_CONTEXT] = new_state_context.id\n if (old_attributes := old_state.attributes) != (\n new_attributes := new_state.attributes\n ):\n for key, value in new_attributes.items():\n if old_attributes.get(key) != value:\n additions.setdefault(COMPRESSED_STATE_ATTRIBUTES, {})[key] = value\n if removed := set(old_attributes).difference(new_attributes):\n # sets are not JSON serializable by default so we convert to list\n # here if there are any values to avoid jumping into the json_encoder_default\n # for every state diff with a removed attribute\n diff[STATE_DIFF_REMOVALS] = {COMPRESSED_STATE_ATTRIBUTES: list(removed)}\n return {ENTITY_EVENT_CHANGE: {new_state.entity_id: diff}}", "def _compare_ioc_properties(old: Dict[str, IOC], new: Dict[str, IOC]):\n new_iocs = set()\n changed_iocs = set()\n removed_iocs 
= set()\n\n _attributes = [\"macros\", \"pvs\", \"pvsets\", \"simlevel\", \"restart\", \"autostart\"]\n\n for ioc_name in new.keys():\n if ioc_name not in old.keys():\n # If not in previously then add it to new iocs\n new_iocs.add(ioc_name)\n elif any(getattr(old[ioc_name], attr) != getattr(new[ioc_name], attr) for attr in _attributes):\n # If any attributes have changed, add to changed iocs\n changed_iocs.add(ioc_name)\n\n for ioc_name in old.keys():\n if ioc_name not in new:\n removed_iocs.add(ioc_name)\n\n return new_iocs, changed_iocs, removed_iocs", "def get_changes(old_obj: Dict, new_obj: Dict):\n from_ = {}\n to_ = {}\n for key, value in new_obj.items():\n if \"_hash\" not in key:\n if key not in old_obj:\n to_[key] = value\n elif old_obj[key] != value:\n from_[key] = old_obj[key]\n to_[key] = value\n return {\"from\": from_, \"to\": to_}", "def diff(self, old_app_sig):\n if not isinstance(old_app_sig, AppSignature):\n raise TypeError('Must provide an AppSignature to diff against, '\n 'not a %s.' % type(old_app_sig))\n\n deleted_models = []\n changed_models = OrderedDict()\n\n # Process the models in the application, looking for changes to\n # fields and meta attributes.\n for old_model_sig in old_app_sig.model_sigs:\n model_name = old_model_sig.model_name\n new_model_sig = self.get_model_sig(model_name)\n\n if new_model_sig:\n model_changes = new_model_sig.diff(old_model_sig)\n\n if model_changes:\n # There are changes for this model. Store that in the\n # diff.\n changed_models[model_name] = model_changes\n else:\n # The model has been deleted.\n deleted_models.append(model_name)\n\n # Build the dictionary of changes for the app.\n return OrderedDict(\n (key, value)\n for key, value in (('changed', changed_models),\n ('deleted', deleted_models))\n if value\n )", "def determine_changed_sources(self, other: DevJarSignature) -> set[str]:\n res = {}\n all_keys = set(self.modified_sources.keys()) | set(other.modified_sources.keys())\n for key in all_keys:\n if modified_sources.get(key) != other.get(key):\n res.add(key)\n if not res:\n assert self.changed_sources == other.changed_sources\n return res", "def _diff_ns(self, old_ns, new_ns):\n diff = {}\n for info, group in self._all_opt_infos():\n opt = info['opt']\n if not opt.mutable:\n continue\n groupname = group.name if group else None\n try:\n old, _ = opt._get_from_namespace(old_ns, groupname)\n except KeyError:\n old = None\n try:\n new, _ = opt._get_from_namespace(new_ns, groupname)\n except KeyError:\n new = None\n if old != new:\n diff[(groupname, opt.name)] = (old, new)\n return diff", "def model_instance_diff(old, new):\n if not(old is None or isinstance(old, Document)):\n raise TypeError(\"The supplied old instance is not a valid model instance.\")\n if not(new is None or isinstance(new, Document)):\n raise TypeError(\"The supplied new instance is not a valid model instance.\")\n\n diff = {}\n\n if old is not None and new is not None:\n fields = set(list(old._fields.keys()) + list(new._fields.keys()))\n elif old is not None:\n fields = set(list(get_fields_in_model(old).keys()))\n elif new is not None:\n fields = set(list(get_fields_in_model(new).keys()))\n else:\n fields = set()\n\n for field in fields:\n try:\n old_value = smart_text(getattr(old, field, None))\n except Exception as e:\n old_value = None\n\n try:\n new_value = smart_text(getattr(new, field, None))\n except Exception as e:\n new_value = None\n\n if old_value != new_value:\n diff[field] = (smart_text(old_value), smart_text(new_value))\n\n if len(diff) == 0:\n 
diff = None\n\n return diff", "def diff(self, old, new):\n added = {}\n removed = {}\n updated = {}\n new_keys = set(new.keys())\n old_keys = set(old.keys())\n for key in new_keys.difference(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n added[key] = \"<hidden>\"\n else:\n added[key] = new[key]\n self.stack.pop()\n for key in old_keys.difference(new_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n removed[key] = \"<hidden>\"\n else:\n removed[key] = old[key]\n self.stack.pop()\n for key in new_keys.intersection(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n new_value = new[key]\n old_value = old[key]\n if isinstance(new_value, dict) and isinstance(old_value, dict):\n changes = self.diff(old_value, new_value)\n if changes:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = \"<hidden>\"\n else:\n updated[key] = changes\n elif new_value != old_value:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key]= dict(new_value=\"<hidden>\",\n old_value=\"<hidden>\")\n else:\n updated[key]= dict(new_value= new_value,\n old_value=old_value)\n self.stack.pop()\n result = {}\n if added:\n result['added'] = added\n if removed:\n result['removed'] = removed\n if updated:\n result['updated'] = updated\n return result", "def compare(old, new):\n ret = {}\n\n if not (status(old) and status(new)):\n raise CommandExecutionError(\"Frozen state not found.\")\n\n for ofile, nfile in zip(_paths(old), _paths(new)):\n with fopen(ofile, \"r\") as ofp:\n old_dict = json.load(ofp)\n with fopen(nfile, \"r\") as nfp:\n new_dict = json.load(nfp)\n if ofile.endswith(\"-pkgs.yml\"):\n ret[\"pkgs\"] = salt.utils.dictdiffer.deep_diff(old_dict, new_dict)\n elif ofile.endswith(\"-reps.yml\"):\n ret[\"repos\"] = salt.utils.dictdiffer.deep_diff(old_dict, new_dict)\n\n return ret", "def item_diffs(old_items=None, new_items=None):\n\n if not old_items:\n old_items = {}\n\n if not new_items:\n new_items = {}\n\n new_ids = set(new_items.keys())\n old_ids = set(old_items.keys())\n added = [new_items[x] for x in new_ids.difference(old_ids)]\n removed = [old_items[x] for x in old_ids.difference(new_ids)]\n intersected_ids = new_ids.intersection(old_ids)\n updated = [new_items[x] for x in [x for x in intersected_ids if new_items[x] != old_items[x]]]\n\n return {\n 'added': added,\n 'removed': removed,\n 'updated': updated\n }", "def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val", "def get_diff(self, old, new, add_all):\n\n adds = []\n dels = []\n\n if old:\n oldcfg = old[0].get('config', '')\n else:\n oldcfg = ''\n\n if new:\n newcfg = new[0].get('config', '')\n else:\n newcfg = ''\n\n if oldcfg and not newcfg:\n dels = new\n elif (newcfg and not oldcfg) or add_all:\n adds = new\n else:\n hash_old = hash(oldcfg)\n hash_new = hash(newcfg)\n if hash_old != hash_new:\n adds = new\n\n return adds, dels", "def compare_contact(c1, c2, unique_id, attrs):\r\n 
changes = {}\r\n if c1 != c2:\r\n for key in attrs:\r\n if c1[unique_id] != c2[unique_id]:\r\n raise Exception('bad contact comparaison unique_id do not match!')\r\n # copy the unique_id\r\n changes[unique_id] = c1[unique_id]\r\n # copy all values that changed\r\n if c1[key] != c2[key]:\r\n changes[key] = c1[key]\r\n return changes", "def changed(self):\n return set(o for o in self.intersect\n if self.past_dict[o] != self.current_dict[o])", "def diff(self, old_project_sig):\n if not isinstance(old_project_sig, ProjectSignature):\n raise TypeError('Must provide a ProjectSignature to diff against, '\n 'not a %s.' % type(old_project_sig))\n\n changed_apps = OrderedDict()\n deleted_apps = OrderedDict()\n\n for old_app_sig in old_project_sig.app_sigs:\n app_id = old_app_sig.app_id\n new_app_sig = self.get_app_sig(app_id)\n\n if new_app_sig:\n app_changes = new_app_sig.diff(old_app_sig)\n\n if app_changes:\n # There are changes for this application. Store that in the\n # diff.\n changed_apps[app_id] = app_changes\n else:\n # The application has been deleted.\n deleted_apps[app_id] = [\n model_sig.model_name\n for model_sig in old_app_sig.model_sigs\n ]\n\n return OrderedDict(\n (key, value)\n for key, value in (('changed', changed_apps),\n ('deleted', deleted_apps))\n if value\n )", "def diff(self, old_project_sig):\n if not isinstance(old_project_sig, ProjectSignature):\n raise TypeError('Must provide a ProjectSignature to diff against, '\n 'not a %s.' % type(old_project_sig))\n\n changed_apps = OrderedDict()\n deleted_apps = OrderedDict()\n\n for old_app_sig in old_project_sig.app_sigs:\n new_app_sig = self.get_app_sig(old_app_sig.app_id)\n\n if new_app_sig:\n app_changes = new_app_sig.diff(old_app_sig)\n\n if app_changes:\n # There are changes for this application. Store that\n # in the diff.\n changed_apps[new_app_sig.app_id] = app_changes\n else:\n # The application has been deleted.\n deleted_apps[old_app_sig.app_id] = [\n model_sig.model_name\n for model_sig in old_app_sig.model_sigs\n ]\n\n return OrderedDict(\n (key, value)\n for key, value in (('changed', changed_apps),\n ('deleted', deleted_apps))\n if value\n )", "def allow_version_invalid_attributes(self):\n return self._allow_version_invalid_attributes", "def compare_changes(obj, **kwargs):\n changes = {}\n for k, v in obj.items():\n if k in kwargs:\n if v != kwargs[k]:\n changes[k] = kwargs[k]\n return changes", "def model_instance_diff(obj: Any):\n diff = []\n for mapper_property in get_fields_in_model(obj):\n if isinstance(mapper_property, ColumnProperty):\n key = mapper_property.key\n attribute_state = inspect(obj).attrs.get(key)\n history = attribute_state.history\n if history.has_changes():\n diff.append({\n 'field': key,\n 'old': str(history.deleted[0]) if history.deleted else None,\n 'new': str(attribute_state.value)\n })\n return diff", "def _compare_scalars(self, old, new, name=None):\n # Explicitly excluded arguments\n if old != new:\n return {'---': old, '+++': new}\n else:\n return None", "def diff(self, old_app_sig):\n if not isinstance(old_app_sig, AppSignature):\n raise TypeError('Must provide an AppSignature to diff against, '\n 'not a %s.' 
% type(old_app_sig))\n\n deleted_models = []\n changed_models = OrderedDict()\n meta_changed = OrderedDict()\n\n # Process the models in the application, looking for changes to\n # fields and meta attributes.\n for old_model_sig in old_app_sig.model_sigs:\n model_name = old_model_sig.model_name\n new_model_sig = self.get_model_sig(model_name)\n\n if new_model_sig:\n model_changes = new_model_sig.diff(old_model_sig)\n\n if model_changes:\n # There are changes for this model. Store that in the\n # diff.\n changed_models[model_name] = model_changes\n else:\n # The model has been deleted.\n deleted_models.append(model_name)\n\n # Check for changes to basic metadata for the app.\n for key in ('app_id', 'legacy_app_label'):\n old_value = getattr(old_app_sig, key)\n new_value = getattr(self, key)\n\n if old_value != new_value:\n meta_changed[key] = {\n 'old': old_value,\n 'new': new_value,\n }\n\n # Check if the upgrade method has changed. We have to do this a bit\n # carefully, as the old value might be None, due to:\n #\n # 1. Coming from a version 1 signature (meaning that we only care if\n # there are actual changes to the app and we're also transitioning\n # to Migrations)\n #\n # 2. Coming from a version 2 signature (including a database scan)\n # and the old signature doesn't list an upgrade method for the\n # app (meaning it likely didn't use either evolutions or\n # migrations).\n old_upgrade_method = old_app_sig.upgrade_method\n new_upgrade_method = self.upgrade_method\n old_sig_version = old_app_sig._loaded_sig_version\n\n if (old_upgrade_method != new_upgrade_method and\n ((old_sig_version is None and\n old_upgrade_method is not None) or\n (old_sig_version == 1 and\n (changed_models or deleted_models) and\n old_upgrade_method is None and\n new_upgrade_method != UpgradeMethod.EVOLUTIONS))):\n # The upgrade method has changed. If we're moving to migrations,\n # discard any other changes to the model. We're working with the\n # assumption that the migrations will account for any changes.\n #\n # The assumption may technically be wrong (there may be\n # evolutions to apply before migrations takes over), but we can't\n # easily separate out the changes made by each method. However,\n # since we've recorded a change to this app, the evolver will\n # still apply any remaining evolutions, so we're covered.\n meta_changed['upgrade_method'] = {\n 'old': old_upgrade_method,\n 'new': new_upgrade_method,\n }\n\n if new_upgrade_method == UpgradeMethod.MIGRATIONS:\n # If we're using migrations, we don't want to show any other\n # changes to the models. 
Those are handled by migrations, and\n # aren't something we want to include in the diff, since they\n # can't be resolved by evolutions.\n changed_models.clear()\n deleted_models = []\n\n # Build the dictionary of changes for the app.\n return OrderedDict(\n (key, value)\n for key, value in (('changed', changed_models),\n ('deleted', deleted_models),\n ('meta_changed', meta_changed))\n if value\n )", "def diff(self, other):\n if not isinstance(other, Article):\n raise TypeError(\"Can only diff two Articles.\")\n\n ndiffs = 0\n # Check for equality first (don't need to print anything in this case)\n if self == other: # defined via __eq__\n return 0\n\n # Compare all attributes except for time added and opened\n attribs = sorted(set(vars(self)) - {\"time_added\", \"time_opened\"})\n # Get field width (for pretty printing)\n maxlen = max(len(attrib) for attrib in attribs)\n # Check individual keys\n for attrib in attribs:\n # We need to convert authors to a string\n if attrib == \"authors\":\n if self.authors is not None:\n old_value = \", \".join(self.format_authors(\"full\"))\n else:\n old_value = None\n if other.authors is not None:\n new_value = \", \".join(other.format_authors(\"full\"))\n else:\n new_value = None\n # Other attributes can be accessed via the dict\n else:\n old_value = attrgetter(attrib)(self)\n new_value = attrgetter(attrib)(other)\n # Compare them\n if old_value is not None and old_value == new_value:\n print(f\"{attrib:>{maxlen}}: {old_value}\")\n else:\n ndiffs += 1\n if old_value is not None:\n print(f\"{attrib:>{maxlen}}: \"\n f\"{_g.ansiDiffRed}- {old_value}{_g.ansiReset}\")\n attrib = \"\" # avoid printing the attribute name twice\n if new_value is not None:\n print(f\"{attrib:>{maxlen}}: \"\n f\"{_g.ansiDiffGreen}+ {new_value}{_g.ansiReset}\")\n return ndiffs", "def compare_changes(obj, **kwargs):\n changes = {}\n for key, value in obj.items():\n if key in kwargs:\n if value != kwargs[key]:\n changes[key] = kwargs[key]\n return changes" ]
[ "0.7093283", "0.63105875", "0.62489676", "0.61689854", "0.6082315", "0.60821235", "0.60707515", "0.5973482", "0.5956527", "0.5847145", "0.57240915", "0.5629343", "0.5622167", "0.5589115", "0.5551084", "0.55124044", "0.55024457", "0.5489399", "0.54752064", "0.5451373", "0.5446589", "0.54295284", "0.5426119", "0.5425422", "0.5425021", "0.5412744", "0.5398867", "0.5395918", "0.53561985", "0.53540635" ]
0.84187317
0
Returns an English string describing a hash of changes, drawn from a dictionary of transition descriptions.
def changes_as_string(changed_attrs, transitions): changes = [] if not changed_attrs: return '' for attr, change in changed_attrs.items(): for transition_description, possible_transitions in transitions[attr].items(): for transition in possible_transitions: if transition == change: changes.append(transition_description) if len(changes) > 2: return '{0} and {1}'.format(', '.join(changes[:-1]), changes[-1]) else: return ' and '.join(changes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cached_state_diff_message(iden: int, event: Event) -> str:\n return _cached_state_diff_message(event).replace(IDEN_JSON_TEMPLATE, str(iden), 1)", "def get_trans_dict(self):\n translated = dict([(k,v) for (k,v) in self._trans_dict.items() if k is not v])\n frm = \" \".join([ c + ' |' for c in translated.keys()])\n to = \" \".join([ c + ' |' for c in translated.values()])\n\n return \"code: \\t{}\\nactual:\\t{}\".format(frm, to)", "def convertStateToHash(values):\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)", "def convertStateToHash(values):\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)", "def stringify_change(change):\n key = change.key\n a = change.a or '<null>'\n b = change.b or '<null>'\n return '{}: {} => {}'.format(key, a, b)", "def to_string(self):\r\n return '\\n'.join([' '.join([trans.start_state, trans.end_state, trans.symbol])\r\n for trans in self.transitions]) + '\\n' + self.start_state + ' ' + ' '.join(self.final_states)", "def hash_status_msg(status):\n messages = {\n 'match': 'Baseline hash matches',\n 'diff': 'Baseline hash differs',\n 'missing': 'Baseline hash not found',\n 'generated': 'Baseline hash was generated',\n }\n return messages[status]", "def _cached_state_diff_message(event: Event) -> str:\n return message_to_json(\n {\"id\": IDEN_TEMPLATE, \"type\": \"event\", \"event\": _state_diff_event(event)}\n )", "def _gen_conf_changes_text(action, title, conf_list):\n if not (conf_list and\n any(c.dirty or c.has_moved for c in conf_list)):\n return \"\"\n\n lines = [\"%s %s Changes:\" % (action, title)]\n\n if any(c.has_moved for c in conf_list):\n lines.append(\" Reordering:\")\n lines.extend([\" %s ==> %s\" % (c.orig_path, c.path)\n for c in conf_list if c.has_moved])\n if any(c.dirty for c in conf_list):\n lines.append(\" Modifying:\")\n lines.extend([\" %s\" % c.path\n for c in conf_list if c.has_moved])\n lines.append(\"\")\n\n return \"\\n\".join(lines)", "def prepareExplainerText(amount, ranges):\n text = \"\\n\"\n for currKey in amount:\n text += f\"{currKey}: {ranges[currKey]} | {amount[currKey]}\\n\"\n text += \"\\n\\n\"\n return text", "def _latex_transition_label_(self, transition,\n format_function=sage.misc.latex.latex):\n return ' '", "def revision_list_to_str(diffs: List[Dict]) -> str:\n return ', '.join([diff_to_str(d['id']) for d in diffs])", "def state_transl(state):\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])", "def state_transl(state):\n nonlocal state_cnt\n nonlocal state_transl_dict\n\n if state not in state_transl_dict.keys():\n state_transl_dict[state] = state_cnt\n state_cnt += 1\n\n return str(state_transl_dict[state])", "def _latex_transition_label_(self, transition,\n format_function=sage.misc.latex.latex):\n return format_function(transition.word_in)", "def hash_value(board_state):\n res = \"\"\n for i in range(1,10):\n res = res + board_state[i]\n return res", "def help_enhancer(_h):\n return ''.join(reversed(_h))", "def _latex_transition_label_(self, transition,\n format_function=sage.misc.latex.latex):\n return (format_function(transition.word_in) + \"\\\\mid \"\n + format_function(transition.word_out))", "def hash(self) -> str:\r\n ...", "def get_hash():\n return render(build_hash('command'),False)", "def pretty_state(state, was_changed):\n pstate = state + \" \"\n if was_changed:\n pstate = state + 
\"*\"\n if not uses_rich:\n return pstate\n\n scols = {\n # forks\n 'wg' : 'gray',\n 'wu' : 'yellow',\n 'wd' : 'blue',\n\n # philosophers\n 'sl' : 'green',\n 'wt' : 'yellow',\n 'gl' : 'cyan',\n 'gr' : 'bright_cyan',\n 'es' : 'purple',\n 'dl' : 'yellow',\n 'dr' : 'bright_yellow',\n 'xx' : 'white', # done\n }\n if was_changed:\n return f\"[red][bold]{pstate}[/bold][/red]\"\n col = scols.get(state, 'white')\n return f\"[{col}]{pstate}[/{col}]\"", "def get_hash_string(self) -> str:\n\t\ts = ''\n\t\tfor i in range(self.size):\n\t\t\ts += ''.join(map(str,self.tiles[i]))\n\t\treturn s", "def fold_changes(self) -> str:\n return self._fold_changes", "def __str__(self):\n # Build the string line by line. Join at the end.\n lines = []\n lines.append(\"Initial State: {{{}}}\".format(self.initial_state))\n lines.append(\n \"Final States: {{{}}}\".format(\n \",\".join(map(str, self.final_states))))\n\n # column headers\n lines.append(\n \"State\\t{}\".format(\"\\t\".join(self.alphabet)))\n\n # For each state, print transitions\n for state_name in range(1, len(self.transitions) + 1):\n line = \"{}\".format(state_name)\n for symbol in self.alphabet:\n line += \"\\t{{{}}}\".format(\n \",\".join(map(str, self.transitions.get(\n state_name, dict()).get(symbol, []))))\n lines.append(line)\n\n return \"\\n\".join(lines)", "def __str__(self):\n string = \"\"\n for transaction in self.transaction_list:\n string += transaction.hash+\"|\"\n string = string[:-1]\n return string", "def phrase_dict(phrase):\n switcher = {\n '처음으로': '닥앤미 병원을 찾아주셔서 감사합니다. 직접문의원할시 오른쪽 아래 1:1 버튼을 눌러주시면 직접 상담 가능합니다. 1:1 상담 가능 시간은 09시 – 18시 입니다.',\n '병원 정보': '어떤 정보를 보시고 싶으신가요?',\n '병원 위치': '“닥앤미 병원 주소는 서울시 용산구 이촌동 세움상가 2층입니다.” 더 자세한 지도확인을 원하실 경우 아래 버튼을 눌러주세요',\n '병원 운영시간': '닥앤미 병원을 찾아주셔서 감사합니다. 병원 운영시간은 위의 내용과 같습니다',\n '병원 프로모션': '현재 진행되고 있는 병원 프로모션입니다. 자세히 보길 원하시면 아래의 프로모션을 선택해 주세요',\n '프로모션 A': '닥앤미에서 6월 30일까지 제공되는 프로모션 A 입니다.',\n '프로모션 B': '닥앤미에서 6월 30일까지 제공되는 프로모션 B 입니다.',\n '프로모션 C': '닥앤미에서 6월 30일까지 제공되는 프로모션 C 입니다.',\n '의료진': '안녕하세요, 닥앤미의 홍길동 전문의 입니다. 항상 최선을 다하겠습니다.',\n '병원 사진': '최고의 진료를 제공하는 닥앤미 병원입니다.',\n '병원 진료과목': '닥앤미 병원의 진료과목입니다.',\n '병원 전화하기': '닥앤미 병원 전화번호는 02 3522 XXXX 입니다. 지금 통화를 원하시면 아래 버튼을 눌러주세요'\n }\n default_text = 'Unable to find appropriate text response'\n return switcher.get(phrase, default_text)", "def update_hash(self, h):\n # Generate a sequence of fragments that add up to the canonical\n # version of the expression.\n fragments = []\n self.collect_str_fragments(fragments)\n # Update the hash. Wrapping with 'node<...>' prevents the hash\n # from being extended in a way that would clash with something we can\n # generate. (Probably not an important concern but it doesn't hurt.)\n h.update(\"node<\")\n for f in fragments:\n h.update(f)\n h.update(\">\")", "def __str__(self):\n return ''.join(str(e) + ' ' for e in self.state)", "def __str__(self):\n out = \"!!!!!!! REPORTED STATISTICS !!!!!!!\\n\"\n for k in self.order:\n if k in self.keys():\n if k in self.explainer.keys():\n out += self.explainer[k].replace('XXXX', str(\n self[k])) + \"\\n\"\n else:\n out += self[k] + \"\\n\"\n for k in self.keys():\n if k not in self.order:\n out += str(self[k])\n return out", "def _gen_udev_changes_text(action, reordered_list):\n if not reordered_list:\n return \"\"\n\n lines = [\"%s Device Re-ordering:\" % action]\n lines.extend([\" %6s ==> %s\" % (r['from'], r['to'])\n for r in reordered_list])\n lines.append(\"\")\n\n return \"\\n\".join(lines)" ]
[ "0.5954077", "0.5944224", "0.58483756", "0.58483756", "0.56275684", "0.55849534", "0.5567776", "0.55649203", "0.5451454", "0.5323222", "0.53109807", "0.5302474", "0.5242349", "0.5242349", "0.523923", "0.5228049", "0.5206012", "0.51797116", "0.51464754", "0.51434886", "0.5128021", "0.50920665", "0.5074863", "0.505028", "0.5046738", "0.50351334", "0.5033151", "0.50298864", "0.5029823", "0.50166655" ]
0.6373962
0
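As an illustrative aside on the positive document of the record above: `changes_as_string` looks up each `[old, new]` change in a per-attribute mapping from human-readable descriptions to lists of matching transitions, then joins the matched descriptions with commas and a final "and". The sketch below reproduces (re-indented) the function from the record; the `changed` and `transitions` sample values are invented purely for demonstration and are not taken from the dataset.

```python
from collections import OrderedDict


def changes_as_string(changed_attrs, transitions):
    # Reproduced (re-indented) from the record's positive document.
    changes = []
    if not changed_attrs:
        return ''
    for attr, change in changed_attrs.items():
        for transition_description, possible_transitions in transitions[attr].items():
            for transition in possible_transitions:
                if transition == change:
                    changes.append(transition_description)
    if len(changes) > 2:
        return '{0} and {1}'.format(', '.join(changes[:-1]), changes[-1])
    else:
        return ' and '.join(changes)


# Invented sample data: each change is an [old, new] pair, the shape produced
# by the changed_attrs helper that appears among this record's negatives.
changed = OrderedDict([
    ("status", ["draft", "published"]),
    ("owner", [None, "alice"]),
])
transitions = {
    "status": {"was published": [["draft", "published"]]},
    "owner": {"was assigned to alice": [[None, "alice"]]},
}

print(changes_as_string(changed, transitions))
# -> 'was published and was assigned to alice'
```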
Produce an ordered dictionary of changed attrs for a model, keyed by version
def changed_attrs_by_version(model, interesting_attrs): changed = OrderedDict() history = reversion.get_for_object(model).order_by("revision__date_created") for index, version in enumerate(history): # We're only interested in changes if index > 0: try: old = history[index - 1].field_dict new = version.field_dict changed[version] = changed_attrs(old, new, interesting_attrs) except DeserializationError: # Django's deserialisation framework gets upset if it tries to get # a model instance from some json or xml and the instance has fields # which are no longer in the Model - eg: because you just deleted them # in a South migration. # In Django 1.5 you can tell it to ignorenonexistent and it'll just work # which django-reversion helpfully does since: # https://github.com/etianen/django-reversion/issues/221 # In Django 1.4 there is not this option, and thus django-reversion # hits a block when trying to get its' historical versions of the model # and passes on this error to us. At which point we can nothing useful # with it, and so we just ignore that point in history. pass return changed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changed_attrs(old_version, new_version, interesting_attrs):\n # Use an OrderedDict so that we preserve the order from interesting_attrs\n changed = OrderedDict()\n for attr in interesting_attrs:\n if attr in old_version and attr not in new_version:\n changed[attr] = [old_version[attr], None]\n elif attr in new_version and attr not in old_version:\n changed[attr] = [None, new_version[attr]]\n elif old_version[attr] != new_version[attr]:\n changed[attr] = [old_version[attr], new_version[attr]]\n return changed", "def diff(self, old_model_sig):\n if not isinstance(old_model_sig, ModelSignature):\n raise TypeError('Must provide a ModelSignature to diff against, '\n 'not a %s.' % type(old_model_sig))\n\n # Go through all the fields, looking for changed and deleted fields.\n changed_fields = OrderedDict()\n deleted_fields = []\n\n for old_field_sig in old_model_sig.field_sigs:\n field_name = old_field_sig.field_name\n new_field_sig = self.get_field_sig(field_name)\n\n if new_field_sig:\n # Go through all the attributes on the field, looking for\n # changes.\n changed_field_attrs = new_field_sig.diff(old_field_sig)\n\n if changed_field_attrs:\n # There were attribute changes. Store those with the field.\n changed_fields[field_name] = changed_field_attrs\n else:\n # The field has been deleted.\n deleted_fields.append(field_name)\n\n # Go through the list of added fields and add any that don't\n # exist in the original field list.\n added_fields = [\n field_sig.field_name\n for field_sig in self.field_sigs\n if not old_model_sig.get_field_sig(field_sig.field_name)\n ]\n\n # Build a list of changes to Model.Meta attributes.\n meta_changed = []\n\n if self.has_unique_together_changed(old_model_sig):\n meta_changed.append('unique_together')\n\n if self.index_together != old_model_sig.index_together:\n meta_changed.append('index_together')\n\n if list(self.index_sigs) != list(old_model_sig.index_sigs):\n meta_changed.append('indexes')\n\n if list(self.constraint_sigs) != list(old_model_sig.constraint_sigs):\n meta_changed.append('constraints')\n\n return OrderedDict(\n (key, value)\n for key, value in (('added', added_fields),\n ('changed', changed_fields),\n ('deleted', deleted_fields),\n ('meta_changed', meta_changed))\n if value\n )", "def diff(self, old_model_sig):\n if not isinstance(old_model_sig, ModelSignature):\n raise TypeError('Must provide a ModelSignature to diff against, '\n 'not a %s.' % type(old_model_sig))\n\n # Go through all the fields, looking for changed and deleted fields.\n changed_fields = OrderedDict()\n deleted_fields = []\n\n for old_field_sig in old_model_sig.field_sigs:\n field_name = old_field_sig.field_name\n new_field_sig = self.get_field_sig(field_name)\n\n if new_field_sig:\n # Go through all the attributes on the field, looking for\n # changes.\n changed_field_attrs = new_field_sig.diff(old_field_sig)\n\n if changed_field_attrs:\n # There were attribute changes. 
Store those with the field.\n changed_fields[field_name] = changed_field_attrs\n else:\n # The field has been deleted.\n deleted_fields.append(field_name)\n\n # Go through the list of added fields and add any that don't\n # exist in the original field list.\n added_fields = [\n field_sig.field_name\n for field_sig in self.field_sigs\n if not old_model_sig.get_field_sig(field_sig.field_name)\n ]\n\n # Build a list of changes to Model.Meta attributes.\n meta_changed = []\n\n if self.has_unique_together_changed(old_model_sig):\n meta_changed.append('unique_together')\n\n if self.index_together != old_model_sig.index_together:\n meta_changed.append('index_together')\n\n if list(self.index_sigs) != list(old_model_sig.index_sigs):\n meta_changed.append('indexes')\n\n return OrderedDict(\n (key, value)\n for key, value in (('added', added_fields),\n ('changed', changed_fields),\n ('deleted', deleted_fields),\n ('meta_changed', meta_changed))\n if value\n )", "def changes_for_model(model):\n change_strings = []\n for version, changes in changed_attrs_by_version(model, model.REVISION_ATTRS).items():\n change_string = changes_as_string(changes, model.TRANSITIONS)\n if change_string:\n change_strings.append({\n \"user\": version.revision.user,\n \"description\": change_string,\n \"when\": version.revision.date_created\n })\n\n return change_strings", "def generate_all_attr_change(self):\n return {\n k: self.generator(spec) for k, spec in self.attribute_spec.items()\n }", "def model_instance_diff(obj: Any):\n diff = []\n for mapper_property in get_fields_in_model(obj):\n if isinstance(mapper_property, ColumnProperty):\n key = mapper_property.key\n attribute_state = inspect(obj).attrs.get(key)\n history = attribute_state.history\n if history.has_changes():\n diff.append({\n 'field': key,\n 'old': str(history.deleted[0]) if history.deleted else None,\n 'new': str(attribute_state.value)\n })\n return diff", "def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val", "def _modified(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))", "def diff(self, old_app_sig):\n if not isinstance(old_app_sig, AppSignature):\n raise TypeError('Must provide an AppSignature to diff against, '\n 'not a %s.' % type(old_app_sig))\n\n deleted_models = []\n changed_models = OrderedDict()\n\n # Process the models in the application, looking for changes to\n # fields and meta attributes.\n for old_model_sig in old_app_sig.model_sigs:\n model_name = old_model_sig.model_name\n new_model_sig = self.get_model_sig(model_name)\n\n if new_model_sig:\n model_changes = new_model_sig.diff(old_model_sig)\n\n if model_changes:\n # There are changes for this model. 
Store that in the\n # diff.\n changed_models[model_name] = model_changes\n else:\n # The model has been deleted.\n deleted_models.append(model_name)\n\n # Build the dictionary of changes for the app.\n return OrderedDict(\n (key, value)\n for key, value in (('changed', changed_models),\n ('deleted', deleted_models))\n if value\n )", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def attributes(self):\n return dict(self.__attributes)", "def OnAttributesUpdated():\n pass", "def _serialize_model(model, backrefs=None):\n\tbackrefs = [] if not backrefs else backrefs\n\n\tret = {c.name: getattr(model, c.name) for c in model.__table__.columns}\n\t\n\tfor br in backrefs:\n\t\t# not going to work right if it isn't M:1 but that's ok for now.\n\t\tret[br] = _serialize_list(getattr(model, br))\n\n\treturn ret", "def get_sync_attrs(self):\n return self._sync_attrs", "def to_dict(self):\r\n attr_dict = {key: getattr(self, key) for key in self.FEATURES}\r\n attr_dict['created'] = attr_dict['created'].isoformat()\r\n return attr_dict", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def attrs(self):\n return self.__dict__", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def get_changes(model_object):\n\n # Grab the current state of the model_object\n state = db.inspect(model_object)\n changes = False\n\n for attr in state.attrs:\n\n # We skip checking if the password_hash has changed for security reasons.\n # Even if it is being updated, we will not create a notification for this.\n if attr.key == \"password_hash\":\n continue\n\n # Check if attribute has changed. Continue to next attribute if it has not.\n hist = state.get_history(attr.key, True)\n if not hist.has_changes():\n continue\n\n else: # Found changes, so set changes to True and break from loop\n changes = True\n break\n\n return changes", "def diff(self, old_field_sig):\n if not isinstance(old_field_sig, FieldSignature):\n raise TypeError('Must provide a FieldSignature to diff against, '\n 'not a %s.' 
% type(old_field_sig))\n\n changed_attrs = [\n attr\n for attr in (set(old_field_sig.field_attrs) |\n set(self.field_attrs))\n if self.get_attr_value(attr) != old_field_sig.get_attr_value(attr)\n ]\n\n # See if the field type has changed.\n old_field_type = old_field_sig.field_type\n new_field_type = self.field_type\n\n if old_field_type is not new_field_type:\n try:\n field_type_changed = (old_field_type().get_internal_type() !=\n new_field_type().get_internal_type())\n except TypeError:\n # We can't instantiate those, so assume the field\n # type has indeed changed.\n field_type_changed = True\n\n if field_type_changed:\n changed_attrs.append('field_type')\n\n # FieldSignature.related_model is not a field attribute,\n # but we do need to track its changes.\n if old_field_sig.related_model != self.related_model:\n changed_attrs.append('related_model')\n\n return sorted(changed_attrs)", "def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())", "def diffs(self):\n diffs = []\n # XXX i know, we are using the ZODB, so sorry for the cheesy eval()\n # uhm, some logic is not right here as well, we need to look at keys\n # in both the before and after sets :(\n if not self.before or not self.after:\n return []\n before = eval(self.before)\n # pfft!\n if not before:\n return []\n after = eval(self.after)\n for k,v in before.items():\n if k in ['objectClass','userPassword']:\n continue\n try:\n if k == 'uniqueMember':\n added, removed = uniqueMemberDiff(\n v, after['uniqueMember'] )\n diffs.append( {'attribute' : k,\n 'added' : added,\n 'removed' : removed,\n }\n )\n elif str(v) != str(after[k]):\n diffs.append( { 'attribute' : k,\n 'before' : before[k],\n 'after' : after[k] }\n )\n except KeyError:\n pass\n return diffs", "def get_changed_columns(self):\r\n return [k for k,v in self._values.items() if v.changed]", "def model_attributes(self, app_label, model):\n model_name = model.__name__\n model_name_plural = self.model_name_plural(model)\n slug_field = self.get_unique_slug_field_name(model)\n slug_field_name = slug_field.name if slug_field else \"slug\"\n lookup_field = slug_field_name if slug_field else \"pk\"\n return {\n 'app_label': app_label,\n 'model': model,\n 'model_name': model_name,\n 'model_name_slug': self.camel_to_slug(model_name),\n 'model_name_plural': model_name_plural,\n 'model_name_plural_slug': self.camel_to_slug(model_name_plural),\n 'model_fields': self.get_field_names_for_model(model),\n 'slug_field': slug_field,\n 'slug_field_name': slug_field_name,\n 'lookup_field': lookup_field\n }", "def get_fields_in_model(instance: Any) -> List:\n from auditlog.registry import auditlog\n\n attrs = object_mapper(instance).iterate_properties\n model_attrs = auditlog.get_model_fields(instance.__class__)\n if model_attrs['include_fields']:\n attrs = (attr for attr in attrs if attr.key in model_attrs['include_fields'])\n if model_attrs['exclude_fields']:\n attrs = (attr for attr in attrs if attr.key not in model_attrs['exclude_fields'])\n\n return attrs", "def keys(self):\n return [a.name for a in self.__attrs_attrs__]", "def serialize(self, sig_version=LATEST_SIGNATURE_VERSION):\n validate_sig_version(sig_version)\n\n type_module = self.type.__module__\n\n if type_module.startswith('django.db.models.constraints'):\n type_module = 'django.db.models'\n\n attrs = {}\n\n for key, value in six.iteritems(self.attrs):\n if hasattr(value, 'deconstruct'):\n attr_type_path, attr_args, attr_kwargs = value.deconstruct()\n\n value = {\n 'type': 
attr_type_path,\n 'args': attr_args,\n 'kwargs': attr_kwargs,\n '_deconstructed': True,\n }\n\n attrs[key] = value\n\n return {\n 'name': self.name,\n 'type': '%s.%s' % (type_module, self.type.__name__),\n 'attrs': serialize_to_signature(self.attrs),\n }", "def extra_state_attributes(self):\n return dict(\n self._instrument.attributes,\n model=\"{}/{}\".format(\n self._instrument.vehicle_model, self._instrument.vehicle_name\n ),\n model_year=self._instrument.vehicle_model_year,\n model_family=self._instrument.vehicle_model_family,\n title=self._instrument.vehicle_name,\n csid=self._instrument.vehicle_csid,\n vin=self._instrument.vehicle_vin,\n )", "def _get_updates(self, patch):\n updates = {}\n for p in patch:\n attribute = p['path'] if p['path'][0] != '/' else p['path'][1:]\n updates[attribute] = p['value']\n return updates", "def attribute_dict(self):\n return self.__attribute_dict" ]
[ "0.74727905", "0.6438466", "0.64251757", "0.64148694", "0.63640875", "0.62945694", "0.5877305", "0.5853865", "0.5761615", "0.57073545", "0.5665202", "0.55875015", "0.55512357", "0.5547303", "0.5520776", "0.55064106", "0.5505713", "0.546988", "0.5458309", "0.544397", "0.5434651", "0.53874093", "0.5381451", "0.5357058", "0.5345115", "0.5337876", "0.533612", "0.53196335", "0.5316516", "0.52999663" ]
0.8237712
0
Return a list of changes in English for a given model.
def changes_for_model(model): change_strings = [] for version, changes in changed_attrs_by_version(model, model.REVISION_ATTRS).items(): change_string = changes_as_string(changes, model.TRANSITIONS) if change_string: change_strings.append({ "user": version.revision.user, "description": change_string, "when": version.revision.date_created }) return change_strings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history(self, id):\n lm, previous_versions = h.get_model_and_previous_versions('MorphemeLanguageModel', id)\n if lm or previous_versions:\n return {'morpheme_language_model': lm,\n 'previous_versions': previous_versions}\n else:\n response.status_int = 404\n return {'error': 'No morpheme language models or morpheme language model backups match %s' % id}", "def clean_text(corpus, model): \n new_doc = []\n doc = model(corpus)\n for word in doc:\n if not word.is_stop and word.is_alpha:\n new_doc.append(word.lemma_.lower()) \n final = \", \".join(map(str,new_doc)) \n return final", "def getChanges():", "def _get_studio_action_translations(self, model, **kwargs):\n domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]\n\n # search view + its inheritancies\n views = request.env['ir.ui.view'].search([('model', '=', model.model)])\n domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain\n\n def make_domain(fld, rec):\n name = \"%s,%s\" % (fld.model_name, fld.name)\n return ['&', ('res_id', '=', rec.id), ('name', '=', name)]\n\n def insert_missing(fld, rec):\n if not fld.translate:\n return []\n\n if fld.related:\n try:\n # traverse related fields up to their data source\n while fld.related:\n rec, fld = fld.traverse_related(rec)\n if rec:\n return ['|'] + domain + make_domain(fld, rec)\n except AccessError:\n return []\n\n assert fld.translate and rec._name == fld.model_name\n request.env['ir.translation'].insert_missing(fld, rec)\n return []\n\n # insert missing translations of views\n for view in views:\n for name, fld in view._fields.items():\n domain += insert_missing(fld, view)\n\n # insert missing translations of model, and extend domain for related fields\n record = request.env[model.model].search([], limit=1)\n if record:\n for name, fld in record._fields.items():\n domain += insert_missing(fld, record)\n\n action = {\n 'name': _('Translate view'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'ir.translation',\n 'view_mode': 'tree',\n 'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],\n 'target': 'current',\n 'domain': domain,\n }\n\n return action", "def translated_fields(model):\n\n options = translator.get_options_for_model(model)\n fields = [f.name for l in options.fields.values() for f in l]\n\n for i, f in enumerate(fields):\n if f.endswith(settings.MODELTRANSLATION_DEFAULT_LANGUAGE):\n del fields[i]\n\n return fields", "def clean_text(corpus, model):\n \n new_doc = []\n doc = model(corpus)\n for word in doc:\n if not word.is_stop and word.is_alpha:\n new_doc.append(word.lemma_.lower())\n \n cleaned_string = \", \".join(new_doc) # putting the strings back into one string\n return cleaned_string", "def get_change_list():\n today = date.today()\n current_month = today.month\n current_year = today.year\n current_month_pred = dict()\n previous_month_pred = dict()\n change = list()\n commodity_list = [\"arhar\", \"bajra\", \"barley\", \"copra\", \"cotton\", \"sesamum\", \"gram\", \"groundnut\",\n \"jowar\", \"maize\", \"masoor\", \"moong\", \"niger\", \"paddy\", \"ragi\", \"rape\", \"jute\",\n \"safflower\", \"soyabean\", \"sugarcane\", \"sunflower\", \"urad\", \"wheat\"]\n if current_month == 1:\n previous_month = 12\n previous_year = current_year - 1\n else:\n previous_month = current_month - 1\n previous_year = current_year\n for crop in commodity_list:\n model_path = \"static/models/\" + crop + \".joblib\"\n model = load(model_path)\n current_month_wpi = 
model.predict(pd.DataFrame([current_month, current_year]).T)[0]\n current_month_pred[crop] = current_month_wpi\n previous_month_wpi = model.predict(pd.DataFrame([previous_month, previous_year]).T)[0]\n previous_month_pred[crop] = previous_month_wpi\n change.append((((current_month_wpi - previous_month_wpi) * 100 / previous_month_wpi), crop))\n sorted_change = change\n return sorted_change, current_month_pred, previous_month_pred", "def get_langs(id):", "def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp", "async def recentchanges(self, ctx, limit=50):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Wiki.recentchanges: ' + str(limit), extra={'invoker': ctx.message.author.name})\r\n twenties, limit = divmod(limit, 20)\r\n async with ctx.channel.typing():\r\n result = ['']\r\n changes = []\r\n start = 'now'\r\n for i in [20 for j in range(twenties)] + [limit]:\r\n resp = await self.req({\r\n 'action': 'query',\r\n 'list': 'recentchanges',\r\n 'rcprop': 'user|timestamp|comment|title|sizes|flags',\r\n 'rctype': 'edit|new',\r\n 'rclimit': i,\r\n 'rcstart': start\r\n })\r\n changes.extend(resp['query']['recentchanges'])\r\n start = resp['query']['recentchanges'][-1]['timestamp']\r\n i = 0\r\n for ch in changes:\r\n change = '\\n'\r\n change += ch['timestamp']\r\n change += ': '\r\n change += ch['title']\r\n change += '; '\r\n sizechange = ch['newlen'] - ch['oldlen']\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += '('\r\n if sizechange <= 0:\r\n change += str(sizechange)\r\n if sizechange > 0:\r\n change += '+' + str(sizechange)\r\n change += ')'\r\n if sizechange <= -500 or sizechange >= 500:\r\n change += '**'\r\n change += ' . . 
'\r\n change += ch['user']\r\n change += ' _('\r\n change += ch['comment'].replace('*', '\\\\*').replace('_', '\\\\_').replace('`', '\\\\`')\r\n change += ')_'\r\n result[i] += change\r\n if len(result[i]) > 2000:\r\n result.append('')\r\n result[i], result[i+1] = result[i].rsplit('\\n', 1)\r\n i += 1\r\n for r in result:\r\n await ctx.send(r)", "def test_l10n_changesets(self):\n url = reverse('shipping.views.status.l10n_changesets')\n url += '?av=fx1.0'\n response = self.client.get(url)\n eq_(response.status_code, 200)\n eq_(response.content, \"\"\"da l10n da 0003\nde l10n de 0002\n\"\"\")", "def GetChanges(self):\n return self._changes", "def get_monitored_changes(self) -> List:\n pass", "def get_translation_history(request, template=None):\n log.debug(\"Get history of translations of given entity to given locale.\")\n\n if not request.is_ajax():\n log.error(\"Non-AJAX request\")\n raise Http404\n\n try:\n entity = request.GET['entity']\n locale = request.GET['locale']\n plural_form = request.GET['plural_form']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n log.debug(\"Entity: \" + entity)\n log.debug(\"Locale: \" + locale)\n\n try:\n entity = Entity.objects.get(pk=entity)\n except Entity.DoesNotExist as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n try:\n locale = Locale.objects.get(code__iexact=locale)\n except Locale.DoesNotExist as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n translations = Translation.objects.filter(entity=entity, locale=locale)\n if plural_form != \"-1\":\n translations = translations.filter(plural_form=plural_form)\n translations = translations.order_by('-approved', '-date')\n\n if len(translations) > 0:\n payload = []\n offset = timezone.now().strftime('%z')\n\n for t in translations:\n u = t.user\n a = t.approved_user\n o = {\n \"id\": t.id,\n \"user\": \"Imported\" if u is None else u.first_name or u.email,\n \"email\": \"\" if u is None else u.email,\n \"translation\": t.string,\n \"date\": t.date.strftime('%b %d, %Y %H:%M'),\n \"date_iso\": t.date.isoformat() + offset,\n \"approved\": t.approved,\n \"approved_user\": \"\" if a is None else a.first_name or a.email,\n }\n payload.append(o)\n\n return HttpResponse(\n json.dumps(payload, indent=4), content_type='application/json')\n\n else:\n log.debug(\"Translations do not exist\")\n return HttpResponse(\"error\")", "def data_en(request):\n files = myFile.objects.order_by('name')\n context = {'files' : files}\n return render(request, 'sacms/data_en.html', context)", "def 取所有项目文本(self): # real signature unknown; restored from __doc__\n return self.GetStrings()", "def notices_en(request):\n notice_list = Notice.objects.order_by('published_date')\n context = {'notice_list': notice_list}\n return render(request, 'sacms/notices_en.html', context)", "def all_changes(db):\n changes = db['changes']\n\n for c in changes.find():\n yield Change(c)", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def getStopWords(spacy_model):\r\n # for languages available go to: https://github.com/stopwords-iso\r\n s_words = stopwords.stopwords('en')\r\n\r\n analyzer = partial(rawAnalyzer, spacy_model, [])\r\n return seq(s_words).flat_map(analyzer).to_list()", "def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }", "def get_translated_ids(id):", "def get_translation(self):", "def 
GetChangesSample():\n client = CreateClient()\n changes = client.GetChanges()\n for change in changes.entry:\n print change.title.text, change.changestamp.value", "def gather_sentences(self):\n sentences = Sentence.objects.all()\n return sentences", "def get_remarks(self, model=None):\n # Line indices where a new model starts\n model_start_i = np.array([i for i in range(len(self.lines))\n if self.lines[i].startswith((\"MODEL\"))],\n dtype=int)\n # Line indices with ATOM or HETATM records\n remark_line_i = np.array([i for i in range(len(self.lines)) if\n self.lines[i].startswith(\"REMARK\")],\n dtype=int)\n # Structures containing only one model may omit MODEL record\n # In these cases model starting index is set to 0\n if len(model_start_i) == 0:\n model_start_i = np.array([0])\n \n if model is None:\n # Add exclusive end of file\n model_start_i = np.concatenate((model_start_i, [len(self.lines)]))\n model_i = 0\n remarks = []\n for i in range(len(model_start_i) - 1):\n start = model_start_i[i]\n stop = model_start_i[i+1]\n model_remark_line_i = remark_line_i[\n (remark_line_i >= start) & (remark_line_i < stop)\n ]\n remarks.append(\n \"\\n\".join([self.lines[i][7:] for i in model_remark_line_i])\n )\n return remarks\n \n else:\n last_model = len(model_start_i)\n if model == 0:\n raise ValueError(\"The model index must not be 0\")\n # Negative models mean index starting from last model\n model = last_model + model + 1 if model < 0 else model\n\n if model < last_model:\n line_filter = ( ( remark_line_i >= model_start_i[model-1] ) &\n ( remark_line_i < model_start_i[model ] ) )\n elif model == last_model:\n line_filter = (remark_line_i >= model_start_i[model-1])\n else:\n raise ValueError(\n f\"The file has {last_model} models, \"\n f\"the given model {model} does not exist\"\n )\n remark_line_i = remark_line_i[line_filter]\n \n # Do not include 'REMARK ' itself -> begin from pos 8\n return \"\\n\".join([self.lines[i][7:] for i in remark_line_i])", "def predictLanguage(self, data, model):\n\n # The model is in the format .dat, needs to be converted back into a dictionary\n with open(model, \"rb\") as f:\n dtModel = dict(pickle.load(f))\n\n recNum = 1\n for record in data:\n subModel = dtModel\n # set default language to english\n language = 'en'\n # if submodel exits\n while (isinstance(subModel, dict)):\n subInd = next(iter(subModel))\n subModel = subModel[subInd]\n\n i = TreeNode(subInd, subModel).value\n val = record[i]\n keys = subModel.keys()\n\n if val in keys:\n language = subModel[val]\n subModel = subModel[val]\n continue\n # if keys doesn't contain val\n language = ''\n break\n # Print the predicted classes\n if language != '':\n print('Class predicted for Record '+ str(recNum) + ': ' + self.getClass(record, language))\n recNum += 1", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)", "def languages(self):\n return LanguageCodes.english_names" ]
[ "0.55791515", "0.55296826", "0.5505239", "0.54076606", "0.5240658", "0.5239974", "0.5236164", "0.5233433", "0.51919174", "0.51505697", "0.5029553", "0.50116163", "0.49868023", "0.49377528", "0.49096978", "0.48631215", "0.48531693", "0.4844625", "0.4843526", "0.48364088", "0.48200992", "0.48171178", "0.4786805", "0.47783855", "0.47600543", "0.47490054", "0.4735822", "0.47318894", "0.47267666", "0.4702755" ]
0.7259947
0
Convert a base 32 string to an integer
def base32_to_int(s): mistyped = False if s.find('o') > -1 or s.find('i') > -1 or s.find('l') > -1: s = s.replace('o', '0').replace('i', '1').replace('l', '1') mistyped = True decoded = 0 multi = 1 while len(s) > 0: decoded += multi * base32_digits.index(s[-1:]) multi = multi * 32 s = s[:-1] if mistyped: raise MistypedIDException(decoded) return decoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hex2int(r: str) -> int:", "def bin2int(r: str) -> int:", "def base36_to_int(s: str):\n # To prevent overconsumption of server resources, reject any\n # base36 string that is longer than 13 base36 digits (13 digits\n # is sufficient to base36-encode any 64-bit integer)\n if len(s) > 13:\n raise ValueError(\"Base36 input too large\")\n return int(s, 36)", "def byte_str_to_int(str):\n return int.from_bytes(str, byteorder = \"big\")", "def base36_to_int(s):\n # To prevent overconsumption of server resources, reject any\n # base36 string that is longer than 13 base36 digits (13 digits\n # is sufficient to base36-encode any 64-bit integer)\n if len(s) > 13:\n raise ValueError(\"Base36 input too large\")\n return int(s, 36)", "def bytes_to_int(s):\n # int type casts may return a long type\n return int(s.encode('hex'), 16)", "def dec2int(r: str) -> int:", "def _str_to_int(in_str):\n if in_str == '':\n return 0\n return int(in_str, 10)", "def str2num(s):\n\n i = 0\n l = 0\n try:\n for i in range(len(s)):\n l = l << 8\n l += ord(s[i])\n return l\n except:\n return 0", "def hash_string_to_int(\r\n k: bytes,\r\n e: str,\r\n) -> int:\r\n return int.from_bytes(hash_string(k, e), 'big')", "def bitstr_to_int(a):\n return int(a, 2)", "def __hex2int(_hex_str):\n return int(\"0x\"+_hex_str, 16)", "def hex_to_int(hex_string):\r\n return int(hex_string, 16)", "def hex2int(hex_str):\n return int(hex_str, 16)", "def bin_to_int(bit_string):\r\n return int(''.join(bit_string), 2)", "def int_to_base32(i):\n enc = ''\n while i >= 32:\n i, mod = divmod(i, 32)\n enc = base32_digits[mod] + enc\n enc = base32_digits[i] + enc\n return enc", "def text2Int(text):\n return reduce(lambda x, y : (x << 8) + y, map(ord, text))", "def str2num(size, s):\n\n\ti = 0\n\tn = 0\n\twhile i < size:\n\n\t\tn = n | (ord(s[i]) << (i*8))\n\t\ti = i + 1\n\n\treturn n", "def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n return result", "def getInt(string, radix, needHexPrefix):\n return (0)", "def data_to_int(data): \r\n data = str(data).strip().upper()\r\n if data[0]== 'B':\r\n return bin_to_int(data[1:])\r\n elif data[0]== 'H':\r\n return hex_to_int(data[1:])\r\n else:\r\n return int(data, 10)", "def to_int(s: str) -> int:\n try:\n return int(s.replace('_', ''))\n except ValueError:\n return int(ast.literal_eval(s))", "def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n flip = False;\n if string[0]=='-':\n \tflip=True;\n \tstring = string[1:]\n pow = len(string)-1\n for letr in string:\n \tletrNum = int(letr)\n \tresult+= letrNum*(base**pow)\n \tpow-=1\n if flip:\n \tresult= -result\n return result", "def string_to_int(value):\n ival = None\n\n try:\n ival = float(value)\n ival = int(ival)\n except Exception:\n pass\n\n return ival", "def toInteger(data):\n\tif isInteger(data):\n\t\treturn data\n\telse:\n\t\treturn ord(data)", "def pseudo_int(string_num):\r\n int_num = 0\r\n reversed_string_num = string_num[::-1] # begin read the characters from the end of the string.\r\n for indexx in range(len(string_num)):\r\n digit = reversed_string_num[indexx]\r\n int_num += (ord(digit) - ord('0')) * 10**indexx # '2698' => 8 * 10**0 + 9 * 10**1 + 6 * 10**2 + 2 * 10**3 = 2698\r\n return int_num", "def mac_str_to_int(mac_str):\n return int(mac_str.replace(':', ''), 16)", "def toint(s):\n try:\n n = int(s)\n except ValueError:\n n = 0\n return n if n >= 0 else 0", "def safeint(s):\n try:\n return int(force_unicode(s))\n except (ValueError, TypeError):\n return 0", "def 
hex2num(s):\n\n\tn = 0\n\n\tfor i in range(0,len(s)):\n\n\t\ta = ord(s[len(s)-i-1])\n\t\tif (a >= 48) & (a <= 57):\n\t\t\tn = n | ((a-48) << (i*4))\n\t\telif (a >= 65) & (a <= 70):\n\t\t\tn = n | ((a-65+10) << (i*4))\n\t\telif (a >= 97) & (a <= 102):\n\t\t\tn = n | ((a-97+10) << (i*4))\n\t\telse:\n\t\t\treturn None\n\n\treturn n" ]
[ "0.783031", "0.7773133", "0.7620709", "0.75134236", "0.75118244", "0.73315924", "0.7288714", "0.7273519", "0.7217547", "0.71261877", "0.71135163", "0.7068918", "0.70656765", "0.6995284", "0.69599956", "0.69458324", "0.6936514", "0.6910038", "0.6902294", "0.6878195", "0.68748", "0.6787733", "0.6765449", "0.673843", "0.66971934", "0.6685492", "0.6676859", "0.6667562", "0.6656347", "0.66052544" ]
0.8473583
0
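An illustrative aside on the positive document of the record above: `base32_to_int` first corrects the easily mistyped characters o/i/l, decodes against a 32-character alphabet, and raises `MistypedIDException` carrying the decoded value when a correction was made. The dataset never shows the real `base32_digits` alphabet or the exception class, so the sketch below assumes a Crockford-style lowercase alphabet and a trivial exception purely for demonstration.

```python
# Assumed stand-ins -- the record does not define these:
base32_digits = '0123456789abcdefghjkmnpqrstvwxyz'   # 32 chars, no i/l/o/u (assumption)


class MistypedIDException(Exception):
    """Assumed shape: carries the value decoded after correcting o/i/l."""
    def __init__(self, decoded):
        super().__init__(decoded)
        self.decoded = decoded


def base32_to_int(s):
    # Reproduced (re-indented) from the record's positive document.
    mistyped = False
    if s.find('o') > -1 or s.find('i') > -1 or s.find('l') > -1:
        s = s.replace('o', '0').replace('i', '1').replace('l', '1')
        mistyped = True
    decoded = 0
    multi = 1
    while len(s) > 0:
        decoded += multi * base32_digits.index(s[-1:])
        multi = multi * 32
        s = s[:-1]
    if mistyped:
        raise MistypedIDException(decoded)
    return decoded


print(base32_to_int('3g'))        # 3 * 32 + 16 = 112 under the assumed alphabet
try:
    base32_to_int('3l')           # 'l' is corrected to '1' and flagged
except MistypedIDException as exc:
    print(exc.decoded)            # 97, i.e. '31' under the assumed alphabet
```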
Converts an integer to a base32 string
def int_to_base32(i): enc = '' while i >= 32: i, mod = divmod(i, 32) enc = base32_digits[mod] + enc enc = base32_digits[i] + enc return enc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def char32_t(n):\n return int(n).to_bytes(32, byteorder='little', signed=False)", "def int2hex(n: int) -> str:", "def encode_i32(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.I32].LENGTH, True)", "def encode_u32(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.U32].LENGTH, False)", "def base32_to_int(s):\n mistyped = False\n if s.find('o') > -1 or s.find('i') > -1 or s.find('l') > -1:\n s = s.replace('o', '0').replace('i', '1').replace('l', '1')\n mistyped = True\n decoded = 0\n multi = 1\n while len(s) > 0:\n decoded += multi * base32_digits.index(s[-1:])\n multi = multi * 32\n s = s[:-1]\n if mistyped:\n raise MistypedIDException(decoded)\n return decoded", "def base2str(self, int_number):\r\n return self.format_base % (float(int_number) / self.mult_base)", "def int32_t(n):\n return int(n).to_bytes(4, byteorder='little', signed=True)", "def _base32_to_hex(base32):\n ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'\n x = 0\n for digit in str(base32.upper().strip(' ')):\n x = x * len(ALPHABET) + ALPHABET.index(digit)\n return hex(x).lstrip('0x').rstrip('L').upper()", "def encode32(x):\n while x < 0:\n x = x + 2**32 # Convert 2's complement negative numbers to unsigned\n assert((x >> 32) == 0) # Must be a 32bit quantity\n x = pack(x, 32, 'little', False) # Convert to little endian\n\n s = \"\"\n for b in x:\n s += '{:02x}'.format(b) # Convert little endian, unsigned, 32bit value to string number in hex base\n assert(len(s) == 8)\n return \"\\\\u\" + s[0:4] + \"\\\\u\" + s[4:] #e.g. \\u4e4d\\u4e4f represents input number x == 4f4e4d4e", "def baseEncode(number, base=36):\n if base == 10:\n return str(number)\n if not isinstance(number, int):\n raise TypeError('number must be an integer')\n alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\n if base > 62 or base <=1:\n print(\"base should be between 2 and 62\")\n return None\n sign = \"\"\n if number < 0:\n sign = \"-\"\n number = -number\n alphabet = alphabet[:base+1]\n if 0 <= number and number <base:\n return sign+alphabet[number]\n numberbase=\"\"\n while number != 0:\n number, i = divmod(number, base)\n numberbase = alphabet[i] + numberbase\n return sign+numberbase", "def format(id, length=5):\n return str(bin(id))[2:] if len(str(int(id))[2:])>4 else (5-len(str(bin(id))[2:]))*\"0\"+str(bin(id))[2:]", "def int_to_hexstr(data: int) -> str:\n return \"%0.2X\" % data", "def encode_base32_from_list(list_of_int: List[int]) -> str:\n data = BytesIO()\n for i in list_of_int:\n buf = b\"\"\n while True:\n towrite = i & 0x7f\n i >>= 7\n if i:\n buf += bytes((towrite | 0x80,))\n else:\n buf += bytes((towrite,))\n break\n data.write(buf)\n data.seek(0)\n return b32encode(data.read()).decode().replace('=', '')", "def ulid_to_base32(ulid):\n return encode_ulid_base32(ulid_to_binary(ulid))", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def _encode_int(source: int) -> bytes:\n return b\"i\" + str(source).encode(\"ascii\") + b\"e\"", "def encode(number):\r\n\tassert number >= 0\r\n\tout = \"\"\r\n\r\n\tif number == 0:\r\n\t\tout = keyspace[0]\r\n\telse:\r\n\t\twhile number > 0:\r\n\t\t\tnumber, digit = divmod(number, keyspace_len)\r\n\t\t\tout += keyspace[digit]\r\n\treturn out[::-1]", "def convert(num):\r\n if len(str(num))==1:\r\n return \"000%i\"%num\r\n elif len(str(num)) == 2:\r\n return \"00%i\"%num\r\n elif len(str(num)) == 
3:\r\n return \"0%i\"%num\r\n elif len(str(num)) == 4:\r\n return \"%i\"%num", "def int_to_hex(num):\n return hex(num)", "def _int2str(num):\n if num<10:\n return '00%s'%str(num)\n elif 10<=num<100:\n return '0%s'%str(num)\n else:\n return '%s'%str(num)", "def int32_to_bytes(value):\n return struct.pack(\"i\", value)", "def encode_int(n):\n return struct.pack(\">I\", n)", "def uint32_t(n):\n return int(n).to_bytes(4, byteorder='little', signed=False)", "def number(size, n):\n\n\t# Little endian writing\n\n\ts = \"\"\n\n\twhile size > 0:\n\t\ti = n % 256\n\t\ts = s + chr(i)\n#\t\tn = n / 256\n\t\tn = n >> 8\n\t\tsize = size - 1\n\n\treturn s", "def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n currentnum = num\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n if not num:\n return '0'\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n return converted_string", "def encode_int(v):\n if v < 240:\n return chr(v)\n elif v <= 2287:\n v -= 240\n d, m = divmod(v, 256)\n return chr(241 + d) + chr(m)\n elif v <= 67823:\n v -= 2288\n d, m = divmod(v, 256)\n return '\\xf9' + chr(d) + chr(m)\n elif v <= 16777215:\n return '\\xfa' + struct.pack('>L', v)[-3:]\n elif v <= 4294967295:\n return '\\xfb' + struct.pack('>L', v)\n elif v <= 1099511627775:\n return '\\xfc' + struct.pack('>Q', v)[-5:]\n elif v <= 281474976710655:\n return '\\xfd' + struct.pack('>Q', v)[-6:]\n elif v <= 72057594037927935:\n return '\\xfe' + struct.pack('>Q', v)[-7:]\n else:\n assert v.bit_length() <= 64\n return '\\xff' + struct.pack('>Q', v)", "def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n\n currentnum = num\n\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n\n if not num:\n return '0'\n\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n\n return converted_string", "def int_to_bitstr(a, size = None):\n if size is None:\n size = len(hex(a)[2:]) * 4\n return (bin(a)[2:]).zfill(size)", "def int2bin(n: int) -> str:", "def base32_encode(string: str) -> bytes:\n\n # encoded the input (we need a bytes like object)\n # then, b32encoded the bytes-like object\n return base64.b32encode(string.encode(\"utf-8\"))" ]
[ "0.72810096", "0.7087469", "0.70650214", "0.7059178", "0.6979534", "0.6882167", "0.68775755", "0.68682367", "0.67978215", "0.6706196", "0.6661549", "0.66425776", "0.66322464", "0.662629", "0.6607996", "0.6601673", "0.6540795", "0.6535475", "0.6533861", "0.6529681", "0.651574", "0.6483985", "0.6477835", "0.6469654", "0.64509666", "0.64464253", "0.64418375", "0.6439038", "0.6433887", "0.64110804" ]
0.8403315
0
Check that the raw feed is the full XML document
def test_raw_feed(self): self.assertEqual(self.feed.feed.raw[:6].decode('utf-8'), "<?xml ")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_good_enough_xml(self, resp):\n content_type = resp.headers['Content-Type'].lower()\n \n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('xml') > -1)", "def check_rss_dom_structure(doc):\n if isinstance(doc, type(None)) or not doc.hasChildNodes() or len(doc.childNodes) != 1:\n raise Exception('Invalid document')\n root_elmt = doc.childNodes[0]\n if (\n root_elmt.nodeName != 'rss'\n or not root_elmt.hasChildNodes()\n or len(root_elmt.childNodes) != 1\n or root_elmt.getAttribute('version') != '2.0'\n ):\n raise Exception('Invalid root element')\n channel_elmt = root_elmt.childNodes[0]\n has_title = False\n has_description = False\n has_link = False\n for elmt in channel_elmt.childNodes:\n if elmt.nodeName == 'title':\n has_title = True\n elif elmt.nodeName == 'description':\n has_description = True\n elif elmt.nodeName == 'link':\n has_link = True\n if not has_title or not has_description or not has_link:\n raise Exception('Missing header element')\n\n return channel_elmt", "def isGood(self):\n return _libsbml.XMLInputStream_isGood(self)", "def is_rss(self):\r\n return self.__content_type in feed_types", "def is_rss(self):\n return self.__content_type in feed_types", "def detect(stream):\n try:\n parse(stream)\n return True\n except (xml.parsers.expat.ExpatError, TypeError):\n return False", "def test_feed(app, status, warning):\n app.build()\n assert app.statuscode == 0\n\n feed_path = app.outdir / \"blog/atom.xml\"\n assert (feed_path).exists()\n\n with feed_path.open() as feed_opened:\n feed_tree = lxml.etree.parse(feed_opened)\n entries = feed_tree.findall(\"{http://www.w3.org/2005/Atom}entry\")\n assert len(entries) == 2\n\n entry = entries[0]\n title = entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Post Title\"\n summary = entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary.text == \"Foo post description with link.\"\n categories = entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 2\n assert categories[0].attrib[\"label\"] == \"BarTag\"\n assert categories[0].attrib[\"term\"] == \"BarTag\"\n assert categories[1].attrib[\"label\"] == \"Foo Tag\"\n assert categories[1].attrib[\"term\"] == \"FooTag\"\n content = entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert \"Foo post content.\" in content.text\n update_time = entry.find(\"{http://www.w3.org/2005/Atom}updated\")\n first_entry_date = datetime.strptime(update_time.text, POST_DATETIME_FMT)\n\n empty_entry = entries[1]\n title = empty_entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Empty Post\"\n summary = empty_entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary is None\n categories = empty_entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 0\n content = empty_entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert 'id=\"foo-empty-post\"' in content.text\n update_time = empty_entry.find(\"{http://www.w3.org/2005/Atom}updated\")\n second_entry_date = datetime.strptime(update_time.text, POST_DATETIME_FMT)\n\n # check order of post based on their dates\n assert first_entry_date > second_entry_date\n\n social_path = app.outdir / \"blog/social.xml\"\n assert (social_path).exists()\n\n with social_path.open() as social_opened:\n social_tree = lxml.etree.parse(social_opened)\n social_entries = social_tree.findall(\"{http://www.w3.org/2005/Atom}entry\")\n assert len(social_entries) == len(entries)\n\n 
social_entry = social_entries[0]\n title = social_entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Post Title\"\n summary = social_entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary.text == \"Foo post description with link.\"\n categories = social_entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 2\n assert categories[0].attrib[\"label\"] == \"BarTag\"\n assert categories[1].attrib[\"label\"] == \"Foo Tag\"\n content = social_entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert \"Foo Post Title\" in content.text", "def test_receiveBadXML(self):\n streamError = []\n streamEnd = []\n\n def streamErrorEvent(reason):\n streamError.append(reason)\n\n def streamEndEvent(_):\n streamEnd.append(None)\n\n self.xmlstream.addObserver(xmlstream.STREAM_ERROR_EVENT, streamErrorEvent)\n self.xmlstream.addObserver(xmlstream.STREAM_END_EVENT, streamEndEvent)\n self.xmlstream.connectionMade()\n\n self.xmlstream.dataReceived(\"<root>\")\n self.assertEqual(0, len(streamError))\n self.assertEqual(0, len(streamEnd))\n\n self.xmlstream.dataReceived(\"<child><unclosed></child>\")\n self.assertEqual(1, len(streamError))\n self.assertTrue(streamError[0].check(domish.ParserError))\n self.assertEqual(1, len(streamEnd))", "def test_rss_is_parseable(self):\r\n [make_bookmark() for i in range(10)]\r\n transaction.commit()\r\n\r\n res = self.app.get('/rss')\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"200 OK\",\r\n msg='recent status is 200, ' + res.status)\r\n\r\n # http://packages.python.org/feedparser/\r\n # introduction.html#parsing-a-feed-from-a-string\r\n parsed = feedparser.parse(res.body)\r\n links = []\r\n for entry in parsed.entries:\r\n links.append({\r\n 'title': entry.title,\r\n 'category': entry.category,\r\n 'date': time.strftime('%d %b %Y', entry.updated_parsed),\r\n 'description': entry.description,\r\n 'link': entry.link,\r\n })\r\n\r\n self.assertTrue(links, 'The feed should have a list of links.')\r\n self.assertEqual(10, len(links), 'There are 10 links in the feed.')\r\n\r\n sample_item = links[0]\r\n self.assertTrue(sample_item['title'], 'Items have a title.')\r\n self.assertTrue(\r\n sample_item['link'],\r\n 'Items have a link to reach things.')\r\n self.assertTrue(\r\n 'description' in sample_item,\r\n 'Items have a description string.')", "def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed", "def test_incomplete_xml_no_timestamp(self):\n self.__opener.contents = '<Report><Doc><Summary/></Doc></Report>'\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('url'))", "def assertValidXMLResponse(self, resp):\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp['Content-Type'].startswith('application/xml'))\r\n self.assertValidXML(resp.content)", "def _check_empty_feed(self, items, rest_of_world):\n if not items or (len(items) == 1 and items[0].get('shelf')):\n # Empty feed.\n if rest_of_world:\n return -1\n return 0\n return 1", "def _parse_feed(endpoint):\n return etree.parse(endpoint).getroot()", "def _retrieveFeed(self):\n url = self.url\n if url!='':\n self._last_update_time_in_minutes = time.time()/60\n self._last_update_time = DateTime()\n d = feedparser.parse(url)\n if getattr(d, 'bozo', 0) == 1 and not isinstance(d.get('bozo_exception'),\n ACCEPTED_FEEDPARSER_EXCEPTIONS):\n self._loaded = True # we tried at least but have a failed load\n 
self._failed = True\n return False\n self._title = d.feed.title\n self._siteurl = d.feed.link\n self._items = []\n for item in d['items']:\n try:\n link = item.links[0]['href']\n itemdict = {\n 'title': item.title,\n 'url': link,\n 'summary': item.get('description', ''),\n }\n if hasattr(item, \"updated\"):\n try:\n itemdict['updated'] = DateTime(item.updated)\n except DateTimeError:\n # It's okay to drop it because in the\n # template, this is checked with\n # ``exists:``\n pass\n except AttributeError:\n continue\n self._items.append(itemdict)\n self._loaded = True\n self._failed = False\n return True\n self._loaded = True\n self._failed = True # no url set means failed\n return False # no url set, although that actually should not really happen", "def isXML(content):\n\n testStr = '<?xml'\n\n # File case.\n if hasattr(content, 'read') and hasattr(content, 'seek'):\n xml = content.read(len(testStr))\n content.seek(0)\n if testStr == xml:\n return True\n\n # String case.\n elif isinstance(content, types.StringTypes):\n if content.startswith(testStr):\n return True\n\n return False", "def validate(self) :\n\t\tif self.doc is not None :\n\t\t\tparser = etree.XMLParser(recover=True, strip_cdata=True)\n\t\t\ttree = etree.XML(self.doc.toxml(), parser)\n\t\t\tdtdFile = self._getDTDFile()\n\t\t\tif dtdFile is not None :\n\t\t\t\tif _existFile(dtdFile) :\n\t\t\t\t\tdtd = etree.DTD(dtdFile)\n\t\t\t\t\tif dtd.validate(tree) :\n\t\t\t\t\t\tself._enrichXML()\n\t\t\t\t\t\treturn True\n\t\t\t\t\telse :\n\t\t\t\t\t\tprint(dtd.error_log.filter_from_errors()[0])\n\t\t\t\t\t\treturn False\n\t\t\t\telse :\n\t\t\t\t\tprint('Unable to find the DTD file ',dtdFile)\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tself._enrichXML()\n\t\t\t\treturn True\n\t\telse :\n\t\t\treturn False", "def test_none(self):\n\n feed = parseFeed()\n issues = []\n for item in feed.getElementsByTagName(\"entry\"):\n for description in item.getElementsByTagName(\"title\"):\n issues.append(description.firstChild.wholeText)\n self.assertEqual([], issues)", "def sanity_check(self):\n try:\n self._read()\n except tomlkit.exceptions.TOMLKitError:\n return False\n else:\n return True", "def assertValidXML(self, data):\r\n # Just try the load. 
If it throws an exception, the test case will fail.\r\n self.serializer.from_xml(data)", "def read_xml(self):\n pass", "def test_simple_2(self):\n xml_parser = XmlParser()\n xml = load_fixture(\"xml_parser_simple_2.xml\")\n feed = xml_parser.parse(xml)\n self.assertIsNotNone(feed)\n self.assertIsNotNone(feed.entries)\n assert len(feed.entries) == 1", "def test_simple_1(self):\n xml_parser = XmlParser()\n xml = load_fixture(\"xml_parser_simple_1.xml\")\n feed = xml_parser.parse(xml)\n self.assertIsNotNone(feed)\n self.assertIsNotNone(feed.entries)\n assert len(feed.entries) == 1", "def test_rss_added(self):\r\n body_str = \"application/rss+xml\"\r\n res = self.app.get('/recent')\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"200 OK\",\r\n msg='recent status is 200, ' + res.status)\r\n self.assertTrue(\r\n body_str in res.body,\r\n msg=\"Request should contain rss str: \" + res.body)", "def test_incomplete_xml(self):\n self.__opener.contents = '<Report></Report>>'\n self.assertEqual(-1, self.__uft.failed_tests('url'))", "def _check_deprecated_data_xml_node(self):\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True", "def is_castxml(self):\n return self._is_castxml", "def test_valid_xml(self):\r\n self.build_problem()\r\n self.assertTrue(True)", "def _delicious_xml_data_test(self):\r\n res = Bmark.query.all()\r\n self.assertEqual(\r\n len(res),\r\n 25,\r\n \"We should have 25 results, we got: \" + str(len(res)))\r\n\r\n # verify we can find a bookmark by url and check tags, etc\r\n check_url = 'http://jekyllrb.com/'\r\n check_url_hashed = generate_hash(check_url)\r\n found = Bmark.query.filter(Bmark.hash_id == check_url_hashed).one()\r\n\r\n self.assertTrue(\r\n found.hashed.url == check_url, \"The url should match our search\")\r\n self.assertEqual(\r\n len(found.tags), 6,\r\n \"We should have gotten 6 tags, got: \" + str(len(found.tags)))\r\n\r\n # and check we have a right tag or two\r\n self.assertTrue(\r\n 'ruby' in found.tag_string(),\r\n 'ruby should be a valid tag in the bookmark')\r\n\r\n # and check the long description field\r\n self.assertTrue(\r\n 'added for test' in found.extended,\r\n \"'added for test' should be in the extended description\")", "def make_request_xml(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`xml`\n if self.is_good_enough_xml(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None" ]
[ "0.6856743", "0.65518546", "0.6428907", "0.6350477", "0.62410676", "0.60915834", "0.6087781", "0.60498935", "0.5918418", "0.5895832", "0.58815116", "0.58612406", "0.5837969", "0.5769733", "0.57486796", "0.56863636", "0.5678783", "0.56738734", "0.5595063", "0.55606997", "0.5549304", "0.5548431", "0.55410564", "0.5496636", "0.54948056", "0.548124", "0.54759693", "0.5466703", "0.546168", "0.5461514" ]
0.7514746
0
Returns the Ghanaian name of the user when based on inputted birthdate. When user answers yes, the meaning of the name will be displayed
def get_ghname(): #Place Day and Name in dictonaries, one for male and female fem_gh_name = {"Monday": "Adwoa", "Tuesday": "Abena", "Wednesday": "Akua", "Thursday": "Yaa", "Friday": "Afua", "Saturday": "Ama", "Sunday": "Akosua"} male_gh_name = {"Monday": "Kwadwo/Kojo", "Tuesday": "Kwabena", "Wednesday": "Kwaku", "Thursday": "Yaw", "Friday": "Kofi", "Saturday": "Kwame", "Sunday": "Akwasi/Kwesi"} birthday_str = input('Enter birthday (format mm/dd/yyyy): ') bth_weekday = date_week_born.get_day(birthday_str) gender = input('Enter your gender (male or female) No extra spaces: ') # Print the name /Day based on gender. Also validate if the user entered a valid answer. If not, user will be prompted again. while True: if gender in( 'male', 'female'): if gender == 'female': print("Since you were born on %s, your Akan name is %s" % (bth_weekday, fem_gh_name[bth_weekday])) else: print("Since you were born on %s, your Akan name is %s" % (bth_weekday, male_gh_name[bth_weekday])) break else: print("Please enter a valid answer for gender(female or male)") gender = input('Enter your gender (male or female): ') print("You were born on a %s" % bth_weekday) # Based on gender, return the name that corresponds to birthdate choice = input("Would you like to know the meaning of your Akan name (y or n): ") while True: try: if choice == 'n': print("Maybe next time %s" % (fem_gh_name[bth_weekday])) else: if gender == 'female': print("The meaning of %s is %s." % (fem_gh_name[bth_weekday], (get_name_meaning(bth_weekday).lower()))) else: print("The meaning of %s is %s." % (male_gh_name[bth_weekday], (get_name_meaning(bth_weekday).lower()))) break except choice not in('y', 'n'): print("Please enter a valid answer(y or n)") choice = input("Would you like to know the meaning of your Akan name (y or n): ") return fem_gh_name[bth_weekday]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def happy_birthday(name):\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday, dear \" + name + \".\")\n print(\"Happy Birthday to you!\")", "def happy_birthday(name, age: hug.types.number = 1):\n return \"Happy {age} Birthday {name}!\".format(**locals())", "def ask_info_player(self) -> str:\n\n print(\"Enter first name : \")\n while True:\n first_name = input()\n if check.check_input_string_special(first_name) is True:\n if check.check_input_string_len(first_name) is True:\n if check.check_input_string_integer(first_name) is True:\n break\n\n print(\"Enter last name : \")\n while True:\n last_name = input()\n if check.check_input_string_special(last_name) is True:\n if check.check_input_string_len(last_name) is True:\n if check.check_input_string_integer(last_name) is True:\n break\n\n print(\"Enter date of birth with this format YEAR-MONTH-DAY : \")\n birthday = check.check_date_input()\n\n print(\n \"Enter a number for choose the gender : \\n\"\n \"1 - Man \\n\"\n \"2 - Women\"\n )\n genre = check.request_selection_with_number(\"Man\", \"Women\", \"none\")\n\n print(\"\\n The player {} {}, {}, birth on {} has been added to the database !\".format(\n first_name,\n last_name,\n genre,\n birthday))\n\n return first_name, last_name, birthday, genre", "def full_name():\n gender = dice.randint(1, 100) # Over 50 is male, under 50 is female\n double_first = dice.randint(1, 100) # Over 10 no\n double_last = dice.randint(1, 100) # Only between 40 and 55\n doctor = dice.randint(1, 1000) # Different for men and women\n # Gender distribution is 50/50 (with only 2 genders),\n # 10% have a double first name,\n # 15 % have a double last name and\n # 1% are doctors.\n name = \"\"\n prefix = \"\"\n\n # We use the prefix to get a clear identifier in case the name can\n # be used for both genders\n if gender <= 50 and double_first <= 10:\n name = double_name(\"male\")\n if name.split(\"-\")[0] in names.woman:\n prefix = \"Herr \"\n elif gender <= 50:\n name = male_name()\n if name in names.woman:\n prefix = \"Herr \"\n elif gender > 50 and double_first <= 10:\n name = double_name(\"female\")\n if name.split(\"-\")[0] in names.man:\n prefix = \"Frau \"\n elif gender > 50:\n name = female_name()\n if name in names.man:\n prefix = \"Frau \"\n\n # Now we add a last name or even a double last name\n if 40 <= double_last < 55:\n name += \" \" + double_name(\"family\")\n else:\n name += \" \" + last_name()\n\n # Last but not least we check if the person is a doctor\n if gender <= 50 and doctor <= 11:\n name = \"Dr. \" + name\n elif gender > 50 and doctor <= 9:\n name = \"Dr. 
\" + name\n\n # If the prefix isn't empty, we add it to the name\n if prefix:\n name = prefix + name\n return name", "def question_7():\n dob_string = input(\"DOB: \")\n birth_year = int(dob_string[-4:]) # Extract the last 4 characters\n age = REFERENCE_YEAR - birth_year\n print(f\"You were born in {birth_year}\")\n print(f\"You turn/ed {age} in {REFERENCE_YEAR}\")", "def user_stats_birth(df):\n print('\\nCalculating User Birthday...\\n')\n start_time = time.time()\n # Display earliest, most recent, and most common year of birth\n birth_year = df['Birth Year']\n # the most common birth year\n most_common_year = birth_year.value_counts().idxmax()\n print(\"The most common birth year:\", most_common_year)\n # the most recent birth year\n recent = birth_year.max()\n print(\"The most recent birth year:\", recent)\n # the most earliest birth year\n earliest = birth_year.min()\n print(\"The most earliest birth year:\", earliest)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*50)", "def birthdate_date(self):\n\n bday = self.birthdate\n if bday:\n dt = datetime.datetime.strptime(\n bday,\n \"%d %b %Y\") # not sure if this format even complies with spec\n # dt = dateutil.parser.parse(bday) # this will guess at a bunch of different formats\n # dt = arrow.get(bday)\n return dt.date()\n else:\n return bday # None", "def date_of_birth(self) -> str:\n return self._date_of_birth.strftime('%Y-%m-%d')", "def age(birthdate):\n today=date.today()\n birthdate=date(birthdate[2],birthdate[1],birthdate[0])\n if birthdate>today:\n return \"Person has not been born yet!\"\n difference=today-birthdate\n return difference.days", "def describe_user(self):\n print(self.first_name.title() + \" \" + self.last_name.title() +\n \" is a \" + str(self.age) + \" year old who identifies as \" +\n self.gender + \".\")", "def name_gen(sex): #input character sex\r\n\tname=\"\"\r\n\tif sex==\"male\":\r\n\t\treturn stellagama.random_line(\"malenames.txt\") #output random male name\r\n\telif sex==\"female\":\r\n\t\treturn stellagama.random_line(\"femalenames.txt\") #output random female name\r\n\telse:\t#in case of wrong input\r\n\t\treturn \"Tokay\" #output placeholder\r", "def female_name():\n return dice.choice(names.woman)", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def male_name():\n return dice.choice(names.man)", "def greet_user(self):\n\t\tprint(f\"How are you doing {self.first_name.title()}?\")", "def name():\n\treturn input('Masukkan Nama : ')", "def checkUser(username):\n if not str(username).isalpha():\n logger.info(\"rejecting user name %s: incorrect format\" % username)\n return \"Parameter 'username' must be alphabetic characters only\", 400\n # convert user name to lower case before DB query\n thisUser = str(username).lower()\n userBday = dbQuery(thisUser)\n if userBday == ERR_NOT_FOUND:\n logger.info(\"no birth date found in DB for user %s\" % username)\n return \"no birth date found in DB for user %s\" % username, 400\n # now we have a date of birth for this user - let's check when the birthday is\n daysLeft = calcDays(userBday)\n # construct reponse JSON object\n if daysLeft == 0:\n logger.info(\"wished %s a happy birthday\" % username)\n resp = {\"message\": \"Hello, %s! Happy birthday!\" % username}\n else:\n resp = {\"message\": \"Hello, %s! 
Your birthday is in %s day(s)\" % (username, daysLeft)}\n # return JSON response and 200 HTTP status\n return jsonify(resp), 200", "def next_birthday(self):\n if self.birthday == datetime.date.today():\n print(\"Happy Birthday!\")\n\n else:\n current_year = datetime.date.today().year\n birth_month = self.birthday.month\n birth_day = self.birthday.day\n print(datetime.date(current_year, birth_month, birth_day))", "def birth_date(self) -> str:\n return self._birth_date", "def hometown_greeting(hometown, first_name, last_name):\n if is_hometown(hometown):\n greeting = \"Hi %s, we're from the same place!\" % full_name(first_name, last_name)\n else:\n greeting = \"Hi %s, where are you from?\" % full_name(first_name, last_name)\n return greeting", "def double_name(gender):\n # gender sets whether the generated double name is male, female or\n # a family name\n if gender == \"male\":\n return double_name_male()\n elif gender == \"female\":\n return double_name_female()\n elif gender == \"family\":\n return double_name_last()", "def welcome_user():\n print('Welcome to the Brain Games!')\n name = prompt.string('May I have your name? ')\n print('Hello, {0}!'.format(name)) # noqa: WPS421\n return name", "async def profile_birthday(self, ctx):\n profile = await self.cache.get_profile(ctx.author.id)\n if not profile.birthday:\n res = \"You have not set your birthday on your profile yet.\"\n return await ctx.send_line(res, ctx.author.avatar_url)\n res = f\"Your birthday is on {profile.birthday.strftime('%e %B')}.\"\n await ctx.send_line(res, ctx.author.avatar_url)", "def hometown_greeting(hometown, first_name, last_name):\n\n\tif is_hometown(hometown):\n\t\tprint \"Hi \" + full_name(first_name, last_name) + \", we're from the same place!\"\n\telse:\n\t\tprint \"Hi \" + full_name(first_name, last_name) + \", where are you from?\"", "def test_name(name):\n # To work with the name, we remove the address and then\n # split it by its blanks\n name = name.split(\",\")[0]\n name = name.split()\n # First, we check whether the fictional person is a doctor or not\n doctor = 0\n if \"Dr.\" in name:\n doctor = 1\n\n # We save the results in a list\n result = [doctor]\n # Next we look at whether the person has a double first name\n if \"-\" in name[-2]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check if the person hat a double last name.\n if \"-\" in name[-1]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check whether the person is male or female.\n first_name = name[-2]\n if result[1] == 1:\n first_name = (first_name.split(\"-\"))[-2]\n if (first_name in names.woman and \"Herr\" not in name) or \"Frau\" in name:\n result.append(\"female\")\n elif (first_name in names.man and \"Frau\" not in name) or \"Herr\" in name:\n result.append(\"male\")\n return result", "def get_name():\n return raw_input(\"What's your name? 
\")", "def staff_birthdays():\n try:\n staff_docs.birthday_list()\n\n except FileNotFoundError:\n popup_error(r\"\"\"Please Generate the file first from MYOB Payroll \n under the Employees reportand name it Birthday.csv.\nPlace in J:\\Quality Data\\Data Technician\\StaffDbases\\n\nThe File should have fields\n- Employee Code\n- Employee Full Name\n- Employee Status\n- Employee Occupation\n- Employee Start Date\n- Employee Birthdate\n- Employee Cost Centre Name\"\"\")", "def get_name():\n return \"Boss\"", "def describe_user(self):\n print(\"\\n Name: \" + self.f_name.title() + ' ' + self.l_name.title())\n print(\"Age: \" + str(self.age)) \n print(\"Birthplace: \" + self.birthplace.title())", "def birth_date(self, birth_date: str):\n if birth_date is None:\n raise ValueError(\"Invalid value for `birth_date`, must not be `None`\") # noqa: E501\n\n self._birth_date = birth_date" ]
[ "0.6952868", "0.6743409", "0.61124504", "0.60920054", "0.5872237", "0.5853385", "0.58432305", "0.5835405", "0.5811491", "0.57774514", "0.5771373", "0.57697767", "0.5742696", "0.56782097", "0.56620866", "0.5659846", "0.56561166", "0.5644445", "0.5635021", "0.5591319", "0.5555805", "0.55387336", "0.55329967", "0.55300033", "0.5525429", "0.5498484", "0.5492114", "0.54610014", "0.5460624", "0.5454122" ]
0.7638637
0
Return the meaning of user's name based on the week_day parameter
def get_name_meaning(week_day): dict_meaning = {'Tuesday': "Full of fire and determination, inspirer, risk-taker, will go through a period of uncertainty that will lead to change", "Monday": "The peacemaker, calm and cool, satisfied when doing things to help others, rarely bored""", "Wednesday": "Loving, able to bring people together, don’t prioritize work over people", "Thursday": "Strong-willed and powerful leader, engages in social and political resistance", "Friday": "An adventurous wanderer, full of growth, creative, will find purpose in an accidental situation", "Saturday":"Wise, gentle, happy, able to bring peace in troublesome situations, reinvigorated by spiritual experiences and will become involved in a major cause", "Sunday": "A born leader in bringing people together, protective of friends and family"} return dict_meaning[week_day]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekday_name(day_of_week):\n\n weekday_names = [\n 'Sunday', \n 'Monday', \n 'Tuesday', \n 'Wednesday', \n 'Thursday', \n 'Friday', \n 'Saturday']\n \n if day_of_week < 1 or day_of_week > 7:\n return 'None! Sowwy.'\n\n if day_of_week == 1:\n print(weekday_names[0])\n if day_of_week == 2:\n print(weekday_names[1])\n if day_of_week == 3:\n print(weekday_names[2])\n if day_of_week == 4:\n print(weekday_names[3])\n if day_of_week == 5:\n print(weekday_names[4])\n if day_of_week == 6:\n print(weekday_names[5]) \n if day_of_week == 7:\n print(weekday_names[6])", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def weekdayname(self, date):\n weekday = weekdayname_msgid(date.dow())\n return translate(weekday, domain='plonelocales',\n context=self.request, default=weekday)", "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def day_of_week(date: datetime) -> str:\n weekday = date.weekday()\n return calendar.day_name[weekday]", "def __str__(self):\n return self.day_of_week", "def weekday_name(day_of_week):\n i = 0\n weekdays = [\"Sunday\", \"Monday\", \"Tuesday\",\n \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n\n while i < len(weekdays):\n if i + 1 == day_of_week:\n print(weekdays[i])\n i = i + 1", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def day_name(x):\r\n if x==0:\r\n return \"Sunday\"\r\n elif x==1:\r\n return \"Monday\"\r\n elif x==2:\r\n return \"Tuesday\"\r\n elif x==3:\r\n return \"Wednesday\"\r\n elif x==4:\r\n return \"Thursday\"\r\n elif x==5:\r\n return \"Friday\"\r\n elif x==6:\r\n return \"Saturday\"", "def day_name(self):\n ref = Date(11, 11, 2019)\n day_names = ['Monday', 'Tuesday', 'Wednesday', \n 'Thursday', 'Friday', 'Saturday', 'Sunday']\n days = self.days_between(ref)\n self_day = day_names[days%7]\n return self_day", "def day_of_week(self) -> str:\n return self.elements[4]", "def formatWeekDay(self, day):\n return '<th class=\"day\">%s</th>' % day_abbr[day]", "def get_weekday():\n try:\n day = config.getint(\"threadbot\", \"debug_day\")\n except ConfigParser.NoOptionError:\n d = datetime.date.today()\n day = d.weekday()\n sort_by_new = False\n\n # 0 / Monday / Feedback thread\n # 1 / Tuesday / How do I make this sound thread\n # 2 / Wednesday / There are no stupid questions thread\n # 3 / Thursday / Marketplace thread\n dayname = \"waffles\"\n if day == 0:\n dayname = \"monday\"\n sort_by_new = True\n elif day == 1:\n dayname = \"tuesday\"\n sort_by_new = True\n elif day == 2:\n dayname = \"wednesday\"\n sort_by_new = True\n elif day == 3:\n dayname = \"thursday\"\n sort_by_new = False\n else:\n sys.exit(1) # woo inelegance\n\n return dayname, sort_by_new", "def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()", "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def day_of_the_week(arg):", "def _get_user_name(self):\n if self.runtime.get_real_user is None:\n 
return 'staff'\n else:\n return self.runtime.get_real_user(self.runtime.anonymous_student_id).username", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def set_week_day(self, wday):\r\n\t\twdays = ['Domingo', 'Lunes', 'Martes', 'Miercoles',\r\n\t\t\t\t 'Jueves', 'Viernes', 'Sabado']\r\n\t\tfor i in range(7):\r\n\t\t\tif wday == i: \r\n\t\t\t\treturn wdays[i]", "def get_user_name(user: User) -> str:\n user_name = user.get(\"display_name\")\n if not user_name:\n user_name = user[\"fullname\"]\n if not user_name:\n user_name = user[\"name\"]\n return user_name", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def format_user_for_slack(user):\n if getattr(user, \"last_name\", None):\n return f\"<@{user.last_name}>\"\n return user.email", "def get_day_name(day_number):\n day_dict = {\n 0: 'Sunday',\n 1: 'Monday',\n 2: 'Tuesday',\n 3: 'Wednesday',\n 4: 'Thursday',\n 5: 'Friday',\n 6: 'Saturday'\n }\n return day_dict[day_number]", "def WeekdayName(num, length=99):\n if num < 1 or num > NUM_WEEKDAYS:\n raise ValueError('Bad weekday number')\n return _WEEKDAY_NAMES[num][:length]", "def get_weekday_number(date):\n return date.strftime('%w')", "def get_weekday(self):\n weekdays = dict(PRODUCT_WEEKDAYS)\n return weekdays.get(self.weekday, \"N/A\")", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]" ]
[ "0.69684196", "0.66607773", "0.66087294", "0.6585144", "0.6446677", "0.63943785", "0.63613486", "0.6343028", "0.6343028", "0.6283567", "0.62727726", "0.6233579", "0.62119097", "0.618736", "0.6144941", "0.61427206", "0.60637295", "0.59701353", "0.5966777", "0.59397113", "0.59397113", "0.59219515", "0.5918172", "0.5904339", "0.58957493", "0.58670354", "0.584292", "0.5827391", "0.58140284", "0.57850015" ]
0.74635184
0
Retrieves the specified metadata from the metadata server.
def get_metadata(key=''): response, content = httplib2.Http().request( '%s/%s' % (METADATA_BASE_URL, key), headers={'Metadata-Flavor': 'Google'}, method='GET', ) if response['status'] == '404': raise NotFoundError(response, content) return content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def fetch_metadata(self, route: str):\n data = await self.http.get_metadata(route)\n return data", "def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()", "def get_metadata(self):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\n\t\tfields = {\n\t\t 'token': config['api_token'],\n\t\t 'content': 'metadata',\n\t\t 'format': 'json'\n\t\t}\n\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, config['api_url'])\n\t\tch.setopt(ch.HTTPPOST, list(fields.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\n\t\tmetadata = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn metadata", "def get_metadata (self, name):\n return self.metadata.get(name)", "def metadata_get(self, endpoint_name=None, key=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/metadata/%s' % key, 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/metadata/%s' % (endpoint_name, key), 'GET')\n return body", "def test_get_server_metadata_item(self):\n metadata_response = self.servers_client.get_server_metadata_item(\n self.server.id, 'meta_key_1')\n metadata = metadata_response.entity\n self.assertEqual(metadata.get('meta_key_1'), 'meta_value_1')", "def fetch_metadata (self, id):\n payload = {\n 'movieid': id,\n 'imageformat': 'jpg',\n '_': int(time())\n }\n response = self._session_get(component='metadata', params=payload, type='api')\n return self._process_response(response=response, component=self._get_api_url_for(component='metadata'))", "def get_metadata(self):\n return self.manager.get_metadata(self)", "def _fetch_current_remote_metadata(conn):\n content = _get(conn, REMOTE_METADATA_FILE)\n metadata = json.loads(content) if content else {}\n return metadata", "def test_api_can_get_metadata(self):\n response = self.client.get('/metadata/', format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def get_server_metadata(self, name):\n raise NotImplementedError", "def test_list_server_metadata(self):\n metadata_response = self.servers_client.list_server_metadata(\n self.server.id)\n metadata = metadata_response.entity\n self.assertEqual(200, metadata_response.status_code)\n self.assertEqual(metadata.get('meta_key_1'), 'meta_value_1')\n self.assertEqual(metadata.get('meta_key_2'), 'meta_value_2')", "def get_metadata(self):\n url = 'https://www150.statcan.gc.ca/t1/wds/rest/getCubeMetadata'\n payload = [{'productId': int(self.productId)}]\n print('Retreiving metadata for Product ID: ' + self.productId)\n req = requests.post(\n url,\n json=payload\n )\n response = req.json()\n if (response[0]['status'] == \"SUCCESS\"):\n return(response[0]['object'])\n else:\n self.errors = response\n print('ERROR: Metadata for Product ID ' + self.productId + ' could not be loaded.')\n print('ERROR: see Product.errors() for more info')", "def get(self, id):\n\n kparams = KalturaParams()\n kparams.addIntIfDefined(\"id\", id);\n self.client.queueServiceActionCall(\"metadata_metadata\", \"get\", \"KalturaMetadata\", kparams)\n if self.client.isMultiRequest():\n return self.client.getMultiRequestResult()\n resultNode = self.client.doQueue()\n return KalturaObjectFactory.create(resultNode, 'KalturaMetadata')", "def GetMetadata(self):\n return self.dict['meta']", "def get_metadata(self):\n try:\n r = 
requests.get('https://login.mailchimp.com/oauth2/metadata', auth=self)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n r.raise_for_status()\n output = r.json()\n if 'error' in output:\n raise requests.exceptions.RequestException(output['error'])\n return output", "async def _get_metadata(self, server: model.Server,\n realmID, profileID):\n api_url = ('https://eu.api.blizzard.com/sc2/'\n f'metadata/profile/{server.id()}/{realmID}/{profileID}')\n payload = {'locale': 'en_US',\n 'access_token': await self.get_access_token()}\n data, status = await self._perform_api_request(api_url, params=payload)\n if status != 200:\n raise InvalidApiResponse(f'{status}: {api_url}')\n return data", "def show(self, req, server_id, id):\n context = req.environ['nova.context']\n try:\n data = self._get_metadata(context, server_id)\n except exception.InstanceNotFound:\n msg = _('Server %(server_id)s does not exist') % locals()\n raise exc.HTTPNotFound(explanation=msg)\n\n try:\n return {id: data['metadata'][id]}\n except KeyError:\n msg = _(\"metadata item %s was not found\" % (id))\n raise exc.HTTPNotFound(explanation=msg)", "def get(self):\n return self._metadata", "def index(self, req, server_id):\n context = req.environ['nova.context']\n try:\n return self._get_metadata(context, server_id)\n except exception.InstanceNotFound:\n msg = _('Server %(server_id)s does not exist') % locals()\n raise exc.HTTPNotFound(explanation=msg)", "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def get_metadata(\n self, scope, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.get_metadata.metadata['url']\n path_format_arguments = {\n 'scope': self._serialize.url(\"scope\", scope, 'str', skip_quote=True)\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Accept'] = 'application/xml'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters, header_parameters)\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200]:\n raise models.QueryFailureException(self._deserialize, response)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('str', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def get_metadata(self):\n return self._metadata", "def test_that_metadata_is_fetched(self):\n # Get value of 'param' parameter.\n param_name = 'param'\n metadata_url = '%s/%s' % (\n gce_metadata_services.METADATA_ATTRIBUTES_URL, param_name)\n metadata_headers = gce_metadata_services.METADATA_HEADERS\n metadata_value = 'value'\n\n with self.put_get_request(\n metadata_url, metadata_value, 200, metadata_headers):\n value = gce_metadata_services.get_metadata_param(param_name)\n self.assertEqual(value, 'value')", "def 
get_metadata(self, filename):\n return self.execute_json(filename)[0]", "def GetMetadataSample():\n client = CreateClient()\n # Fetch the metadata entry and display bits of it\n metadata = client.GetMetadata()\n print 'Quota'\n print ' Total:', metadata.quota_bytes_total.text\n print ' Used:', metadata.quota_bytes_used.text\n print ' Trashed:', metadata.quota_bytes_used_in_trash.text\n print 'Import / Export'\n for input_format in metadata.import_formats:\n print ' Import:', input_format.source, 'to', input_format.target\n for export_format in metadata.export_formats:\n print ' Export:', export_format.source, 'to', export_format.target\n print 'Features'\n for feature in metadata.features:\n print ' Feature:', feature.name.text\n print 'Upload Sizes'\n for upload_size in metadata.max_upload_sizes:\n print ' Kind:', upload_size.kind, upload_size.text", "def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents", "def get_metadata(self, private_key, client_id):\n return self._samp_hub.getMetadata(private_key, client_id)", "async def get_server_metadata(\n self, headers: dict[str, t.Any] = ..., as_json: t.Literal[False] = ...\n ) -> service_pb2.ServerMetadataResponse:", "def get_metadata(self, source, graph):\n return self.server.get_metadata(source, self.graphs.get(graph))" ]
[ "0.7245699", "0.7151928", "0.7104588", "0.696615", "0.6956422", "0.6926909", "0.68931544", "0.6870761", "0.6794623", "0.67537457", "0.67467815", "0.66921633", "0.6678577", "0.6670239", "0.66276926", "0.6622276", "0.6594163", "0.6577797", "0.6537819", "0.65259176", "0.6484931", "0.6468234", "0.6440317", "0.6399379", "0.63966256", "0.6366586", "0.634676", "0.634585", "0.63442475", "0.63422585" ]
0.72082514
1
Acknowledges the receipt of messages on a Cloud Pub/Sub subscription.
def acknowledge(self, subscription, project, ack_ids): response, content = self._http.request( '%s/%s/subscriptions/%s:acknowledge' % ( PUBSUB_BASE_URL, project, subscription), body=json.dumps({'ackIds': ack_ids}), method='POST', ) if response['status'] == '404': raise NotFoundError(response, json.loads(content)) return json.loads(content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def acknowledge(self, *event_messages, bus_client: \"BusClient\"):\n pass", "def on_subscribe(self, mqtt_client, userdata, mid, granted_qos):\n logging.debug(\"DEBUG - subscribe ack received\")", "def MessageAck(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __call__(\n self,\n request: pubsub.AcknowledgeRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ):\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"post\",\n \"uri\": \"/v1/{subscription=projects/*/subscriptions/*}:acknowledge\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_acknowledge(request, metadata)\n pb_request = pubsub.AcknowledgeRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)", "def acknowledge_message(self, delivery_tag):\n logger.info('Acknowledging message %s process %s consumer_id %s', delivery_tag, threading.current_thread, str(self.consumer_id))\n self._channel.basic_ack(delivery_tag)", "def acked(err, msg):\n if err is not None:\n print(\"Failed to deliver message: {}\".format(err))\n else:\n print(\"Produced record to topic {} partition [{}] @ offset {}\"\n .format(msg.topic(), msg.partition(), msg.offset()))", "def acknowledge_message(self, delivery_tag):\n self.logger.info('acknowledging message %s', delivery_tag)\n self._channel.basic_ack(delivery_tag)", "def acked(err, msg):\n if err is not None:\n print(\"failed to deliver message: {}\".format(err.str()))\n else:\n print(\"produced to: {} [{}] @ {}\".format(msg.topic(), msg.partition(), msg.offset()))", "def ack(self):\n self.consumer.ack(self)", "def acknowledge(self, message: Message[ValueType]) -> Message[ValueType]:", "def acknowledge_message(self, channel, delivery_tag):\n logger.info('Acknowledging message %s', delivery_tag)\n channel.basic_ack(delivery_tag)", "def MessageAck(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def _acknowledge_message(self, app, message):\n self.client.delete_message(\n QueueUrl=self.app.settings['SQS_INBOUND_QUEUE_URL'],\n ReceiptHandle=message['ReceiptHandle'],\n )", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not 
implemented!')", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)", "def on_nack(self, message: PubsubMessage, ack: Callable[[], None]):\n pass", "async def ack_event(self, envelope: Envelope, multiple: bool = False) -> None:\n await self.channel.basic_client_ack(\n delivery_tag=envelope.delivery_tag, multiple=multiple\n )", "def acknowledge_prepayment(self):\n self.acknowledge_payment()", "def acknowledge(self, sequence: int):\n ackPacket = Rudp.Packet(self.seq, sequence)\n frame = ackPacket.construct()\n self.seqPlusOne()\n self.socket.sendto(frame, self.client)", "def ack_event(self, event_id):\n return self.client.call('Notification_Occurrence_Event', 'acknowledgeNotification', id=event_id)", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def acknowledgePacket(self, packet: Rudp.Packet):\n self.acknowledge(packet.seq)", "def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )", "def ack_message(channel, delivery_tag):\n global logger\n if channel.is_open:\n channel.basic_ack(delivery_tag)\n logger.debug(\"Channel is acked!\")\n else:\n # Channel is already closed, so we can't ACK this message;\n # log and/or do something that makes sense for your app in this case.\n logger.debug(\"Channel is closed!\")", "def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )", "def on_publish(self, mqtt_client, userdata, mid):\n logging.debug(\"DEBUG - publish ack received\")", "async def aoc_subscribe(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n role = ctx.guild.get_role(settings.aoc.role_id)\n unsubscribe_command = f\"{ctx.prefix}{ctx.command.root_parent} unsubscribe\"\n\n if role not in ctx.author.roles:\n await ctx.author.add_roles(role)\n await ctx.send(\n \"Okay! You have been __subscribed__ to notifications about new Advent of Code tasks. \"\n f\"You can run `{unsubscribe_command}` to disable them again for you.\"\n )\n else:\n await ctx.send(\n \"Hey, you already are receiving notifications about new Advent of Code tasks. \"\n f\"If you don't want them any more, run `{unsubscribe_command}` instead.\"\n )", "def acknowledgement(self, message: Message[ValueType]):", "def cbMqtt_on_subscribe(client, userdata, mid, granted_qos):\n # logger.debug('Subscribed to MQTT topic with message id %d', mid)\n pass" ]
[ "0.62813425", "0.6150225", "0.6145892", "0.6101863", "0.60896355", "0.603904", "0.6010502", "0.5991581", "0.5966796", "0.5925992", "0.58987975", "0.586228", "0.582372", "0.5721523", "0.5721523", "0.55810386", "0.55795044", "0.5551798", "0.5529495", "0.5479159", "0.54706067", "0.5461998", "0.5418777", "0.54105115", "0.54012686", "0.53808856", "0.5378522", "0.5348259", "0.5333484", "0.53333384" ]
0.68074644
0
Validate model spec Raises SpecError If the spec is not wellformatted.
def validate(self): logger.debug("Validating spec: %s", self._spec) try: validate(instance=self._spec, schema=MODEL_SPEC_SCHEMA) except ValidationError as e: raise SpecError(e.message) from e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swagger_content_validator(spec_body):\n version = spec_body['swagger']\n if version.startswith('1'):\n return 'Deprecated Swagger version. Please visit http://swagger.io for information on upgrading to Swagger 2.0'\n\n with open(config.JSON_SCHEMA) as schema:\n swagger_schema = json.loads(schema.read())\n\n try:\n validate(spec_body, swagger_schema)\n except ValidationError as ex:\n return str(ex)\n\n return 'OK'", "def validate(self):\r\n return self.specs.validate(self)", "def validate_notebook_model(self, model):\n try:\n validate(model[\"content\"])\n except ValidationError as e:\n model[\"message\"] = \"Notebook Validation failed: {}:\\n{}\".format(\n e.message,\n json.dumps(e.instance, indent=1, default=lambda obj: \"<UNKNOWN>\"),\n )\n return model", "def assertValid(self, data, requirement, msg=None):\n # Setup traceback-hiding for pytest integration.\n __tracebackhide__ = lambda excinfo: excinfo.errisinstance(ValidationError)\n\n try:\n validate(data, requirement, msg=msg)\n except ValidationError as err:\n def should_truncate(line_count, char_count):\n return self.maxDiff and (char_count > self.maxDiff)\n err._should_truncate = should_truncate\n\n err._truncation_notice = \\\n 'Diff is too long. Set self.maxDiff to None to see it.'\n\n raise err", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def _validate_markdown_spec(\n self, spec: standard_component_specs, markdown_dict: dict\n ):\n for key in spec.MARKDOWN_DICT:\n if key not in markdown_dict:\n raise ValueError(f\"Missing mandatory key - {key}\")\n if key in markdown_dict:\n self._type_check(\n actual_value=markdown_dict[key],\n key=key,\n spec_dict=spec.MARKDOWN_DICT,\n )", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def test_is_valid_invalid_resume(self):\n self.assertFalse(resumeschema.is_valid(self.invalid_resume))", "def swagger_validator(url):\n if url is None:\n return \"Can't read from file null\", config.INVALID_BADGE\n\n spec_req = requests.get(url)\n if spec_req.status_code != 200:\n return \"Can't read from file {}\".format(url), config.INVALID_BADGE\n\n try:\n if spec_req.headers['content-type'] == 'application/json':\n spec_body = json.loads(spec_req.text)\n else:\n spec_body = json.loads(json.dumps(yaml.load(spec_req.text)))\n\n version = spec_body['swagger']\n if version.startswith('1'):\n return (\n 'Deprecated Swagger version. Please visit http://swagger.io '\n 'for information on upgrading to Swagger 2.0', config.UPGRADE_BADGE)\n except Exception as ex:\n log.error(exception=str(ex))\n return 'Unable to parse content. 
It may be invalid JSON or YAML', config.ERROR_BADGE\n\n with open(config.JSON_SCHEMA) as schema:\n swagger_schema = json.loads(schema.read())\n\n try:\n validate(spec_body, swagger_schema)\n except ValidationError as ex:\n return str(ex), config.INVALID_BADGE\n\n return 'OK', config.VALID_BADGE", "def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)", "def test_001_validate_with_bad_properties(self):\n m = schematics_flexible.BaseFlexible(\n {'code': '06',\n 'properties': {\"a\": \"this is test\"}},\n store_handler=get_mock())\n try:\n m.validate()\n except schematicsValidationError:\n pass\n else:\n self.assertTrue(False,\n 'Model must raise exception when validate raise')", "def verify_spec_name(spec_name):\n if not isinstance(spec_name, text_type):\n raise ValueError(\n \"expected spec name of string type, but got '{0}' of type '{1}'\".\n format(spec_name, to_str(type(spec_name))))", "def validate_format(self):\n raise NotImplementedError()", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def validate_model_string(self, model):\r\n if model:\r\n if '/' not in model:\r\n raise ValueError(\r\n 'Model must be specified in the form of '\r\n '\\'{username}/{model-slug}/{framework}/{variation-slug}/{version-number}\\''\r\n )\r\n\r\n split = model.split('/')\r\n if not split[0] or not split[1]:\r\n raise ValueError('Invalid model specification ' + model)", "def validate(self, spec):\n d = spec.directory\n for file_name in os.listdir(d):\n if file_name.endswith(\".icon\"):\n if \" \" in file_name:\n raise ValidationException(f\"The .icon file name was '{file_name}'.\\n \"\n \".icon file may not contain spaces use a '_' instead.\")", "def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")", "def is_model_valid(self):\n try:\n payload = {\n \"modelurl\": self.model_builder_url + self.model_uuid,\n \"api_key\": self.web2nl_api_key\n }\n\n response = requests.get(self.web2nl_url + \"validations\", params=payload)\n if response.status_code is requests.codes.no_content:\n return True\n elif response.status_code is requests.codes.bad_request:\n self.logger.error(\"Model validation failed: \" + response.text)\n return False\n else:\n self.logger.error(\"Failed while validating model. 
Will retry in some time\")\n raise RuntimeError(\"Failed while validating model\")\n except requests.exceptions.ConnectionError as errc:\n self.logger.error(\"Error Connecting:\", errc)", "def validate_model(program_name, model_dictionary, model_context, aliases, wlst_mode,\n validate_crd_sections=True):\n _method_name = 'validate_model'\n\n try:\n validator = Validator(model_context, aliases, wlst_mode=wlst_mode, validate_crd_sections=validate_crd_sections)\n\n # no need to pass the variable file for processing, substitution has already been performed\n return_code = validator.validate_in_tool_mode(model_dictionary, variables_file_name=None,\n archive_file_name=model_context.get_archive_file_name())\n except ValidateException, ex:\n __logger.severe('WLSDPLY-20000', program_name, ex.getLocalizedMessage(), error=ex,\n class_name=_class_name, method_name=_method_name)\n tool_exception = \\\n exception_helper.create_exception(aliases.get_exception_type(), 'WLSDPLY-20000', program_name,\n ex.getLocalizedMessage(), error=ex)\n __logger.throwing(tool_exception, class_name=_class_name, method_name=_method_name)\n raise tool_exception\n\n if return_code == Validator.ReturnCode.STOP:\n __logger.severe('WLSDPLY-20001', program_name, class_name=_class_name, method_name=_method_name)\n tool_exception = \\\n exception_helper.create_exception(aliases.get_exception_type(), 'WLSDPLY-20001', program_name)\n __logger.throwing(tool_exception, class_name=_class_name, method_name=_method_name)\n raise tool_exception", "def validate(self):\n Component.validate(self)\n kinds = (\"lib\", \"exe\")\n if self.kind not in kinds:\n raise Invalid(\"kind must be one of %s for component %s\" % (kinds,self.name))\n\n if self.kind == \"exe\" :\n if not self.exe_path:\n raise Invalid(\"exe_path must be defined for component %s\" % self.name)", "def test_03_validate_team_name_presence(self):\n try:\n self.record.full_clean()\n except ValidationError as e:\n self.assertTrue('This field cannot be blank.' in e.message_dict['team_name'])", "def is_valid(self):\n return _drafter.check_blueprint(self.content)", "def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def validate_raw_block_layout(raw_block_layout):\n\n # Confirm that each layer is a list of block specifications where\n # each block specification has length 2 (i.e. 
block_name,num_repeats)\n for raw_layer_layout in raw_block_layout:\n for raw_block_spec in raw_layer_layout.split('-'):\n if len(raw_block_spec.split(',')) != 2:\n raise Exception(INVALID_BLOCK_SPEC_ERR.format(raw_block_spec))", "def is_valid(self):\n return self.scenario.is_valid()", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def validate(self, doc):\n return self.schema.validate(doc)", "def test_is_valid_valid_resume(self):\n self.assertTrue(resumeschema.is_valid(self.valid_resume))", "def v_err(flaw):\n error_messages = {\n 'no_season': _(\n \"Season must contain at least 4 alphanumeric characters.\"\n ),\n 'no_items': _(\n \"Menu must contain at least 1 item.\"\n ),\n 'no_name': _(\n \"Name field must contain at least 4 alphanumeric characters.\"\n ),\n 'no_desc': _(\n \"Description must contain at least 10 characters.\"\n ),\n 'no_chef': _(\n \"Item must belong to a chef.\"\n ),\n 'no_ing': _(\n \"Item must contain at least 1 ingredient.\"\n ),\n 'elapsed': _(\n \"This date has elapsed.\"\n )\n }\n raise forms.ValidationError(\n error_messages[flaw],\n code=flaw,\n )" ]
[ "0.61822677", "0.60357326", "0.5775823", "0.53280115", "0.52456456", "0.51503146", "0.51440585", "0.51136863", "0.510787", "0.5099875", "0.50923747", "0.5076183", "0.5076088", "0.5070361", "0.50431174", "0.5036382", "0.499781", "0.499263", "0.49831238", "0.49813223", "0.4969112", "0.49654192", "0.4965228", "0.49648857", "0.49586108", "0.49256852", "0.48922065", "0.48825708", "0.4858029", "0.48515737" ]
0.7007413
0
Return Model artifact URI
def model_uri(self) -> str: return self._spec["model"]["uri"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_artifact(self):\n pass", "def get_single_uri(artifact_list: List[Artifact]) -> Text:\n return get_single_instance(artifact_list).uri", "def get_repository_uri(self) -> str:\n raise NotImplementedError", "def uri(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uri\")", "def get_uri(self):\n return self.url", "def get_url(self):\n raise NotImplementedError(\"This asset does not have a URL\")", "def artifact_file(self) -> aws_cdk.aws_codepipeline.ArtifactPath:\n return jsii.get(self, \"artifactFile\")", "def download_model_artifact(self, artifact_name:str=None, alias:str=None):\n # TODO: extract run's metadata\n if self.use_wandb:\n artifact_dir, artifact = self.wandb.download_model_artifact(path=artifact_name, alias=alias)\n return artifact_dir, artifact\n else:\n self.log_message(\"Does not support download dataset artifact from Weight & Biases database.\")\n\n return None, None", "def get_uri(self):\r\n return self.uri", "def uri(self) -> str:\n return self._uri", "def _download_model_version_files(self):\n model_version = self.client.get_model_version(name=self.model_name, version=self.model_version)\n artifact_uri = model_version.source\n return _download_artifact_from_uri(artifact_uri)", "def get_artifact(self) -> artifact.Artifact:\n if self.metadata_file and self.output_name:\n return entrypoint_utils.get_artifact_from_output(\n self.metadata_file, self.output_name)\n else:\n # Provide an empty schema when returning a raw Artifact.\n result = artifact.Artifact(\n instance_schema=artifact.DEFAULT_ARTIFACT_SCHEMA)\n result.uri = self.uri\n return result", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def url(self) -> str:\n return self.url_as()", "def getURI(self):\n return _libsbml.SBase_getURI(self)", "def get_artifact(self) -> artifact.Artifact:\n if self.metadata_file and self.output_name:\n return entrypoint_utils.get_artifact_from_output(\n self.metadata_file, self.output_name)\n else:\n # Provide an empty schema when returning a raw Artifact.\n result = artifact.Artifact(\n instance_schema=artifact.DEFAULT_ARTIFACT_SCHEMA)\n result.uri = self.uri\n return result", "def url(self):\n return self.full()", "def get_uri(self):\n return self.__uri", "def uri(self) -> Optional[str]:\n return pulumi.get(self, \"uri\")", "def uri(cls):\n return f'{cls.app_label}.{cls.name}'", "def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def artifact(cls):\n return relationship.many_to_one(cls, 'artifact')", "def url(self) -> str:\n return self._connection.url + self.path_attribute", "def url(self):\n return self.storage.url(self.name)" ]
[ "0.6964888", "0.66325843", "0.64528686", "0.63969576", "0.6376586", "0.6343797", "0.63117784", "0.6284732", "0.6249571", "0.62446123", "0.62372655", "0.6217305", "0.62148327", "0.62148327", "0.62148327", "0.62148327", "0.62148327", "0.62148327", "0.6201441", "0.62008864", "0.61981136", "0.6182092", "0.61736226", "0.614588", "0.61439854", "0.61353904", "0.61351985", "0.6126052", "0.6121233", "0.61108494" ]
0.7396116
0
Return preprocessing transform if exists
def pre_processing(self) -> Optional[Callable]: if ( "transforms" not in self._spec or "pre" not in self._spec["transforms"] ): # Passthrough return lambda x: x f = find_class(self._spec["transforms"]["pre"]) return f(self.options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_preprocessing(name):\n if name not in preprocessing_fn_map.keys():\n raise ValueError('Preprocessing name [%s] was not recognized.' % name)\n\n return preprocessing_fn_map[name].preprocess_image", "def get_transform_fn():", "def _build_preprocessing(self):\n\n # For now, do nothing\n pass", "def preprocessing(ct):\n return value_preprocessing(ct, False)", "def is_preprocessing(self):\r\n return conf.lib.clang_isPreprocessing(self)", "def _pre_process(self, x):\n return x", "def getPreprocessingTransform(cls, modality):\n if modality == \"CT\":\n trans = [SlicerLoadImage(keys=[\"image\"]), AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(keys=[\"image\"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n AddChanneld(keys=[\"image\"]),\n ToTensord(keys=[\"image\"]), ]\n return Compose(trans)\n elif modality == \"MRI\":\n trans = [SlicerLoadImage(keys=[\"image\"]), AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"LPS\"),\n Normalized(keys=[\"image\"]),\n AddChanneld(keys=[\"image\"]),\n ToTensord(keys=[\"image\"])]\n return Compose(trans)", "def preprocessed(self):\n return self._preprocessed", "def get_preprocess_fn(is_training, is_pretrain):\n # Disable test cropping for small images (e.g. CIFAR)\n if FLAGS.image_size <= 32:\n test_crop = False\n else:\n test_crop = True\n color_jitter_strength = FLAGS.color_jitter_strength if is_pretrain else 0.\n return functools.partial(\n data_util.preprocess_image,\n height=FLAGS.image_size,\n width=FLAGS.image_size,\n is_training=is_training,\n color_jitter_strength=color_jitter_strength,\n test_crop=test_crop)", "def parse_preproc(key, content):\n if inspect.isclass(key):\n pp = key(**content)\n key = pp.__class__.__name__.lower()\n elif key.lower() == 'none':\n pp = DummyNone()\n elif key.lower() == 'recenter':\n pp = StandardScaler(with_mean=True, with_std=False)\n elif key.lower() == 'standardize':\n pp = StandardScaler(with_mean=True, with_std=True)\n elif key.lower() == 'normalize':\n content.setdefault('norm', 'l2')\n # pp = Normalizer(norm=content[1][0])\n pp = Normalizer(**content)\n elif key.lower() == 'minmax':\n content.setdefault('feature_range', (0, 1))\n pp = MinMaxScaler(**content)\n else:\n pp = DummyNone()\n return (key, pp, 'preproc')", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def preprocess(config: Config) -> None:\n print(colored(\"preprocessing:\", attrs=[\"bold\"]))\n factory = PreprocessingFactory()\n factory.process(config)", "def pre_processor(self):", "def get_preprocess(self) -> Dict:\n raise NotImplementedError", "def get_transform(self):\n return self.transform", "def get_transform(self):\n raise NotImplementedError", "def compose_before(self, transform):\n if isinstance(transform, self.composes_inplace_with):\n return self._compose_before(transform)\n else:\n # best we can do is a TransformChain, let Transform handle that.\n return Transform.compose_before(self, transform)", "def preprocess(self):", "def any_preprocessing(name):\n return hp.choice('%s' % name, [\n [pca(name + '.pca')],\n [standard_scaler(name + '.standard_scaler')],\n [min_max_scaler(name + '.min_max_scaler')],\n [normalizer(name + '.normalizer')],\n # -- not putting in one-hot because it 
can make vectors huge\n #[one_hot_encoder(name + '.one_hot_encoder')],\n []\n ])", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def preprocess_transform(self, X: Tensor) -> Tensor:\n if self.transform_on_train:\n # We need to disable learning of bounds here.\n # See why: https://github.com/pytorch/botorch/issues/1078.\n if hasattr(self, \"learn_bounds\"):\n learn_bounds = self.learn_bounds\n self.learn_bounds = False\n result = self.transform(X)\n self.learn_bounds = learn_bounds\n return result\n else:\n return self.transform(X)\n return X", "def preprocessing(self, preprocessing):\n\n self._preprocessing = preprocessing", "def get_transform(self):\n raise NotImplementedError()", "def _get_transforms(loaded_tfm):\n if isinstance(loaded_tfm, tfm.Compose):\n transform = []\n for _tfm in loaded_tfm.transforms:\n if not isinstance(_tfm, OMITTED_TRANSFORMS):\n transform.append(_tfm)\n transform = tfm.Compose(transform)\n else:\n if not isinstance(loaded_tfm, OMITTED_TRANSFORMS):\n transform = loaded_tfm\n else:\n transform = None\n return transform" ]
[ "0.7144515", "0.6530584", "0.64988124", "0.6475517", "0.6416115", "0.63836473", "0.63563347", "0.61520123", "0.6150919", "0.6044033", "0.60155463", "0.60155463", "0.60155463", "0.60155463", "0.60155463", "0.5983216", "0.59528327", "0.5942889", "0.593253", "0.5911872", "0.5861548", "0.5856466", "0.5855524", "0.58434117", "0.58434117", "0.58434117", "0.5836617", "0.58232075", "0.5820437", "0.5802133" ]
0.7789364
0
Return postprocessing transform if exists
def post_processing(self) -> Optional[Callable]: if ( "transforms" not in self._spec or "post" not in self._spec["transforms"] ): # Passthrough return lambda x: x f = find_class(self._spec["transforms"]["post"]) return f(self.options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_transform_fn():", "def pre_processing(self) -> Optional[Callable]:\n if (\n \"transforms\" not in self._spec\n or \"pre\" not in self._spec[\"transforms\"]\n ):\n # Passthrough\n return lambda x: x\n f = find_class(self._spec[\"transforms\"][\"pre\"])\n return f(self.options)", "def transform():\n pass", "def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None", "def compose_after(self, transform):\n if isinstance(transform, self.composes_inplace_with):\n return self._compose_after(transform)\n else:\n # best we can do is a TransformChain, let Transform handle that.\n return Transform.compose_after(self, transform)", "def get_transform(self):\n raise NotImplementedError", "def __get_postprocessor(self) -> Optional[Postprocessor]:\n\n if self.lvp == LVP.COBOL_TO_CSHARP_9:\n return COBOLToCSharp9Postprocessor(self.itl)\n\n return Postprocessor()", "def get_transform(self):\n return self.transform", "def _apply_transform(self):\n pass", "def get_transform(self):\n raise NotImplementedError()", "def _select_transform(self):\n for transform in self.transforms:\n if transform.applies is None or transform.applies(self.ti_dict) is True:\n self.transform = transform\n break\n else:\n raise RuntimeError('No transform found for TI data')", "def transform_fn(self):\n if hasattr(self._transform_fn, 'forward'):\n return self._transform_fn.forward\n return self._transform_fn", "def transform():", "def _post_process(self, x):\n return x", "def transform(self):\n return self._transform", "def make_transform_fn(self, ):\n return self._transform_fn", "def transform(self, results: Dict) -> Optional[Dict]:\n idx = self.random_pipeline_index()\n return self.transforms[idx](results)", "def _compose_after_inplace(self, transform):\n # naive approach - update self to be equal to transform and\n # compose_before_from_vector_inplace\n self_vector = self.as_vector().copy()\n self.update_from_vector(transform.as_vector())\n return self.compose_before_from_vector_inplace(self_vector)", "def transform(self, node):\n return self.get_transform_func(node)(node)", "def apply(self):\n if self.applied:\n raise RuntimeError(\"Transform applied more than once\")\n \n self._apply()\n \n self.applied = True\n \n return self.template", "def get_postprocess(self) -> Dict:\n raise NotImplementedError", "def getPostProcessingTransform(cls, original_spacing, original_size, modality):\n return Compose([\n AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=original_spacing, mode=\"nearest\"),\n Resized(keys=[\"image\"], spatial_size=original_size)\n ])", "def transform(self):\n\n return self._transform", "def _get_transforms(loaded_tfm):\n if isinstance(loaded_tfm, tfm.Compose):\n transform = []\n for _tfm in loaded_tfm.transforms:\n if not isinstance(_tfm, OMITTED_TRANSFORMS):\n transform.append(_tfm)\n transform = tfm.Compose(transform)\n else:\n if not isinstance(loaded_tfm, OMITTED_TRANSFORMS):\n transform = loaded_tfm\n else:\n transform = None\n return transform", "def preprocess_transform(self, X: Tensor) -> Tensor:\n if self.transform_on_train:\n # We need to disable learning of bounds here.\n # See why: https://github.com/pytorch/botorch/issues/1078.\n if hasattr(self, \"learn_bounds\"):\n learn_bounds = self.learn_bounds\n self.learn_bounds = False\n result = self.transform(X)\n self.learn_bounds = 
learn_bounds\n return result\n else:\n return self.transform(X)\n return X", "def transform(self, results: Dict) -> Optional[Dict]:\n for t in self.transforms:\n results = t(results) # type: ignore\n if results is None:\n return None\n return results", "def get_transform(self, name):\n return self._handlers_by_name[name].get_transform_instance()", "def _worker_fn(example, transform):\n feature = transform(example)\n return feature", "def get_preprocessing(name):\n if name not in preprocessing_fn_map.keys():\n raise ValueError('Preprocessing name [%s] was not recognized.' % name)\n\n return preprocessing_fn_map[name].preprocess_image", "def getPreprocessingTransform(cls, modality):\n if modality == \"CT\":\n trans = [SlicerLoadImage(keys=[\"image\"]), AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(keys=[\"image\"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n AddChanneld(keys=[\"image\"]),\n ToTensord(keys=[\"image\"]), ]\n return Compose(trans)\n elif modality == \"MRI\":\n trans = [SlicerLoadImage(keys=[\"image\"]), AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"LPS\"),\n Normalized(keys=[\"image\"]),\n AddChanneld(keys=[\"image\"]),\n ToTensord(keys=[\"image\"])]\n return Compose(trans)" ]
[ "0.6352982", "0.63442636", "0.58065474", "0.5791054", "0.5751158", "0.5738636", "0.56980866", "0.56887895", "0.5667745", "0.5642095", "0.5613712", "0.5607904", "0.5549039", "0.55453587", "0.5538673", "0.5520472", "0.5499307", "0.5463468", "0.5452866", "0.5443362", "0.54308534", "0.5426668", "0.539404", "0.5381791", "0.53671986", "0.5350588", "0.5295971", "0.5288425", "0.52866244", "0.5270748" ]
0.7251341
0
Return a UDF from a given ModelSpec
def udf_from_spec(spec: ModelSpec): if spec.version != "1.0": raise SpecError( f"Only spec version 1.0 is supported, got {spec.version}" ) if spec.flavor == "pytorch": from rikai.spark.sql.codegen.pytorch import generate_udf return generate_udf(spec) else: raise SpecError(f"Unsupported model flavor: {spec.flavor}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function", "def get_vit_fn(model, num_classes, spatial_res):\n model = model.lower()\n assert model in VIT_FNS\n vit_fn = VIT_FNS[model]\n\n def wrapped_vit_model_fn(*args, **kwargs):\n return vit_fn(*args, num_classes=num_classes,\n image_size=spatial_res, **kwargs)\n return wrapped_vit_model_fn", "def get_function(model_or_function, preprocess_function=None):\n from dianna.utils.onnx_runner import SimpleModelRunner # pylint: disable=import-outside-toplevel\n\n if isinstance(model_or_function, Path):\n model_or_function = str(model_or_function)\n\n if isinstance(model_or_function, (str, bytes, Path)):\n runner = SimpleModelRunner(model_or_function,\n preprocess_function=preprocess_function)\n elif callable(model_or_function):\n if preprocess_function is None:\n runner = model_or_function\n else:\n\n def runner(input_data):\n return model_or_function(preprocess_function(input_data))\n else:\n raise TypeError(\n 'model_or_function argument must be string (path to model), '\n 'bytes (serialized onnx model), or function')\n return runner", "def ufunc_model(name):\n ufunc = getattr(np, name)\n nin = ufunc.nin\n nout = ufunc.nout\n if nin == 1:\n separable = True\n\n def evaluate(self, x):\n return self.func(x)\n\n else:\n separable = False\n\n def evaluate(self, x, y):\n return self.func(x, y)\n\n klass_name = _make_class_name(name)\n\n members = {\n \"n_inputs\": nin,\n \"n_outputs\": nout,\n \"func\": ufunc,\n \"linear\": False,\n \"fittable\": False,\n \"_separable\": separable,\n \"_is_dynamic\": True,\n \"evaluate\": evaluate,\n }\n\n klass = type(str(klass_name), (_NPUfuncModel,), members)\n klass.__module__ = \"astropy.modeling.math_functions\"\n return klass", "def get_model_fn(model, num_classes, spatial_res):\n\n model = model.lower()\n if model == \"cnn\": return get_cnn_fn(model, num_classes)\n if model in RESNET_FNS: return get_resnet_fn(model, num_classes, spatial_res)\n if model in VIT_FNS: return get_vit_fn(model, num_classes, spatial_res)\n if model in EFFICIENTNET_FNS: return get_efficientnet_fn(model, num_classes,\n spatial_res)\n raise ValueError(f\"Model {model} not recognized.\")", "def build_model_fn(self):", "def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits\n ))\n\n def metric_fn(labels, logits):\n labels = tf.cast(labels, tf.int64)\n return {\n \"recall@1\": tf.metrics.recall_at_k(labels, logits, 1),\n \"recall@5\": tf.metrics.recall_at_k(labels, logits, 5)\n }\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n elif mode == tf_estimator.ModeKeys.TRAIN:\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=params[\"learning_rate\"], beta1=params[\"beta1\"],\n beta2=params[\"beta2\"], epsilon=params[\"epsilon\"])\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n ))\n\n train_op = optimizer.minimize(loss, tf.train.get_global_step())\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, 
loss=loss, train_op=train_op)\n\n else:\n raise NotImplementedError\n return _model_fn", "def import_function(spec, kwargs=None):\n spec = spec.split(':')\n module = spec[0]\n fn = spec[1]\n module = import_module(module)\n fn = getattr(module, fn)\n\n if kwargs is not None:\n fn = functools.partial(fn, **kwargs)\n return fn", "def compile_test_fn(model):\n logger.info(\"Building val_fn\")\n acoustic_input = model.inputs[0]\n network_output = model.outputs[0]\n ctc_input_lengths = K.placeholder(ndim=2, dtype='int32')\n\n\n val_fn = K.function([acoustic_input, ctc_input_lengths,\n K.learning_phase()],\n [network_output])\n return val_fn", "def _get_prepare_dataset_fn_for_model(model_name):\n model_module = importlib.import_module(\n f\"cosmobot_deep_learning.models.{model_name}\"\n )\n\n try:\n return model_module.PREPARE_DATASET_FUNCTION # type: ignore\n except AttributeError:\n raise ModuleMissingPrepareDatsetFunction(\n f\"cosmobot_deep_learning.models.{model_name}.PREPARE_DATASET_FUNCTION not defined\"\n )", "def set_model_func(self, model):\n if model == 'SI':\n import cavefish_dadi.Models.si\n return cavefish_dadi.Models.si.si\n elif model == 'SC':\n import cavefish_dadi.Models.sc\n return cavefish_dadi.Models.sc.sc\n elif model == 'IM':\n import cavefish_dadi.Models.im\n return cavefish_dadi.Models.im.im\n elif model == 'AM':\n import cavefish_dadi.Models.am\n return cavefish_dadi.Models.am.am\n elif model == 'SC2M':\n import cavefish_dadi.Models.sc2m\n return cavefish_dadi.Models.sc2m.sc2m\n elif model == 'IM2M':\n import cavefish_dadi.Models.im2m\n return cavefish_dadi.Models.im2m.im2m\n elif model == 'AM2M':\n import cavefish_dadi.Models.am2m\n return cavefish_dadi.Models.am2m.am2m\n else:\n return None", "def get_model_function(name: str):\n if name not in REGISTRY:\n names = \", \".join(sorted(REGISTRY.keys()))\n raise KeyError(f\"Model {name} not found in registry. 
Available names: {names}\")\n return REGISTRY[name]", "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def get_model_detection_function(model):\n\n @tf.function\n def detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\n return detect_fn", "def getFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 
'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):\n cpp_func_name = \"\"\n py_func = tree.attr\n # extract function name start\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break # dont allow the else\n else:\n return None\n # check type and lengths\n if allow_lengths:\n #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) \n type_and_length = py_func.split(\"Array\")\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree, f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n # generate template args\n if (len(type_and_length) == 1):\n cpp_func_name += f\"<{t}>\"\n elif (len(type_and_length) == 2):\n cpp_func_name += f\"<{t}, {type_and_length[1]}>\"\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f\"<{t}>\"\n # return \n return cpp_func_name", "def get_validation_fn(\n test_dataset: tf.data.Dataset,\n model_fn: Callable[[], tf.keras.models.Model],\n loss_fn: Callable[[], tf.keras.losses.Loss],\n metrics_fn: Callable[[], tf.keras.metrics.Metric],\n) -> Callable[[], tf.data.Dataset]:\n\n def compiled_model() -> tf.keras.Model:\n val_model = model_fn()\n val_model.compile(\n loss=loss_fn(), optimizer=tf.keras.optimizers.Adam(), metrics=metrics_fn()\n )\n return val_model\n\n test_dataset = _convert_fn(test_dataset)\n\n def validation_fn(\n trained_model: tff.learning.Model,\n ) -> Callable[[], tf.data.Dataset]:\n val_model = compiled_model()\n trained_model_weights = tff.learning.ModelWeights(\n trainable=list(trained_model.trainable),\n non_trainable=list(trained_model.non_trainable),\n )\n\n trained_model_weights.assign_weights_to(val_model)\n metrics = val_model.evaluate(test_dataset, verbose=0)\n return dict(\n zip(val_model.metrics_names, val_model.evaluate(test_dataset, verbose=0))\n )\n\n return validation_fn", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def get_model_detection_function(model):\n\n @tf.function\n def detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\n return detect_fn", "def register_udf(spark: SparkSession, udf: Callable, name: str) -> str:\n func_name = f\"{name}_{secrets.token_hex(4)}\"\n spark.udf.register(func_name, udf)\n logger.info(f\"Created model inference pandas_udf with name {func_name}\")\n return func_name", "def get_model_detection_function(model):\r\n\r\n @tf.function\r\n def detect_fn(image):\r\n \"\"\"Detect 
objects in image.\"\"\"\r\n\r\n image, shapes = model.preprocess(image)\r\n prediction_dict = model.predict(image, shapes)\r\n detections = model.postprocess(prediction_dict, shapes)\r\n\r\n return detections, prediction_dict, tf.reshape(shapes, [-1])\r\n\r\n return detect_fn", "def spectrum_funcs(model):\n\n def mk_dnde(fn):\n def dnde(photon_energies, cme):\n return fn(model, photon_energies, cme)\n\n return dnde\n\n return {\n \"mu mu\": mk_dnde(dnde_mumu),\n \"e e\": mk_dnde(dnde_ee),\n \"pi0 pi pi\": mk_dnde(dnde_pi0pipi),\n \"pi0 pi0 pi0\": mk_dnde(dnde_pi0pi0pi0),\n \"p p\": mk_dnde(dnde_pp),\n }", "def function_application(func):\n if func not in NUMEXPR_MATH_FUNCS:\n raise ValueError(\"Unsupported mathematical function '%s'\" % func)\n\n def mathfunc(self):\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{func}({expr})\".format(func=func, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{func}(x_0)\".format(func=func), (self,))\n return mathfunc", "def get_ufunc(ufunc_method: Optional[str] = None):\n if ufunc_method:\n if ufunc_method not in Replacements._ufunc_rep:\n return None\n return Replacements._ufunc_rep[ufunc_method]\n return Replacements._ufunc_rep['ufunc']", "def get_transform_fn():", "def get_function_from_text(f):\n return lambda x: eval_expr(f, {'x': x}, numpy_dict)", "def get_scoring_function(self,prop_name=None, prop_type=None, model_ckpt=None):\n\n if prop_name == 'qed':\n return qed_func()\n elif prop_name == 'sa':\n return sa_func()\n elif prop_name and prop_type and model_ckpt:\n return load_model_ckpt(prop_name=prop_name, model_type=prop_type, model_ckpt=model_ckpt)\n else:\n print(\"add valid model\")", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)" ]
[ "0.6317344", "0.6238467", "0.62369287", "0.60278875", "0.5895729", "0.5788726", "0.55613315", "0.5557939", "0.5516528", "0.55142033", "0.54652625", "0.54648143", "0.5380108", "0.53742975", "0.53681487", "0.5325461", "0.5320433", "0.5294789", "0.52846104", "0.52822757", "0.5266603", "0.52447605", "0.52189225", "0.5218033", "0.52176684", "0.52002275", "0.51943135", "0.5171223", "0.51673895", "0.51667064" ]
0.78834236
0
Register a given UDF with the give Spark session under the given name.
def register_udf(spark: SparkSession, udf: Callable, name: str) -> str: func_name = f"{name}_{secrets.token_hex(4)}" spark.udf.register(func_name, udf) logger.info(f"Created model inference pandas_udf with name {func_name}") return func_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function", "def register_function(self, function, name=None):\n if name is None:\n name = function.__name__\n self.funcs[name] = function", "def register_udf(\n self,\n function: Any,\n return_type: \"DataTypeOrString\",\n name: Optional[str] = None,\n eval_type: int = PythonEvalType.SQL_BATCHED_UDF,\n deterministic: bool = True,\n ) -> str:\n\n if name is None:\n name = f\"fun_{uuid.uuid4().hex}\"\n\n # construct a PythonUDF\n py_udf = PythonUDF(\n output_type=return_type,\n eval_type=eval_type,\n func=function,\n python_ver=\"%d.%d\" % sys.version_info[:2],\n )\n\n # construct a CommonInlineUserDefinedFunction\n fun = CommonInlineUserDefinedFunction(\n function_name=name,\n arguments=[],\n function=py_udf,\n deterministic=deterministic,\n ).to_plan_udf(self)\n\n # construct the request\n req = self._execute_plan_request_with_metadata()\n req.plan.command.register_function.CopyFrom(fun)\n\n self._execute(req)\n return name", "def register_function(self, *args):\n if len(args) == 1:\n function = args[0]\n try:\n name = function.fact_name\n except AttributeError:\n name = function.__name__\n if name is None:\n raise Exception(\"Function does not have a name\")\n else:\n name, function = args\n self.functions[name] = function", "def register(name, func):\n WebSocketRouter.funcmap[name] = func", "def register(\n self, name: str, opset: OpsetVersion, func: Callable, custom: bool = True\n ) -> None:\n if \"::\" not in name:\n raise ValueError(\n f\"The name must be in the form of 'domain::op', not '{name}'\"\n )\n symbolic_functions = self._registry.setdefault(\n name, _SymbolicFunctionGroup(name)\n )\n if custom:\n symbolic_functions.add_custom(func, opset)\n else:\n symbolic_functions.add(func, opset)", "def existing_method_from_name(fun_name):\n global STensor\n assert hasattr(torch.Tensor, fun_name)\n if getattr(torch, fun_name) in STABLE_FUNCTIONS:\n stable_fun = STABLE_FUNCTIONS[getattr(torch, fun_name)]\n STABLE_FUNCTIONS[getattr(torch.Tensor, fun_name)] = stable_fun\n setattr(STensor, fun_name, stable_fun)\n else:\n print(f\"STILL NEED TO IMPLEMENT {fun_name}\")", "def register_udtf(\n self,\n function: Any,\n return_type: Optional[\"DataTypeOrString\"],\n name: str,\n eval_type: int = PythonEvalType.SQL_TABLE_UDF,\n deterministic: bool = True,\n ) -> str:\n udtf = PythonUDTF(\n func=function,\n return_type=return_type,\n eval_type=eval_type,\n python_ver=get_python_ver(),\n )\n\n func = CommonInlineUserDefinedTableFunction(\n function_name=name,\n function=udtf,\n deterministic=deterministic,\n arguments=[],\n ).udtf_plan(self)\n\n req = self._execute_plan_request_with_metadata()\n req.plan.command.register_table_function.CopyFrom(func)\n\n self._execute(req)\n return name", "def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=False):\n cls._check_supports_udf(\"register_anonymous\")\n if parameterized:\n return ParameterizedSelectOp(name, func, anonymous=True, is_udt=is_udt)\n iop = IndexUnaryOp._build(name, func, anonymous=True, is_udt=is_udt)\n return SelectOp._from_indexunary(iop)", "def register_backend(name, load_fn):\n assert name not in _backends\n _backends[name] = load_fn", "def add_function(self, name, nbparam, function):\n if \"_\" in name:\n raise RuntimeError( # pragma: no cover\n \"SQLite does not allow function name with _\")\n self._check_connection()\n if self._engine == \"SQLite\":\n self._connection.create_function(name, 
nbparam, function)", "def add_function(self, func_name, *args, **kwargs):\n if len(args) > 0:\n attr = args[0]\n else:\n attr = func_name.func_name\n self._user_funcs[attr] = func_name", "def add_global(self, name, func):\n self.env.globals[name] = func", "def _register_callable(\n self,\n f: Any,\n name: str,\n aggregation: bool,\n parameters: List[Tuple[str, type]],\n return_type: type,\n replace: bool = False,\n ):\n lower_name = name.lower()\n if lower_name in self.functions:\n if replace:\n self.function_list = list(\n filter(lambda f: f.name.lower() != lower_name, self.function_list)\n )\n del self.functions[lower_name]\n\n elif self.functions[lower_name] != f:\n raise ValueError(\n \"Registering different functions with the same name is not allowed\"\n )\n\n self.function_list.append(\n FunctionDescription(name.upper(), parameters, return_type, aggregation)\n )\n self.function_list.append(\n FunctionDescription(name.lower(), parameters, return_type, aggregation)\n )\n self.functions[lower_name] = f", "def replaces_ufunc(func: Callable[..., Tuple[str]], name: str):\n Replacements._ufunc_rep[name] = func\n return func", "def register(func):\n PLUGINS[func.__name__] = func\n return func", "def map_function(self, name):\n def decorator(func):\n\n # Check if the Thrift function was already assigned\n if name in self.__mapped_names:\n raise NameError(\n f'Thrift Function \"{name}\" is already assigned!'\n )\n\n self.__mapped_names.add(name)\n\n setattr(self, name, func)\n return func\n\n return decorator", "def wrap_impala_udf(hdfs_file, inputs, output, so_symbol, name=None):\n return udf.UDFCreator(hdfs_file, inputs, output, so_symbol, name=name)", "def register_function(\n self,\n f: Callable,\n name: str,\n parameters: List[Tuple[str, type]],\n return_type: type,\n replace: bool = False,\n ):\n self._register_callable(\n f,\n name,\n aggregation=False,\n parameters=parameters,\n return_type=return_type,\n replace=replace,\n )", "def register(name, fn=None):\n def _hook_add(func):\n if name not in _hooks:\n logger.debug(\"Creating new hook %s\" % name)\n _hooks[name] = []\n\n logger.debug('Registering hook %s for function %s' % (name, fn))\n _hooks[name].append(func)\n\n if fn is None:\n # Behave like a decorator\n def decorator(func):\n _hook_add(func)\n return func\n return decorator\n else:\n # Behave like a function, just register hook\n _hook_add(fn)", "def register(metric, name=None):\n return set_namespace(registered, metric, name=name, set_global=True)", "def register_node(name, eval_func, input_args, gpu_program=None, output=None, description=None):\n if name in NODE_REGISTRY:\n raise NodeExistsError(name)\n\n NODE_REGISTRY[name] = NodeDefinition(name, eval_func, input_args, output, gpu_program, description)", "def register(func):\n plugins[func.__name__] = func\n return func", "def add_function(self, function=None, name=\"main\"):\n if not isinstance(function, SSAFunction):\n print(\"Failed adding function! The input is not a SSAFunction.\")\n elif name in self.ssa.functions:\n print(\"Failed adding function! 
Name already exist in the NNSSA network!\")\n else:\n self.ssa.add_function(name, function)", "def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=False):\n cls._check_supports_udf(\"register_new\")\n iop = IndexUnaryOp.register_new(\n name, func, parameterized=parameterized, is_udt=is_udt, lazy=lazy\n )\n module, funcname = cls._remove_nesting(name, strict=False)\n if lazy:\n module._delayed[funcname] = (\n cls._get_delayed,\n {\"name\": name},\n )\n elif parameterized:\n op = ParameterizedSelectOp(funcname, func, is_udt=is_udt)\n setattr(module, funcname, op)\n return op\n elif not all(x == BOOL for x in iop.types.values()):\n # Undo registration of indexunaryop\n imodule, funcname = IndexUnaryOp._remove_nesting(name, strict=False)\n delattr(imodule, funcname)\n raise ValueError(\"SelectOp must have BOOL return type\")\n else:\n return getattr(module, funcname)", "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def _ufunc(cls, name):\n if name not in cls._ufuncs:\n if cls.ufunc_mode == \"jit-lookup\":\n cls._ufuncs[name] = cls._ufunc_lookup(name)\n elif cls.ufunc_mode == \"jit-calculate\":\n cls._ufuncs[name] = cls._ufunc_calculate(name)\n else:\n cls._ufuncs[name] = cls._ufunc_python(name)\n return cls._ufuncs[name]", "def register(self, function, name=None, method='POST'):\r\n\r\n method = self.clean_method(method)\r\n\r\n # Generate a default name\r\n if not name:\r\n module = ''.join(str(function.__module__).rsplit('.ajax', 1))\r\n name = '.'.join((module, function.__name__))\r\n\r\n if ':' in name:\r\n log.error('Ivalid function name %s.' % name)\r\n return\r\n\r\n # Check for already registered functions\r\n if name in self._registry:\r\n log.error('%s was already registered.' % name)\r\n return\r\n\r\n # Create the dajaxice function.\r\n function = DajaxiceFunction(function=function,\r\n name=name,\r\n method=method)\r\n\r\n # Register this new ajax function\r\n self._registry[name] = function", "def add(self, name, function):\r\n\r\n # If this is not the final function name (there are more modules)\r\n # split the name again an register a new submodule.\r\n if '.' in name:\r\n module, extra = name.split('.', 1)\r\n if module not in self.submodules:\r\n self.submodules[module] = DajaxiceModule(module)\r\n self.submodules[module].add(extra, function)\r\n else:\r\n self.functions[name] = function", "def register(self, filter_name, filter_func):\n self._filters[filter_name] = filter_func" ]
[ "0.6381424", "0.6292242", "0.6274659", "0.5750347", "0.5739359", "0.5733467", "0.56409955", "0.55877364", "0.5564187", "0.5543627", "0.5542775", "0.55320764", "0.5518689", "0.5453414", "0.5418701", "0.5416483", "0.54028904", "0.53713137", "0.52891856", "0.52680963", "0.5184796", "0.51774937", "0.5158297", "0.51538926", "0.5143909", "0.51387346", "0.5045724", "0.5012396", "0.5004873", "0.5002329" ]
0.816824
0
Initialize a Database with an open connection to the history database.
def __init__(self): if Database.filename is None: Database.filename = Config().GetString('HISTORY_DB') if Database.filename is None: logging.error('Missing ASH_CFG_HISTORY_DB variable?') self.connection = sqlite3.connect(Database.filename) self.connection.row_factory = sqlite3.Row self.cursor = self.connection.cursor()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_database(self):\n init_database(self.engine)", "def init_database(self):\n # init_database(self.engine)", "def __init__(self, *args, **kwargs):\n self.database = args[0] if len(args) else kwargs.get('database', 'jping.db')\n is_new = not os.path.exists(self.database)\n self._connection = sqlite3.connect(self.database)\n self._connection.row_factory = sqlite3.Row\n if is_new:\n self.create_schema()", "def init_database(cls):\n conn = config.db_connection_string(Settings)\n cls.Engine = create_engine(conn, echo=Settings.get('DEBUG'))\n cls.Session = sessionmaker(bind=cls.Engine)\n return cls", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "def open_database(self):\n if self._conn is None:\n self._conn = sqlite3.connect(self._db_path)", "def __init__(self):\n self._connection = get_db_connection()", "def init_database(self):\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()", "def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def __init__(self):\r\n assert isfile(DBClass.db_name), \"Database doesn't exists!\"\r\n\r\n self.conn = self.create_connection()\r\n self.cursor = self.conn.cursor()", "def open(self) -> \"PassDatabase\":\n\n if self._connection is None:\n self._connection = sqlite3.connect(self._pathTo)\n self._cursor = self._connection.cursor()\n\n return self", "def init() -> sqlite3.Connection:\n try:\n with open(DATABASE_INIT) as reader:\n sql = reader.read()\n connection = sqlite3.connect(DATABASE, check_same_thread=False)\n connection.executescript(sql)\n return connection\n except FileNotFoundError:\n log.error(DatabaseInitializeError())\n exit(1)", "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)\n\n if self.sqlite_file is not None:\n dbname = 'sqlite:///%s' % self.sqlite_file\n self.sqlite_engine = create_engine(dbname, echo=False)\n self.sqlite_session = scoped_session(sessionmaker(bind=self.sqlite_engine))\n DB_Base.metadata.create_all(self.sqlite_engine)\n logger.info('Using SQLite %s' % self.sqlite_engine)", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n 
self.logger.info('Database was already initialized')\n self.db.close()", "def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)", "def __init__(self):\n\n self.connection = sqlite3.connect(self.dabatabase_name, uri=True)\n self.connection.cursor()\n self.connection.execute(self._create_table_stm)\n self.connection.commit()", "def obtainDatabaseConnection(self):\n\t\tself.databaseConnector = DatabaseConnector()", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def __init__(self) -> None:\n self.Database = Database()", "def __init__(self):\n with open('config.json') as config:\n data = json.load(config)\n\n password = self.decode_password(data['db']['password'])\n db_conn_string = 'postgresql://' + data['db']['username'] + ':' + password + '@' + \\\n data['db']['hostname'] + ':' + data['db']['port'] + '/' + data['db']['database']\n\n self.engine = create_engine(db_conn_string)\n try:\n conn = self.engine.connect()\n if conn is not None:\n print(\"-I- Successful Database Connection\")\n except Exception as e:\n print(\"-W- \" + str(e))", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def __init__(self, db=None):\n if db is None:\n self.db = \"file:foxrollbot_db?mode=memory&cache=shared\"\n else:\n self.db = db\n\n # This attribute is used to maintain a single connection to the\n # database, so that in-memory databases aren't just lost after every\n # connection is finished.\n self._main_connection = sqlite3.connect(self.db, uri=True)\n\n self._load_statements()\n self._init_db()", "def init_database(self):\n try:\n DatabaseCreation.create_database()\n except BaseExceptionHandler as base_exception_handler:\n self.logger.error(message=base_exception_handler.error_message)", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "def connect_db(self) -> sqlite3.Connection:\n self.connection = sqlite3.connect(self.database)\n self.connection.row_factory = sqlite3.Row\n\n self.get_cursor()", "def __init_database(self):\n from admin.database import init_db\n init_db()", "def __init__(self):\n self.dbcon = DbConnection.get_con()", "def db_open():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = db_connect()\n return 
g.sqlite_db" ]
[ "0.7012494", "0.6909706", "0.6851727", "0.6823973", "0.6813911", "0.6800843", "0.6735885", "0.67349523", "0.6691175", "0.6688016", "0.66699165", "0.66486233", "0.66061836", "0.6600309", "0.6582314", "0.65171427", "0.64984584", "0.6497574", "0.6490289", "0.6479513", "0.6465936", "0.6444394", "0.6430106", "0.6387969", "0.6375347", "0.6363983", "0.6361972", "0.6356578", "0.6356348", "0.6331386" ]
0.763907
0
clean the ad block and link block
def clean_spam(doc): for tag in doc.find_all(["div","ol", "dl", "ul", "table", "section"]): if no_block_children(tag) and is_ad_block(tag): tag.extract()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_link():", "def delete_ad_text_block(self, table):\n index = 0\n for tr in table:\n index +=1\n if tr.get_attribute('class') == 'ad-text-block':\n del table[index-1]\n return table", "def horde_cleanup(self):", "def clean(c):", "def clean(_context):", "def cleaning (data):", "def _die(self):\n\t\tself.site.agents_in_site.remove(self)\n\t\tself.site = None\n\t\tif self.debt_link != None:\n\t\t\tself.debt_link.lender.loans.remove(self.debt_link)\n\t\t\tself.debt_link = None\n\t\tfor l, loan in enumerate(self.loans):\n\t\t\tloan.borrower.debt_link = None\n\t\t\tdel self.loans[l]\n\t\tif self.gift_link != None:\n\t\t\tself.gift_link.giver.gifts.remove(self.gift_link)\n\t\t\tself.gift_link = None\n\t\tfor g, gift in enumerate(self.gifts):\n\t\t\tgift.taker.gift_link = None\n\t\t\tdel self.gifts[g]\n\t\tself.agents_list.remove(self)", "def cleanup():", "def clear_blockages(self):\n debug.info(3,\"Clearing all blockages\")\n self.rg.clear_blockages()", "def clear_text(body):\n soup = BeautifulSoup(body, features=\"html.parser\")\n for a in soup.findAll('a'):\n # print(a)\n # del a['href']\n a.replaceWithChildren()\n\n # for code in soup.findAll('code'):\n # # print(a)\n # # del a['href']\n # print(\"888888888888888888\")\n # print(code)\n # print(\"888888888888888888\")\n # #code.replaceWithChildren()\n #\n # del code\n\n return str(soup)", "def article_cleanser(dirty_content):\n global id_\n\n articles_dict = {}\n source = dirty_content.find(\"title\").get_text()\n data_tuple_list = []\n\n for item in dirty_content.find_all(\"item\"):\n article_link = item.link.get_text()\n\n jj = item.pubDate.get_text().split(' ')\n article_pubdate = datetime(int(jj[3]), int(strptime(jj[2], '%b').tm_mon), int(jj[1]), int(jj[4][:2]),\n int(jj[4][3:5]),\n int(jj[4][6:])).isoformat(' ')\n\n if source == \"Sudans Post\":\n \"\"\" If the feed is for Sudans Post then it should scrap accordingly\"\"\"\n article_number = item.guid.get_text()\n guid_list = re.findall(\"(?<=p=)[0-9]{5}\", article_number)\n article_guid = int(guid_list[0] + '000')\n article_description = item.description.get_text().replace('[&#8230;]', '....')\n article_description = article_description.replace('&#8217;', \"'\")\n article_title = item.title.get_text().replace(u'\\xa0', ' ')\n article_text = item.encoded.get_text() # gives a not-navigable string\n\n \"\"\"\n So to make the article_content navigable I write it into a html file\n and retrieving it again below\n \"\"\"\n with open(\"article_content.html\", 'w') as fp:\n fp.write(article_text)\n\n with open(\"article_content.html\", \"r\") as fp:\n article_soup = BeautifulSoup(fp, \"lxml\")\n\n article_content = \"\"\n for c in article_soup.find_all('p'):\n article_content += \"\\n\" + c.get_text()\n\n image_link = article_soup.find('a')[\"href\"]\n\n categories = \"\"\n for cat in item.find_all(\"category\"):\n categories += f\"/{cat.get_text().lower()}\"\n\n elif source == \"Radio Tamazuj - Latest news\":\n \"\"\"If the feed is for Radio tamazuj, scrap accordingly \"\"\"\n\n article_title = item.title.get_text()\n article_description = item.description.get_text()\n article_guid = item.guid.get_text()[:8]\n categories = \"Not available\"\n\n def get_content_from_link(link):\n \"\"\" \n Gets the article from the link, because radio tamazuj doesn't \n post the article content on the rss feed\n \"\"\"\n\n page = requests.get(link)\n soup = BeautifulSoup(page.text, \"lxml\")\n return soup\n\n soup = get_content_from_link(article_link)\n image_link = \"https://radiotamazuj.org\" + 
soup.find(\"img\")[\"src\"]\n\n body = soup.select(\".body-text\")\n content = \"\"\n for i in body:\n content += i.get_text()\n article_content = content.replace(u'\\xa0', ' ')\n\n articles_dict[id_] = {\n \"id\": id_,\n \"source\": source,\n \"Title\": article_title,\n \"Link\": article_link,\n \"PubDate\": article_pubdate,\n \"guid\": article_guid,\n \"Description\": article_description,\n \"Content\": article_content,\n \"categories\": categories,\n \"image_link\": image_link,\n }\n data_tuple = (\n id_,\n source,\n article_title,\n article_link,\n article_pubdate,\n article_guid,\n article_description,\n article_content,\n categories,\n image_link,\n )\n id_ += 1 # counts number of articles\n data_tuple_list.append(data_tuple)\n return data_tuple_list", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def _clean_up_page_entry(self):\n self.start_page.delete(0, \"end\")\n self.end_page.delete(0, \"end\")", "def unlink(self, link_id):", "def clean_link(self, link):\n link = link.strip(\"[]\")\n if \"|\" in link: \n link = link.split(\"|\",1)[0]\n link = link.strip() #remove trailing white space\n return link", "def fix_default_content(portal):\n logger = logging.getLogger(PROJECTNAME)\n content_ids = ['front-page', 'events', ]\n portal_ids = portal.objectIds()\n for cId in content_ids:\n if cId in portal_ids:\n portal.manage_delObjects([cId])\n logger.info('Deleted object with id %s' % cId)\n if 'news' in portal_ids:\n news = portal['news']\n news.setTitle(u'Notícias')\n news.setDescription(u'Notícias do Plone Symposium')\n news.reindexObject()\n if 'Members' in portal_ids:\n # Hide user's tab\n members = portal['Members']\n members.setTitle(u'Participantes')\n members.setExcludeFromNav(True)\n members.reindexObject()\n\n logger.info('Cleaned up portal contents')", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def clean(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n if x < -self.gap:\n self.del_asteroid(i)", "def cleanup(self):\n self.result.extend(self.endTagList)", "def _removeUnusedElements(self, element):\n self.log(\"element:%r\" % element)\n for pad in element.src_pads():\n if pad.is_linked():\n peer = pad.get_peer().get_parent()\n self._removeUnusedElements(peer)\n if not peer in self._validelements:\n self.log(\"removing %s\" % peer.get_name())\n pad.unlink(pad.get_peer())\n peer.set_state(gst.STATE_NULL)\n self.remove(peer)", "def clear(self):\n self._clear(is_link=True)", "def _clean(self):\n\t\tfor hid in self.handlers_id:\n\t\t\tself.obj.handler_disconnect(hid)", "def cleanup(self):\n for j in range(len(self.endTagList)):\n self.result = self.result + self.endTagList[j]", "def fix_links():\n pass", "def post_cleanup(self):\n targetNode = self.article.top_node\n node = self.add_siblings(targetNode)\n\n # fixing nytimes images\n self.replace_attributes(node,\n tag_name = 'img',\n old_attribute_name = 'src',\n new_attribute_name = 'data-src')\n\n # fixing ebay images\n self.replace_attributes(node,\n tag_name = 'img',\n old_attribute_name = 'src',\n new_attribute_name = 'imgurl')\n\n self.build_tag_paths(node, 
'img', 'src')\n self.build_tag_paths(node, 'a', 'href')\n allowed_tags = ['p', 'img', 'ul', 'ol', 'h2', 'h3', 'h4', 'h5', 'h6',\n 'strong', 'em', 'blockquote']\n\n if self.known_host_content_tags:\n return node\n\n for e in self.parser.getChildren(node):\n e_tag = self.parser.getTag(e)\n if e_tag not in allowed_tags:\n if (self.is_highlink_density(e) \\\n or self.is_table_and_no_para_exist(e) \\\n or not self.is_nodescore_threshold_met(node, e)) \\\n and not self.have_images(e):\n self.parser.remove(e)\n return node", "def _maskhg19(self):\n if len(self._current_block) > 2:\n self._current_block[0].text = self._current_block[1].text\n self._current_block[0].size = self._current_block[1].size\n self._current_block[0].setstring()\n self._current_block.remove(self._current_block[1])\n else:\n self._current_block = []", "def remove_urls(text):\n pass", "def remove_lead(self, lead):\n self.leads = self.leads[:lead] + self.leads[lead+1:]\n self.connections = self.connections[:lead] + self.connections[lead+1:]" ]
[ "0.5931258", "0.5751617", "0.5729552", "0.5715315", "0.5449242", "0.5448983", "0.5392261", "0.535174", "0.53408563", "0.5286636", "0.52634615", "0.5252743", "0.5208404", "0.5206515", "0.5206432", "0.5198738", "0.51939416", "0.51939416", "0.51939416", "0.51933086", "0.51892257", "0.5167866", "0.5154277", "0.515224", "0.51513064", "0.51507455", "0.51422757", "0.5125756", "0.5124409", "0.511096" ]
0.68033314
0
Load Stack instance from Experiment.
def test01_load_stack(self): stack = self.experiment.load_stack(self.experiment.stack_ids[0]) self.assertTrue(isinstance(stack, Stack))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cls, path: str):\n with open(path, \"r\") as f:\n run_data = json.load(f)\n return Experiment.load_from_dict(run_data)", "def load_experiment(file_name: str):\n exp = Experiment2P()\n # initialize the lazy-load objects with empty lists\n exp.tail_data = []\n exp.replaced_tail_frames = []\n exp.laser_data = []\n exp.all_c = []\n exp.all_dff = []\n exp.func_stacks = []\n with h5py.File(file_name, 'r') as dfile:\n exp.version = dfile[\"version\"][()] # in future allows for version specific loading\n try:\n if exp.version == b\"unstable\" or exp.version == \"unstable\":\n warnings.warn(\"Experiment file was created with development version of analysis code. Trying to \"\n \"load as version 1\")\n elif int(exp.version) > 2:\n raise IOError(f\"File version {exp.version} is larger than highest recognized version '2'\")\n except ValueError:\n raise IOError(f\"File version {exp.version} not recognized\")\n # load general experiment data\n n_planes = dfile[\"n_planes\"][()] # inferrred property of class but used here for loading plane data\n exp.experiment_name = dfile[\"experiment_name\"][()]\n exp.original_path = dfile[\"original_path\"][()]\n exp.scope_name = dfile[\"scope_name\"][()]\n exp.comment = dfile[\"comment\"][()]\n exp.tail_frame_rate = dfile[\"tail_frame_rate\"][()]\n # load singular parameter dictionary\n exp.info_data = exp._load_dictionary(\"info_data\", dfile)\n # load tail-data modification flag if this is version 2\n if int(exp.version) > 1:\n exp.tail_data_augmented = dfile[\"tail_data_augmented\"][()]\n # load per-plane data\n for i in range(n_planes):\n plane_group = dfile[str(i)]\n exp.scanner_data.append(exp._load_dictionary(\"scanner_data\", plane_group))\n exp.tail_data.append(plane_group[\"tail_data\"][()])\n exp.projections.append(plane_group[\"projection\"][()])\n if \"func_stack\" in plane_group:\n exp.func_stacks.append(plane_group[\"func_stack\"][()])\n if \"anat_projection\" in plane_group: # test if this experiment was dual-channel\n exp.anat_projections.append(plane_group[\"anat_projection\"][()])\n if \"tail_data\" in plane_group: # test if this experiment had tail data (for all planes)\n exp.tail_data.append(plane_group[\"tail_data\"][()])\n exp.bout_data.append(plane_group[\"bout_data\"][()])\n exp.tail_frame_times.append(plane_group[\"tail_frame_time\"][()])\n if int(exp.version) > 1 and \"replaced_tail_frames\" in plane_group:\n exp.replaced_tail_frames.append(plane_group[\"replaced_tail_frames\"][()])\n if \"laser_data\" in plane_group: # test if this experiment had laser data\n exp.laser_data.append(plane_group[\"laser_data\"][()])\n exp.all_c.append(plane_group[\"C\"][()])\n exp.all_dff.append(plane_group[\"dff\"][()])\n exp.all_centroids.append(plane_group[\"centroids\"][()])\n exp.all_sizes.append(plane_group[\"sizes\"][()])\n exp.all_spatial.append(plane_group[\"spatial\"][()])\n ps = plane_group[\"mcorr_dict\"][()]\n exp.mcorr_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_extract_dict\"][()]\n exp.cnmf_extract_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_val_dict\"][()]\n exp.cnmf_val_dicts.append(json.loads(ps))\n exp.populated = True\n return exp", "def test_load_experiment(self):\n exp = Experiment(self.epath,\n normalization='ch0',\n auto_alignment=False)\n self.assertTrue(isinstance(exp, Experiment))", "def _load_experiment(\n experiment_name: str, decoder: Decoder, reduced_state: bool = False\n) -> Experiment:\n # Convert SQA to user-facing class outside of session scope to avoid timeouts\n return 
decoder.experiment_from_sqa(\n experiment_sqa=_get_experiment_sqa_reduced_state(\n experiment_name=experiment_name, decoder=decoder\n )\n if reduced_state\n else _get_experiment_sqa(experiment_name=experiment_name, decoder=decoder),\n reduced_state=reduced_state,\n )", "def load_experiment(\n experiment_name: str,\n config: Optional[SQAConfig] = None,\n reduced_state: bool = False,\n) -> Experiment:\n config = config or SQAConfig()\n decoder = Decoder(config=config)\n return _load_experiment(\n experiment_name=experiment_name, decoder=decoder, reduced_state=reduced_state\n )", "def load_stack(stack):\n path = os.path.join(STACK_DIRECTORY, '%s.%s' % (stack.module, stack.caller))\n if os.path.exists(path):\n with open(path, 'rb') as f:\n return dill.load(f)\n return None", "def load(self, path):\n with open(path, 'rb') as fp:\n me = pickle.load(fp)\n self.exp = me[\"exp\"] # type: Experiment\n with suppress_errors(\"output root directory may have changed\"):\n self.mru_exp_root = self.exp.root\n self.chdir()\n self.imdb = self.exp.imdb\n self.exp_df = me[\"exp_df\"]\n with suppress_errors(\"caffenet may no longer exist\"):\n self.caffenet = self.exp.caffenet\n with suppress_errors(\"caffemodel may no longer exist\"):\n self.caffemodel = self.exp.caffemodel\n with suppress_errors(\"data directory may have changed\"):\n self.mru_exp_data = self.exp.data\n self.lbl_exp_data.setText(self.mru_exp_data)\n self.edt_caffemodel.setText(self.caffemodel)\n self.edt_caffenet.setText(self.caffenet)\n self.lbl_exp_data.setText(self.mru_exp_data)\n if self.exp_df is not None:\n model = PandasModel(self.exp_df)\n self.table_imdb.setModel(model)\n self.table_imdb.show()\n # Update the status label\n self.lbl_last_status.setText(str(self.exp))", "def __init__(self, name='DQN', load_model=None):\n self.equity_alive = 0\n self.actions = []\n self.last_action_in_stage = ''\n self.temp_stack = []\n self.name = name\n self.autoplay = True\n\n self.dqn = None\n self.env = None\n\n if load_model:\n self.load(load_model)", "def load(extended=False):\n\n _fetch_large()\n if extended:\n return _load(cache_experiment_extended, _parse_experiment)\n else:\n return _load(cache_experiment, _parse_experiment)", "def resume(self, tag=\"current\"):\n\n if not self.is_resumable(tag):\n logging.warning(\"This exeriment is not resumable!\")\n self.force_restart(tag)\n\n else:\n logging.info(\"Loading the experiment from {}\".format(self._dir_name))\n\n save_dir = os.path.join(self._dir_name, tag)\n\n if self._model is not None:\n self._model.load(save_dir)\n\n if self._config is not None:\n file_name = os.path.join(save_dir, \"config.p\")\n self._config.load(file_name)\n\n if self._logger is not None:\n file_name = os.path.join(save_dir, \"logger\")\n self._logger.load(file_name)\n\n if self._train_statistics is not None:\n file_name = os.path.join(save_dir, \"train_statistics.p\")\n self._train_statistics.load(file_name)\n\n if self._data_iterator is not None:\n file_name = os.path.join(save_dir, \"data_iterator.p\")\n self._data_iterator.load(file_name)", "def reload(self):\n\n from experiment.ExperimentLoader import loadExperimentFile\n loadExperimentFile(self, self.exp)\n return self", "def load_game(self, path):\n temp_stack = self.state_stack\n try:\n file = open(path, 'rb')\n self.state_stack = pic.load(file)\n for i in self.state_stack.states:\n i.on_load()\n del temp_stack\n except IOError or pic.UnpicklingError as e:\n print(\"Game load error: {}\".format(e))\n self.state_stack = temp_stack", "def 
load_stack(filename):\n data = np.genfromtxt(filename, skip_header=1)\n index_arr = data[:, 2]\n thickness_arr = data[:, 3] / 1e9\n stack = Stack(index_arr, thickness_arr)\n return stack", "def import_stack(ctx, environment, stack, aws_stack_name, template_path):\n if not template_path:\n template_path = os.path.join(\n \"templates\",\n aws_stack_name + \".yaml\"\n )\n\n migrator.import_stack(\n Environment(\n sceptre_dir=ctx.obj[\"sceptre_dir\"],\n environment_path=environment,\n options=ctx.obj[\"options\"]\n ),\n aws_stack_name,\n stack,\n template_path\n )", "def load(cls, filename_or_stream: str | PathLike | TextIO) -> \"Experiment\":\n if isinstance(filename_or_stream, (str, PathLike)):\n p = Path(filename_or_stream)\n if not p.suffix:\n p = p.with_suffix(\".json\")\n s: TextIO = open(p, \"r\")\n close = True\n else:\n s = filename_or_stream\n close = False\n\n exp = cls._structure(json.load(s))\n if close:\n s.close()\n return exp", "def experiments_init(self):\n pass", "def __init__(self, experiment, label=None):\n\n super(PlotCellTypeStack, self).__init__(experiment,\n name=\"PlotCellTypeStack\",\n label=label)\n\n self.epoch_start = self.experiment.config.getint(self.config_section, 'epoch_start', 0)\n self.epoch_end = self.experiment.config.getint(self.config_section, 'epoch_end', default=self.experiment.config.getint('Experiment', 'epochs', default=-1))\n self.frequency = self.experiment.config.getint(self.config_section, 'frequency', 1)\n self.priority = self.experiment.config.getint(self.config_section, 'priority', 0)\n self.filename = self.experiment.config.get(self.config_section, 'filename', 'cell_type_stack.pdf')\n\n self.epochs = []\n self.typecounts = []", "def load_inst(self):\n self.sanity_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n self.pub_auth_all = pickle.load(open(fname_pub_auth_all, 'rb'))\n self.pub_auth_top = pickle.load(open(fname_pub_auth_top, 'rb'))\n self.pub_inst_all = pickle.load(open(fname_pub_inst_all, 'rb'))\n self.pub_inst_top = pickle.load(open(fname_pub_inst_top, 'rb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n self.history = pickle.load(open(fname_pub_history, 'rb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n self.staff = pickle.load(open(fname_pub_staff, 'rb'))", "def __init__(self):\n self._stack = Stack()", "def load(self):\n return exp.load(strain = self.strain, dtype = self.dtype, wid = self.wid, stage = self.stage, label = self.label, \n valid_only = self.valid_only, replace_invalid = self.replace_invalid, memmap = self.memmap);", "def load(cls, labpath: str) -> None:\n raise NotImplementedError", "def Stack(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.configelement.stack.stack import Stack\n return Stack(self)", "def from_pickle_file(filename):\n with open(filename, \"rb\") as infile:\n obj = pickle.load(infile)\n assert isinstance(obj, ExperimentList)\n return obj", "def load(app, 
verbose, replay, exp_config=None):\n if replay:\n exp_config = exp_config or {}\n exp_config[\"replay\"] = True\n log(header, chevrons=False)\n loader = LoaderDeployment(app, Output(), verbose, exp_config)\n loader.run()", "def experiment_init(self):\n pass", "def __init__(self, experiment_data):\n self._experiment_data = experiment_data", "def __init__(self, cache_path=None, load=True):\n self.init(cache_path=cache_path, load=load)\n if not self.cache_path:\n return\n\n self.index = self.attributes['run_index']\n self.has_executed = False\n\n self.attrs = ExperimentAttributes(self.attributes)", "def load(cls, src: str):\n # Load check-point\n state_dict = torch.load(src)\n\n # Create a new module\n specifics = state_dict.pop(\"additional_state\")\n configuration = specifics[\"configuration\"]\n module = Net(**configuration)\n\n # Restore state\n module.load_state_dict(state_dict)\n\n # End\n return module", "def load_stim(exptpath, verbose = True):\n # Look for a file with the suffix '_stim.pkl'\n pklpath = None\n for f in os.listdir(exptpath):\n if f.endswith('_stim.pkl'):\n pklpath = os.path.join(exptpath, f)\n if verbose:\n print \"Pkl file:\", f\n\n if pklpath is None:\n raise IOError(\n 'No files with the suffix _stim.pkl were found in {}'.format(\n exptpath\n )\n )\n\n return pd.read_pickle(pklpath)", "def load_context(stackname):\n\n # giorgio@2018-11-03: \"Current situation is you may have a old context around on your local disk.\n # This applies to `elife-alfred--prod` as well. Making the context always downloaded from S3 avoids this stale copy\n # causing weird bugs like https://alfred.elifesciences.org/job/process/job/process-master-server/7, but it may be slower.\n # lsh@2021-06-22: link above still works, I think this is the error being referred to:\n # ...\n # 14:52:48 return workfn(**work_kwargs)\n # 14:52:48 File \"/ext/srv/builder/src/buildercore/bootstrap.py\", line 514, in _update_ec2_node\n # 14:52:48 builder_configuration_repo = fdata['configuration-repo']\n # 14:52:48 KeyError: 'configuration-repo'\n #\n # if not download_from_s3(stackname, refresh=True):\n # raise MissingContextFile(\"We are missing the context file for %s, even on S3. Does the stack exist?\" % stackname)\n\n #path = local_context_file(stackname)\n #contents = json.load(open(path, 'r'))\n\n # lsh@2021-06-22: broke the above logic into two parts so I can swap out s3 during testing\n contents = _load_context_from_s3(stackname)\n\n # fallback: if legacy 'project.aws' key exists, use that for 'aws'\n if contents.get('project', {}).get('aws'):\n LOG.warn(\"stack context is using legacy 'project.aws' instead of just 'aws': %s\" % stackname)\n contents['aws'] = contents['project']['aws']\n # end of fallback\n\n return contents" ]
[ "0.62029076", "0.6123275", "0.6102033", "0.588782", "0.57467204", "0.57370037", "0.5539117", "0.5354052", "0.5341585", "0.5298622", "0.5291302", "0.5280798", "0.5170319", "0.5166367", "0.5153034", "0.51518595", "0.5143899", "0.51388705", "0.51269186", "0.5107662", "0.5100894", "0.50915164", "0.5074778", "0.5027377", "0.5007913", "0.49925208", "0.4982432", "0.49706247", "0.49495414", "0.49457577" ]
0.75396556
0
This method is called at deinitialization time
def deinit(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:\n ...", "def deinit(self) -> None:\n ...", "def deinit(self):\n self.reset()", "def _reinitialize(self):\n return", "def finalise(self):", "def deinit(self):\n self.reset()", "def __init__(self):\r\n\t\tself.clear()", "def post_init(self):\n\t\tpass", "def __init__(self):\n self.reset()", "def __init__(self) -> None:\n\n self.reset()", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def __init__(self):\r\n self.clear()", "def __init__(self):\n self.clear()", "def _finalise_construction(self):\n pass", "def _post_init(self):\n pass", "def clean_up(self):\n\t\tpass", "def __init__(self):\r\n\r\n self.reset()", "def finalize(self):" ]
[ "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8598001", "0.8460669", "0.8460669", "0.80290407", "0.78885597", "0.78108764", "0.7737444", "0.7614071", "0.7612964", "0.7602854", "0.7587629", "0.75617516", "0.75617516", "0.75617516", "0.7550986", "0.7545708", "0.7526551", "0.7521462", "0.74943614", "0.7493069", "0.7470038" ]
0.8913745
0
Construct a Slurm command to run the given sbatch script in a parseable way. This returns a sequence of commandline arguments that can be passed directly to subprocess.run(). This function passes parsable to sbatch to force the output to an easily machinereadable format; note however that even with this flag errors are printed in the usual way.
def build_sbatch_command( script_path: Path, *, dependencies: Sequence[SlurmJobID] = (), job_name: Optional[str] = None, log_dir: Optional[Path] = None, email: Optional[Email] = None, mail_types: Sequence[MailType] = (), extra_sbatch_args: Sequence[str] = (), script_args: Sequence[str], ) -> Sequence[str]: result = ["sbatch", "--parsable", "--requeue"] if dependencies: result.extend( [ # Don't run this job until the dependencies complete successfully. f"--dependency=afterok:{':'.join(dependencies)}", # If any of the dependencies fail, cancel this job. "--kill-on-invalid-dep=yes", ] ) if job_name: result.append(f"--job-name={job_name}") if log_dir: log_dir.mkdir(exist_ok=True, parents=True) result.append(f"--output={str(log_dir.joinpath('R-%x.%j.out'))}") result.extend(_build_email_arg(email=email, mail_types=mail_types)) result.extend(extra_sbatch_args) result.append(str(script_path)) result.extend(script_args) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submitSlurmScript(commands_list, outputName = None):\n longString = \";\".join(commands_list)\n print(longString.replace(\";\", \"\\n\"))\n if outputName is not None:\n sCommand = 'sbatch -p short -c 1 -t 0-11:59 --mem=60G [email protected] \\\n --output {outputSlurm} --wrap=\"{commandString}\"'.format(commandString = longString, outputSlurm = outputName)\n else: \n sCommand = 'sbatch -p short -c 1 -t 0-11:59 --mem=60G [email protected] \\\n --wrap=\"{0}\"'.format(longString)\n os.system(sCommand)", "def build_bash_command(\n script_path: Path,\n *,\n # pylint:disable=unused-argument\n dependencies: Sequence[SlurmJobID] = (),\n job_name: Optional[str] = None,\n log_dir: Optional[Path] = None,\n email: Optional[Email] = None,\n mail_types: Sequence[MailType] = (),\n extra_sbatch_args: Sequence[str] = (),\n # pylint:enable=unused-argument\n script_args: Sequence[str],\n) -> Sequence[str]:\n result = [\"bash\", str(script_path)]\n result.extend(script_args)\n return result", "def sbatch(self, cmd, alloc=None, walltime=None, memory=None, nodes=1,\n feature=None, name='reV', stdout_path='./stdout', keep_sh=False,\n conda_env=None, module=None,\n module_root='/shared-projects/rev/modulefiles'):\n\n if len(name) > self.MAX_NAME_LEN:\n msg = ('Cannot submit job with name longer than {} chars: \"{}\"'\n .format(self.MAX_NAME_LEN, name))\n logger.error(msg)\n raise ValueError(msg)\n\n status = self.check_status(job_name=name)\n\n if status is not None:\n logger.info('Not submitting job \"{}\" because it is in '\n 'squeue or has been recently submitted'.format(name))\n out = None\n err = 'already_running'\n\n else:\n fname = '{}.sh'.format(name)\n self.make_path(stdout_path)\n\n # make all the sbatch arguments\n sb_a = f'#SBATCH --account={alloc}' if alloc is not None else ''\n walltime = self.format_walltime(walltime)\n sb_t = f'#SBATCH --time={walltime}' if walltime is not None else ''\n sb_jn = f'#SBATCH --job-name={name} # job name'\n sb_no = f'#SBATCH --nodes={nodes} # number of nodes'\n sb_out = f'#SBATCH --output={stdout_path}/{name}_%j.o'\n sb_err = f'#SBATCH --error={stdout_path}/{name}_%j.e'\n\n sbf, sbm, env_str = self._special_cmd_strs(feature, memory, module,\n module_root, conda_env)\n\n script_args = ['#!/bin/bash']\n sb_args = (sb_a, sb_t, sb_jn, sb_no, sb_out, sb_err, sbf, sbm,\n env_str)\n for sb_arg in sb_args:\n if sb_arg:\n script_args.append(sb_arg)\n\n script_args.append('echo Running on: $HOSTNAME, '\n 'Machine Type: $MACHTYPE')\n script_args.append(cmd)\n\n script = '\\n'.join(script_args)\n\n # write the shell script file and submit as qsub job\n self.make_sh(fname, script)\n out, err = self.submit('sbatch {script}'.format(script=fname))\n out = self._job_id_or_out(out)\n\n if not keep_sh:\n self.rm(fname)\n\n if err:\n msg = 'Received a SLURM error or warning: {}'.format(err)\n logger.warning(msg)\n warn(msg, SlurmWarning)\n else:\n job_id = int(out.split(' ', maxsplit=-1)[-1])\n out = str(job_id)\n logger.debug('SLURM job \"{}\" with id #{} submitted '\n 'successfully'.format(name, job_id))\n self._queue[job_id] = {self.QCOL_ID: job_id,\n self.QCOL_NAME: name,\n self.QCOL_STATUS: 'PD'}\n\n return out, err", "def start_sbatch_job(collection, exp_array, unobserved=False, name=None,\n output_dir_path=\".\", sbatch_options=None, max_simultaneous_jobs=None,\n debug_server=False):\n import pkg_resources\n\n # Set Slurm job array options\n sbatch_options['array'] = f\"0-{len(exp_array) - 1}\"\n if max_simultaneous_jobs is not None:\n sbatch_options['array'] += 
f\"%{max_simultaneous_jobs}\"\n\n # Set Slurm output parameter\n if 'output' in sbatch_options:\n raise ConfigError(f\"Can't set sbatch `output` Parameter explicitly. SEML will do that for you.\")\n elif output_dir_path == \"/dev/null\":\n output_file = output_dir_path\n else:\n output_file = f'{output_dir_path}/{name}_%A_%a.out'\n sbatch_options['output'] = output_file\n\n # Construct sbatch options string\n sbatch_options_str = create_slurm_options_string(sbatch_options, False)\n\n # Construct chunked list with all experiment IDs\n expid_strings = [('\"' + ';'.join([str(exp['_id']) for exp in chunk]) + '\"') for chunk in exp_array]\n\n with_sources = ('source_files' in exp_array[0][0]['seml'])\n use_conda_env = ('conda_environment' in exp_array[0][0]['seml']\n and exp_array[0][0]['seml']['conda_environment'] is not None)\n\n # Construct Slurm script\n template = pkg_resources.resource_string(__name__, \"slurm_template.sh\").decode(\"utf-8\")\n prepare_experiment_script = pkg_resources.resource_string(__name__, \"prepare_experiment.py\").decode(\"utf-8\")\n prepare_experiment_script = prepare_experiment_script.replace(\"'\", \"'\\\\''\")\n if 'working_dir' in exp_array[0][0]['seml']:\n working_dir = exp_array[0][0]['seml']['working_dir']\n else:\n working_dir = \"${{SLURM_SUBMIT_DIR}}\"\n\n variables = {\n 'sbatch_options': sbatch_options_str,\n 'working_dir': working_dir,\n 'use_conda_env': str(use_conda_env).lower(),\n 'conda_env': exp_array[0][0]['seml']['conda_environment'] if use_conda_env else \"\",\n 'exp_ids': ' '.join(expid_strings),\n 'with_sources': str(with_sources).lower(),\n 'prepare_experiment_script': prepare_experiment_script,\n 'db_collection_name': collection.name,\n 'sources_argument': \"--stored-sources-dir $tmpdir\" if with_sources else \"\",\n 'verbose': logging.root.level <= logging.VERBOSE,\n 'unobserved': unobserved,\n 'debug_server': debug_server,\n 'tmp_directory': SETTINGS.TMP_DIRECTORY\n }\n setup_command = SETTINGS.SETUP_COMMAND.format(**variables)\n end_command = SETTINGS.END_COMMAND.format(**variables)\n\n script = template.format(\n setup_command=setup_command,\n end_command=end_command,\n **variables,\n )\n\n path = os.path.join(SETTINGS.TMP_DIRECTORY, f'{uuid.uuid4()}.sh')\n with open(path, \"w\") as f:\n f.write(script)\n\n try:\n output = subprocess.run(f'sbatch {path}', shell=True, check=True, capture_output=True).stdout\n except subprocess.CalledProcessError as e:\n logging.error(f\"Could not start Slurm job via sbatch. 
Here's the sbatch error message:\\n\"\n f\"{e.stderr.decode('utf-8')}\")\n os.remove(path)\n exit(1)\n\n slurm_array_job_id = int(output.split(b' ')[-1])\n for task_id, chunk in enumerate(exp_array):\n for exp in chunk:\n if not unobserved:\n collection.update_one(\n {'_id': exp['_id']},\n {'$set': {\n 'status': States.PENDING[0],\n 'slurm.array_id': slurm_array_job_id,\n 'slurm.task_id': task_id,\n 'slurm.sbatch_options': sbatch_options,\n 'seml.output_file': f\"{output_dir_path}/{name}_{slurm_array_job_id}_{task_id}.out\"}})\n logging.verbose(f\"Started experiment with array job ID {slurm_array_job_id}, task ID {task_id}.\")\n os.remove(path)", "def parse_command_line():\n parser = argparse.ArgumentParser(prog='scoring')\n parser.add_argument(\"pdb_list\", help=\"list of PDB structures\")\n script_args = parser.parse_args()\n return script_args", "def submit_job(sample_config, jobname, rundir, cliargs, extramodules=[]):\n slurmfile_path = os.path.join(rundir, \"{}.slurm\".format(jobname))\n with open(slurmfile_path, \"w\") as slurmfile:\n slurmfile.write(\"#! /bin/bash -l\\n\")\n slurmfile.write(\"#SBATCH -A {}\\n\".format(cliargs.project))\n slurmfile.write(\"#SBATCH -o {}.out\\n\".format(jobname))\n slurmfile.write(\"#SBATCH -e {}.err\\n\".format(jobname))\n slurmfile.write(\"#SBATCH -J {}.job\\n\".format(jobname))\n if cliargs.threads<16 :\n slurmfile.write(\"#SBATCH -p core -n {}\\n\".format(cliargs.threads))\n else:\n slurmfile.write(\"#SBATCH -p node -n {}\\n\".format(cliargs.threads))\n slurmfile.write(\"#SBATCH -t {}\\n\".format(cliargs.time))\n if hasattr(cliargs, \"email\"):\n slurmfile.write(\"#SBATCH --mail-user {}\\n\".format(cliargs.email))\n slurmfile.write(\"#SBATCH --mail-type=ALL\\n\")\n if hasattr(cliargs, \"qos\"):\n slurmfile.write(\"#SBATCH --qos={}\".format(cliargs.qos))\n slurmfile.write(\"\\n\\n\")\n slurmfile.write(\"set -e\\n\")\n slurmfile.write(\"source activate {}\\n\".format(cliargs.env))\n slurmfile.write(\"module load bioinfo-tools\\n\")\n for module in extramodules:\n slurmfile.write(module)\n\n slurmfile.write(\"deNovo_pipeline.py --global-config {} \"\n \"--sample-config {}\\n\\n\".format(cliargs.global_config, sample_config))\n\n command=(\"sbatch\", slurmfile_path)\n print(command)\n try:\n if cliargs.dry_run:\n return 0\n except AttributeError as e:\n print(\"Warning! Could not determine if dry-run, running the command anyway: {}\".format(e))\n return subprocess.call(command)", "def run_slurm(\n command: Sequence[str], *, env: Mapping[str, str], workdir: Path\n) -> Optional[SlurmJobID]:\n try:\n completed_process = run(\n command, cwd=workdir, capture_output=True, env=env, check=True\n )\n result = SlurmJobID(\n completed_process.stdout.decode(getpreferredencoding()).strip()\n )\n slurm_jobs_scheduled.append(result)\n except CalledProcessError as e:\n raise ValueError(f\"Job run failed for command {command}\") from e\n return result", "def get_cmd_line_args():\n parser = argparse.ArgumentParser(\n description=DOC, formatter_class=argparse.RawTextHelpFormatter)\n\n # Required arguments\n required = parser.add_argument_group(\"Required arguments\")\n required.add_argument(\n \"--fastq-regex\", required=True, type=is_fastq_regex, metavar=\"<regex>\",\n help=\n \"Snakemake regex used to infer the FASTQ files to process and the \"\n \"related wildcards: {sample} (mandatory), {lane} (optional) and \"\n \"{end} (mandatory if paired-end sequencing), e.g. 
\"\n \"/path/to/data/{sample}/{sample}_{ignore1}_{lane}_{end}_001.fastq.gz\"\n )\n required.add_argument(\n \"--outdir\", required=True, metavar=\"<dir>\", help=\"Output directory.\"\n )\n required.add_argument(\n \"--ref-build\", required=True, metavar=\"<version>\", help=\n \"Reference genome build, e.g. hg19 or mm10. Assuming the existence of \"\n \"the 3 following files in <--ref-genome-dir>: <ref build>.fa \"\n \"<ref build>.fa.fai and <ref build>.fa.dict\"\n )\n required.add_argument(\n \"--ref-genome-dir\", metavar=\"<dir>\", help=\n \"Bisulfite reference genome directory, including '<ref build>.fa', \"\n \"'<ref build>.fa.fai', '<ref build>.fa.dict' and the \"\n \"'Bisulfite_Genome' directory created by running the \"\n \"'bismark_genome_preparation' script. See README.md documentation.\"\n )\n\n\n # Optional general arguments\n optional = parser.add_argument_group(\"Optional\")\n optional.add_argument(\n \"--single-end\", action=\"store_false\", dest=\"paired_end\", help=\n \"By default paired-end sequencing is assumed, for single-end set this \"\n \"flag.\"\n )\n optional.add_argument(\n \"--rrbs\", action=\"store_true\", help=\n \"For Reduced Representation Bisulfite Sequencing (RRBS) set this flag.\"\n )\n optional.add_argument(\n \"--no-deduplication\", action=\"store_false\",\n dest=\"use_bismark_deduplicate\", help=\n \"Set this flag to not apply Bismark BAM deduplication. The deduplica\"\n \"tion removes reads with similar start/end positions on a given chromo\"\n \"some. It is not a valid PCR correction for RRBS or amplicon data. \"\n \"The deduplication is not applied if the --rrbs flag is set.\"\n )\n optional.add_argument(\n \"--non-directional-library\", action=\"store_false\",\n dest=\"directional_library\", help=\n \"By default the library is assumed to be directional, if not set this \"\n \"flag. See Bismark documentation for more information.\"\n )\n optional.add_argument(\n \"--target-bed\", type=is_file, metavar=\"<path>\", help=\n \"For targeted sequencing, the path to the BED file listing the regions\"\n \" targeted. Used only for read coverage computation. If no BED is pro\"\n \"vided the coverage will be computed on the whole reference genome.\"\n )\n optional.add_argument(\n \"--target-kit\", metavar=\"<name>\", help=\n \"For targeted sequencing, the name of the kit used to target to be \"\n \"reported in the preprocessing report. Does not affect processing.\"\n )\n optional.add_argument(\n \"--phred\", type=int, choices={33, 64}, default=DEFAULT_OF[\"phred\"],\n metavar=\"<33|64>\", help=\n \"Base quality encoding of input FASTQ files: 33|64, by default %i.\"\n % DEFAULT_OF[\"phred\"]\n )\n optional.add_argument(\n \"--r1-id\", default=DEFAULT_OF[\"r1_id\"], metavar=\"<ID>\", help=\n \"Case-insensitive ID used to identify R1 (forward) reads in paired-end\"\n \" sequencing, by default '%s'.\" % DEFAULT_OF[\"r1_id\"]\n )\n optional.add_argument(\n \"--r2-id\", default=DEFAULT_OF[\"r2_id\"], metavar=\"<ID>\", help=\n \"Case-insensitive ID used to identify R2 (reverse) reads in paired-end\"\n \" sequencing, by default '%s'.\" % DEFAULT_OF[\"r2_id\"]\n )\n optional.add_argument(\n \"--read-length\", type=int, metavar=\"<int>\", help=\n \"Length of reads (e.g. 150) to write in the HTML report. 
\"\n \"Does not affect the processing.\"\n )\n\n # Optional FastQC arguments\n fastqc = parser.add_argument_group(\"FastQC optional\")\n optional.add_argument(\n \"--fastqc-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"fastqc\"][\"threads\"], help=\n \"FastQC '--threads' argument, by default %i.\"\n % DEFAULT_OF[\"fastqc\"][\"threads\"]\n )\n\n # Optional Trim Galore arguments\n trim_galore = parser.add_argument_group(\"Trim Galore optional\")\n ADAPTERS_URL = (\n \"https://support.illumina.com/bulletins/2016/12/what-sequences-do-i\"\n \"-use-for-adapter-trimming.html\")\n trim_galore.add_argument(\n \"--adapter-r1\", metavar=\"<sequence>\", help=\n \"Trim Galore '--adapter' argument: adapter sequence to be trimmed off \"\n \"read 1. Common sequences: %s\" % ADAPTERS_URL\n )\n trim_galore.add_argument(\n \"--adapter-r2\", metavar=\"<sequence>\", help=\n \"Trim Galore '--adapter2' argument: adapter sequence to be trimmed \"\n \"off read 2. Common sequences: %s\" % ADAPTERS_URL\n )\n trim_galore.add_argument(\n \"--quality\", type=int, default=DEFAULT_OF[\"trim_galore\"][\"quality\"],\n metavar=\"<int>\", help=\n \"Trim Galore '--quality' argument, by default %i.\"\n % DEFAULT_OF[\"trim_galore\"][\"quality\"]\n )\n trim_galore.add_argument(\n \"--stringency\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"trim_galore\"][\"stringency\"], help=\n \"Trim Galore '--stringency' argument: overlap with adapter sequence \"\n \"required to trim, by default %i (very stringent).\"\n % DEFAULT_OF[\"trim_galore\"][\"stringency\"]\n )\n trim_galore.add_argument(\n \"--min-length\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"trim_galore\"][\"min_length\"], help=\n \"Trim Galore '--length' argument: minimum read length after trimming \"\n \"otherwise removed, by default %i.\"\n % DEFAULT_OF[\"trim_galore\"][\"min_length\"]\n )\n trim_galore.add_argument(\n \"--error-rate\", type=float, metavar=\"<float>\",\n default=DEFAULT_OF[\"trim_galore\"][\"error_rate\"], help=\n \"Trim Galore '-e' argument: maximum allowed error rate with the \"\n \"matching region, by default {}\"\n .format(DEFAULT_OF[\"trim_galore\"][\"error_rate\"])\n )\n trim_galore.add_argument(\n \"--max-n\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--max_n' argument: Maximum number of 'N's in a read \"\n \"otherwise removed. By default not applied.\"\n )\n trim_galore.add_argument(\n \"--trim-n\", action=\"store_true\", help=\n \"Trim Galore '--trim-n' argument: remove 'N's from ends of the read.\"\n )\n trim_galore.add_argument(\n \"--clip-r1-5p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--clip_R1' argument: remove basepairs from 5' end of \"\n \"read 1. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r2-5p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--clip_R2' argument: remove basepairs from 5' end of \"\n \"read 2. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r1-3p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--three_prime_clip_R1' argument: remove basepairs from \"\n \"3' end of read 1. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r2-3p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--three_prime_clip_R2' argument: remove basepairs from \"\n \"3' end of read 2. 
Useful if there is a methylation bias at this end.\"\n )\n\n # Optional Bismark tools arguments\n bismark = parser.add_argument_group(\"Bismark optional\")\n bismark.add_argument(\n \"--seed-mismatch\", type=int, choices=[0, 1], metavar=\"<0|1>\",\n default=DEFAULT_OF[\"bismark_bowtie2\"][\"seed_mismatch\"], help=\n \"Maximum number of mismatch allowed in a seed alignment: 0|1, \"\n \"by default %i.\" % DEFAULT_OF[\"bismark_bowtie2\"][\"seed_mismatch\"]\n )\n bismark.add_argument(\n \"--bowtie2-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"bismark_bowtie2\"][\"threads\"], help=\n \"Bowtie2 '--threads' argument, by default %i\"\n % DEFAULT_OF[\"bismark_bowtie2\"][\"threads\"])\n bismark.add_argument(\n \"--meth-extract-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"bismark_meth_extract\"][\"threads\"], help=\n \"bismark_methylation_extractor '--multicore' argument, by default %i.\"\n % DEFAULT_OF[\"bismark_meth_extract\"][\"threads\"]\n )\n\n # Optional Picard arguments\n picard = parser.add_argument_group(\"Picard optional\")\n picard.add_argument(\n \"--picard-jvm-args\", default=DEFAULT_OF[\"picard\"][\"jvm_args\"],\n metavar=\"<args>\", help=\n \"Java virtual machine arguments, e.g. to control starting and maximum \"\n \"heap size when running Picard, by default '%s'.\"\n % DEFAULT_OF[\"picard\"][\"jvm_args\"]\n )\n\n # Optional Samtools arguments\n samtools = parser.add_argument_group(\"Samtools optional\")\n samtools.add_argument(\n \"--samtools-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"samtools\"][\"threads\"], help=\n \"Samtools '--threads' argument, by default %i\"\n % DEFAULT_OF[\"samtools\"][\"threads\"])\n\n # Parse the command line\n args = parser.parse_args()\n\n # For paired-end sequencing, check that the '{end}' wildcard is provided\n if args.paired_end is True and \"{end}\" not in args.fastq_regex:\n raise ValueError(\n \"The wildcard '{end}' is required in --fastq-regex argument when \"\n \"working with paired-end sequencing.\")\n\n # Set 'use_bismark_deduplicate' to False if RRBS data\n if args.rrbs is True:\n args.use_bismark_deduplicate = False\n\n # Check reference genome directory\n is_ref_genome_dir(args.ref_genome_dir, args.ref_build)\n\n # Convert the argparse object to a dict mapping <arg name> -> <val>\n kwargs = vars(args)\n\n return kwargs", "def sbatch(conn):\n sbatch_script_name = \"./sbatch_script.sh\"\n job_id = conn.run(f\"sbatch {sbatch_script_name}\").stdout.rsplit(None, 1)[-1]\n def check_state(prev_state):\n try:\n state, reason = conn.run(\n f\"squeue -j {job_id} -O state,reason -h\", hide=True).stdout.split()\n except:\n state = conn.run(f\"sacct -j {job_id} --format=state | head -1\",\n hide=True).stdout.strip()\n reason = None\n if state == \"PENDING\":\n if prev_state != state:\n logging.info(f\"{Fore.CYAN}PENDING({reason}){Style.RESET_ALL}\")\n elif state == \"RUNNING\":\n if prev_state != state:\n logging.info(f\"{Fore.CYAN}RUNNING{Style.RESET_ALL}\")\n elif state == \"FAILED\":\n if prev_state != state:\n logging.info(f\"{Fore.RED}FAILED({reason}){Style.RESET_ALL}\")\n else:\n if prev_state != state:\n logging.info(f\"{Fore.RED}{state}({reason}){Style.RESET_ALL}\")\n return state\n\n state = None\n while True:\n state = check_state(prev_state=state)\n if state == \"RUNNING\":\n node = conn.run(f\"squeue -j {job_id} -O nodelist -h\",\n hide=True).stdout.strip()\n return node, job_id\n elif state == \"FAILED\":\n stdout = conn.run(\"cat slurm.out\").stdout\n stderr = conn.run(\"cat 
slurm.err\").stdout\n logging.info(f\"{Fore.RED}stdout{Style.RESET_ALL}\")\n logging.info(stdout)\n logging.info(f\"{Fore.RED}stderr{Style.RESET_ALL}\")\n logging.info(stderr)\n return None, job_id\n elif state == \"PENDING\":\n sleep(10)\n continue\n else:\n logging.info(state)\n return None, job_id", "def build_slurm_header(pars):\n name = pars.get('name','default')\n job = 'job_'+name\n\n lines = []\n lines.append('#!/bin/bash')\n lines.append('#SBATCH --nodes=%s ### Number of nodes'%pars['nodes'])\n lines.append('#SBATCH --ntasks-per-node=%s ### Number of MPI tasks per node'%pars['ntasks_per_node'])\n lines.append('#SBATCH --cpus-per-task=%s ### Number of HT per task'%pars['cpus_per_task'])\n if pars['gpus_per_node'] is not None:\n lines.append('#SBATCH --gpus-per-node=%s ### Number of GPUS per node'%pars['gpus_per_node'])\n if pars['memory'] is not None:\n lines.append('#SBATCH --mem %s ### Memory per node'%pars['memory'])\n if pars['time'] is not None:\n lines.append('#SBATCH --time %s ### Walltime, format: HH:MM:SS'%pars['time'])\n if pars['partition'] is not None:\n lines.append('#SBATCH --partition %s'%pars['partition'])\n if pars['account'] is not None:\n lines.append('#SBATCH --account %s'%pars['account'])\n if pars['qos'] is not None:\n lines.append('#SBATCH --qos %s'%pars['qos'])\n lines.append('#SBATCH --job-name=%s'%job)\n lines.append('#SBATCH --output=%s.out'%job)\n lines.append('')\n lines.append('export OMP_NUM_THREADS=%s'%pars['omp_num_threads'])\n lines.append('')\n lines.append('echo \"Cluster name $SLURM_CLUSTER_NAME\"')\n lines.append('echo \"Job name $SLURM_JOB_NAME \"')\n lines.append('echo \"Job id $SLURM_JOB_ID\"')\n lines.append('echo \"Job nodelist $SLURM_JOB_NODELIST\"')\n lines.append('echo \"Number of nodes $SLURM_JOB_NUM_NODES\"')\n lines.append('echo \"Number of tasks $SLURM_NTASKS\"')\n lines.append('echo \"Number of tasks per node $SLURM_TASKS_PER_NODE\"')\n lines.append('echo \"Number of threads per task $SLURM_CPUS_PER_TASK\"')\n lines.append('echo \"Number of gpus per node $SLURM_GPUS_PER_NODE\"')\n lines.append('echo \"OMP_NUM_THREADS : $OMP_NUM_THREADS\"')\n lines.append('')\n lines.append('echo \" \"')\n lines.append('echo \"###############End of the header section###############\"')\n lines.append('echo \" \"')\n lines.append('')\n\n return lines", "def process_command_line(argv):\n parser = argparse.ArgumentParser(description='Run SMRS algorithm.')\n #Positional args\n parser.add_argument('files', metavar='molec1.ext',\n nargs='+', help='files to be processed')\n #Optional true/false args\n parser.add_argument('-d', '--dihed',\n help='Use dihedral angles as coordinates',\n action='store_true')\n parser.add_argument('-n', '--nonH',\n help='Do not include hydrogen in coords',\n action='store_true')\n parser.add_argument('-e', '--energy', help='Append energy at the end of \\\n molecules vector', action='store_true')\n parser.add_argument('--delCoordCSV', help='Delete CSV file with molecule \\\n coordinates', action='store_true')\n parser.add_argument('--delCoefCSV', help='Delete C matrix CSV file',\n action='store_true')\n parser.add_argument('--folder', help='Name of folder, where representative\\\n molecules will be saved', action='store_true')\n parser.add_argument('--sdf', help='If you want to calculate pybel\\\n fingerprints', action='store_true')\n #Optional args with value\n parser.add_argument('--alpha', help='Specify lambda paramter',\n type=int, metavar='A')\n parser.add_argument('--division', help='Specify type and parametrs\\\n of 
molecule division into groups. Parametrs should\\\n be separated by comma without spaces', metavar='name')\n parser.add_argument('--pruneStart', help='Specify minimum RMSD by which \\\n molecules should be separated in starting set',\n type=float, metavar='RMSD')\n parser.add_argument('--pruneFinish', help='Specify minimum RMSD by which \\\n representive molecules should be separated',\n type=float, metavar='RMSD')\n parser.add_argument('--format', help='Babel type of input molecules')\n return parser.parse_args(argv)", "def _construct_crawl_command(self, site_list, crawl_id):\n #path = os.path.realpath(__file__).rpartition('/')[0]\n cmd_line = (\"python {}/spiderrunner.py {} -r host:{},port:{} -m {}\"\n \" -t {} -c {}\").format(\n _spdr_engine_location(), site_list, self.engine_redis_host,\n self.engine_redis_port, self.mappers,\n self.max_pages, crawl_id)\n if self.psuedo_dist:\n cmd_line += \" -d\"\n return cmd_line", "def parse_shared_configuration(parsed: Namespace) -> StandardArgs: \n\n # example srun_command: srun --exclusive -n 1 -p <partition>\n slurm_command = f\"srun -n 1 -p {parsed.slurm_partition} {'--exclusive' if not parsed.slurm_accept_shared_nodes else ''}\"\n if parsed.slurm_gpus_per_node > 0:\n slurm_command += f\" --gpus-per-node={parsed.slurm_gpus_per_node}\"\n if parsed.outfile is not None and parsed.outfile != '' and os.path.exists(parsed.outfile) and parsed.outfile != \"/dev/null\":\n raise Exception('Error: Requested to write to an existing output file. Aborting to avoid overwriting file.')\n # configure verbosity for the run\n print_per_verbose.__dict__['verbosity_level'] = parsed.verbose or 0\n\n # We would very much like to avoid this manual copying, unfortunately the argsparse module and the typing module\n # don't play at all nicely with each other. 
Maybe fix this later (keyword TAP/typed arg parser)\n return StandardArgs(\n test = parsed.test,\n timeout_min = parsed.timeout_min,\n outfile = parsed.outfile,\n workercount = max(parsed.workercount, 1),\n # As a reminder, argparse converts internal -es to _s to keep the identifiers valid\n job_cache = None if parsed.no_job_cache else parsed.job_cache,\n use_container = parsed.use_container or \\\n ((not parsed.no_container) \\\n and os.getenv('HITHER_USE_CONTAINER') in ['TRUE', '1']),\n use_slurm = parsed.use_slurm,\n slurm_max_jobs_per_alloc = parsed.slurm_jobs_per_allocation,\n slurm_max_simultaneous_allocs = parsed.slurm_max_simultaneous_allocations,\n slurm_command = slurm_command\n )", "def _get_fastq_to_sam_cmd(fwd_reads, sample_name, read_group, rev_reads=None):\n\n cmd = [\n 'java', '-jar', '/picard/picard.jar', 'FastqToSam',\n f'F1={fwd_reads}',\n f'O=/dev/stdout',\n 'QUIET=true',\n f'SM={sample_name}',\n f'RG={read_group}'\n ]\n if rev_reads is not None:\n cmd.append(f'F2={rev_reads}')\n\n return cmd", "def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 # total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + 
sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return", "def launchJobs(options, cmdargs, errStream=sys.stdin):\n\n if options.queue == LOCAL:\n launchLocalJobs(options,cmdargs,errStream)\n return\n\n logging.debug(\"Launching task array: %r\" % ({'tmpDir':options.tmpDir,'splits':options.splits,'fragName':options.fragBase,'cmd':cmdargs,'sgeOpts':options.sgeOptions,'job':options.jobName,'priority':options.priority,'loglevel':options.verbose,'wait':options.wait, 'type':options.taskType}))\n \n # SGE or SLURM submission prefix\n command = getSubmissionCommandPrefix(options)\n\n # batch_runner command\n command.append(BATCHLAUNCHER)\n command+=[\"--mode\",\"run\",\"--tmp_dir\",options.tmpDir,\"--frag_base\",\n options.fragBase, \"--frag_dir\", options.frag_dir, \"--frag_suffix\", options.fragSuff, \"--loglevel\", str(options.verbose), \"--queue\", options.queue]\n if options.inputFlag is not None:\n command.append('-i=%s' % (options.inputFlag))\n if options.prefixFlag is not None:\n command.append('-p=%s' % (options.prefixFlag))\n if options.threadsFlag is not None:\n command+=['-t',str(options.threadsFlag)]\n if options.outputFlags is not None:\n for flag in options.outputFlags:\n command.append('-o=%s' % (flag))\n if options.taskType is not None:\n command+=['--taskType',options.taskType]\n if options.cwd:\n command.append('--cwd')\n command.append('--')\n command+=cmdargs\n\n # redirect qsub output to std, silence if vebose is 0\n #if options.verbose==0:\n # qsubOuts=open(os.devnull,'w')\n #else:\n # qsubOuts=errStream\n \n # run command\n logging.debug('Launching task array: %s' % (formatCommand(command)))\n try:\n submissionOutput = subprocess.check_output(command)\n try:\n submissionOutput = submissionOutput.decode()\n except:\n pass\n if options.verbose>0:\n errStream.write(\"Submission Output: \" + submissionOutput)\n except subprocess.CalledProcessError as error:\n if options.wait and options.queue != SLURM:\n # when using -sync y, the exit code may come from a task\n # (which cleanup will handle)\n logging.warning(\"qsub returned an error code of: %d\" \n % error.returncode)\n else:\n raise error\n\n # get job id\n try:\n jobid = re.search(r'(\\d+)\\s*$',submissionOutput).group(1)\n options.jobid = jobid\n except:\n if options.queue==SLURM:\n logging.error(\"Cannot parse SLURM job id from '%s'\" % (submissionOutput))\n raise\n\n # SLURM doesn't allow waiting for completion on array jobs, so we hack:\n # use srun to start a dummy job that will wait for our job array\n if options.wait and options.queue==SLURM:\n waitForSlurmArray(options, errStream)", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"Assemble raw reads using ABySS\"\"\"\n )\n parser.add_argument(\n \"--output\",\n required=True,\n action=FullPaths,\n default=None,\n help=\"\"\"The directory in which to store the assembly data\"\"\",\n )\n parser.add_argument(\n \"--kmer\", type=int, default=31, help=\"\"\"The kmer value to use\"\"\"\n )\n parser.add_argument(\n \"--cores\",\n type=int,\n default=1,\n help=\"\"\"The number of compute cores/threads to run with Trinity\"\"\",\n )\n parser.add_argument(\n \"--subfolder\",\n type=str,\n default=\"\",\n help=\"\"\"A subdirectory, below the level of the group, containing the reads\"\"\",\n )\n 
parser.add_argument(\n \"--verbosity\",\n type=str,\n choices=[\"INFO\", \"WARN\", \"CRITICAL\"],\n default=\"INFO\",\n help=\"\"\"The logging level to use\"\"\",\n )\n parser.add_argument(\n \"--log-path\",\n action=FullPaths,\n type=is_dir,\n default=None,\n help=\"\"\"The path to a directory to hold logs.\"\"\",\n )\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Cleanup all intermediate Trinity files\"\"\",\n )\n parser.add_argument(\n \"--abyss-se\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Only use abyss-se\"\"\",\n )\n # one of these is required. The other will be set to None.\n input_data = parser.add_mutually_exclusive_group(required=True)\n input_data.add_argument(\n \"--config\",\n type=is_file,\n action=FullPaths,\n default=None,\n help=\"\"\"A configuration file containing reads to assemble\"\"\",\n )\n input_data.add_argument(\n \"--dir\",\n type=is_dir,\n action=FullPaths,\n default=None,\n help=\"\"\"A directory of reads to assemble\"\"\",\n )\n return parser.parse_args()", "def parse_jobscript():\n p = argparse.ArgumentParser(description=\"SGE snakemake submit script\")\n p.add_argument(\"jobscript\", help=\"Snakemake jobscript with job properties.\")\n return p.parse_args().jobscript", "def execute(self,cmd,runsetname,stdoutfile=None,stderrfile=None,logger=None):\n # For options like \"help\" and \"usage\", the parent method should be called.\n for option in SBATCH_NOSUBMIT_OPTIONS:\n if \"--%s\" % option in cmd:\n return super(self.__class__,self).execute(cmd,runsetname,stdoutfile=stdoutfile,stderrfile=stderrfile,logger=logger)\n \n if logger is None:\n logger = self.logger\n if stdoutfile is None:\n stdoutfile = logger.getStdOutFileName()\n if stderrfile is None:\n stderrfile = logger.getStdErrFileName()\n \n \n hostname = socket.gethostname().split('.',1)[0]\n pid = os.fork()\n if pid == 0:\n proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n (out,err) = proc.communicate()\n if err:\n raise Exception(\"sbatch submission failed %s\" % err)\n \n jobid = out.split()[-1]\n \n starttime = datetime.datetime.now()\n runset = []\n runlog = RunLog( jobid=jobid,\n cmdstring=cmd,\n starttime=starttime,\n hostname=hostname,\n stdoutfile=stdoutfile,\n stderrfile=stderrfile,\n runner=\"%s.%s\" % (self.__module__, self.__class__.__name__)\n )\n runset.append(runlog)\n if self.verbose > 0:\n print runlog\n logger.saveRunSet(runset, runsetname)\n os._exit(0)\n else:\n # Wait until the runset file has been written\n ready = False\n while not ready:\n time.sleep(2)\n try:\n logger.getRunSet(runsetname)\n ready = True\n except Exception:\n pass\n return None", "def cmd(self):\n settings = self.settings\n\n command = ['ruby', '-S']\n\n if settings.get('use_bundle_exec', False):\n command.extend(['bundle', 'exec'])\n\n command.extend(['slim-lint'])\n\n return command", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n 
sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)", "def create_batch_config(slurm_config):\n\n # magic number\n b = \"#!/bin/bash%s\" % sl\n\n #########################\n # auto-generated header #\n #########################\n b += \"######################################################%s\" % sl\n b += \"# WARNING - AUTO GENERATED FILE%s\" % sl\n b += \"# Please don't modify that file manually%s\" % sl\n b += \"######################################################%s\" % sl\n\n ######################\n # node configuration #\n ######################\n # job name\n b += \"#SBATCH --job-name=\\\"%s%d\\\"%s\" % (slurm_config.job_name,\n slurm_config.job_number, sl)\n\n # number of nodes required to execute the job\n b += \"#SBATCH --nodes=%s%s\" % (slurm_config.num_nodes, sl)\n\n # number of cpus per tasks\n b += \"#SBATCH --cpus-per-task=%s%s\" % (slurm_config.num_cpus_per_task, sl)\n\n # number of tasks\n b += \"#SBATCH --ntasks=%s%s\" % (slurm_config.num_tasks_per_node, sl)\n\n # memory required per task in Mbytes\n b += \"#SBATCH --mem=%s%s\" % (slurm_config.memory_mb, sl)\n\n # slurm session time\n b += \"#SBATCH --time=%s%s\" % (slurm_config.session_time, sl)\n\n # job partition\n b += \"#SBATCH --partition=%s%s\" % (slurm_config.partition, sl)\n\n # job account\n b += \"#SBATCH --account=%s%s\" % (slurm_config.project_name, sl)\n\n # On which nodes, this job will be executed\n # This option is used if the required modules are installed on a specific\n # node\n # b += \"#SBATCH --nodelist=%s%s\" % (slurm_config.node_list, sl)\n\n #####################\n # user notification #\n #####################\n if slurm_config.enable_email_notification:\n b += \"#SBATCH --mail-type=ALL%s\" % sl\n b += \"#SBATCH --mail-user=%s%s\" % (slurm_config.user_email, sl)\n\n ##################\n # log generation #\n ##################\n if slurm_config.enable_logs:\n std_out = \"%s/slurm-stdout_%d.log\" % \\\n (slurm_config.log_files_path, slurm_config.job_number)\n std_err = 
\"%s/slurm-stderr_%d.log\" % \\\n (slurm_config.log_files_path, slurm_config.job_number)\n b += \"#SBATCH --output=%s%s\" % (std_out, sl)\n b += \"#SBATCH --error=%s%s\" % (std_err, dl)\n\n ####################\n # System variables #\n ####################\n # slurm profile\n b += \"# Loading profiles%s\" % sl\n b += \"%s%s\" % (slurm_config.profile, dl)\n\n # job home\n b += \"#JOB_HOME=\\\"%s\\\"%s\" % (slurm_config.execution_path, sl)\n\n # KERBEROS renewal\n b += \"# Renewal of KERBEROS periodically for the length of the job%s\" % sl\n b += \"krenew -b -K 30%s\" % dl\n\n # slurm modules\n b += \"# Loading the modules.%s\" % sl\n b += \"%s%s\" % (slurm_config.modules, dl)\n\n # environmental variables\n b += \"# Setting the environmental variables.%s\" % sl\n b += \"export PATH=%s:$PATH%s\" % (slurm_config.env_path, sl)\n b += \"export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH%s\" % \\\n (slurm_config.env_ld_library_path, sl)\n b += \"export PYTHONPATH=%s:$PYTHONPATH%s\" % (slurm_config.env_python_path,\n dl)\n # node list\n b += \"echo \\\"On which node your job has been scheduled :\\\"%s\" % sl\n b += \"echo $SLURM_JOB_NODELIST%s\" % dl\n\n # shell limits\n b += \"echo \\\"Print current shell limits :\\\"%s\" % sl\n b += \"ulimit -a%s\" % dl\n\n # running the serial tasks.\n b += \"echo \\\"Now run your serial tasks ...\\\"%s\" % sl\n b += \"cd %s%s\" % (slurm_config.execution_path, dl)\n ####################################################################\n\n return b", "def create_slurm_options_string(slurm_options: dict, srun: bool = False):\n if srun:\n option_structure = \" {prepend}{key}={value}\"\n else:\n option_structure = \"#SBATCH {prepend}{key}={value}\\n\"\n\n slurm_options_str = \"\"\n for key, value_raw in slurm_options.items():\n prepend = '-' if len(key) == 1 else '--'\n if key in ['partition', 'p'] and isinstance(value_raw, list):\n value = ','.join(value_raw)\n else:\n value = value_raw\n slurm_options_str += option_structure.format(prepend=prepend, key=key, value=value)\n return slurm_options_str", "def start_srun_job(collection, exp, unobserved=False,\n srun_options=None, seml_arguments=None):\n\n # Construct srun options string\n # srun will run 2 processes in parallel when ntasks is not specified. Probably because of hyperthreading.\n if 'ntasks' not in srun_options:\n srun_options['ntasks'] = 1\n srun_options_str = create_slurm_options_string(srun_options, True)\n\n if not unobserved:\n collection.update_one(\n {'_id': exp['_id']},\n {'$set': {'slurm.sbatch_options': srun_options}})\n\n # Set command args for job inside Slurm\n cmd_args = f\"--local --sacred-id {exp['_id']} \"\n cmd_args += ' '.join(seml_arguments)\n\n cmd = (f\"srun{srun_options_str} seml {collection.name} start {cmd_args}\")\n try:\n subprocess.run(cmd, shell=True, check=True)\n except subprocess.CalledProcessError as e:\n logging.error(f\"Could not start Slurm job via srun. Here's the sbatch error message:\\n\"\n f\"{e.stderr.decode('utf-8')}\")\n exit(1)", "def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n # sbatch command. 
Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n prefix = ''\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index += 1\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n\r\n # Create lines for SLURM input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix + input_file_parameters.job_name)\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --array={0}-{1}'.format(1, len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SLURM_ARRAY_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n 
resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths", "def parse_command_line(args):\r\n\r\n\r\n # Initial validity check of input command: each argument may be\r\n # defined only once\r\n for a in args:\r\n if a.startswith('--'):\r\n if args.count(a) > 1:\r\n raise STAPLERerror.STAPLERerror('Each command line parameter can be '\r\n 'defined only once! The following '\r\n 'parameter was defined multiple times:\\n '\r\n '{0}'.format(a))\r\n\r\n # Initialize a named tuple to store all available command line arguments\r\n Command_line_parameters = namedtuple('Input_file_parameters',\r\n ['all_parameters',\r\n 'staplerfile_path',\r\n 'resource_manager',\r\n 'max_job_count',\r\n 'auto_split_workflows',\r\n 'compress_run',\r\n 'validate_run',\r\n 'fix_run',\r\n 'rm_workflow'])\r\n\r\n # Parse user command line and check sanity of values\r\n\r\n # Parse the resource manager to use\r\n all_parameters = ' '.join(args)\r\n resource_manager = None\r\n if '--lsf' in args:\r\n resource_manager = 'lsf'\r\n args.remove('--lsf')\r\n if '--sge' in args:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'sge'\r\n args.remove('--sge')\r\n if '--slurm' in args:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'slurm'\r\n args.remove('--slurm')\r\n if '--torque' in args:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'torque'\r\n args.remove('--torque')\r\n if '--UNIX' in args or resource_manager is None:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'unix'\r\n if '--UNIX' in args: args.remove('--UNIX')\r\n\r\n # Parse the limit for maximum number of jobs to spawn\r\n if '--max_job_count' in args:\r\n if resource_manager is None:\r\n raise STAPLERerror.STAPLERerror('--max_job_count parameter can only be defined '\r\n 'if a resource manager is also defined '\r\n '(e.g. --slurm)!')\r\n try:\r\n max_job_count = int(args[args.index('--max_job_count')+1])\r\n except (TypeError, IndexError):\r\n raise STAPLERerror.STAPLERerror('--max_job_count requires a positive integer '\r\n 'value, e.g. --max_job_count 16')\r\n if max_job_count < 1:\r\n raise STAPLERerror.STAPLERerror('--max_job_count requires a positive integer '\r\n 'value, e.g. 
--max_job_count 16')\r\n args.pop(args.index('--max_job_count')+1)\r\n args.remove('--max_job_count')\r\n else:\r\n if resource_manager == 'unix':\r\n max_job_count = multiprocessing.cpu_count()\r\n else:\r\n max_job_count = None\r\n\r\n # Parse workflow control parameters\r\n if '--priority' in args:\r\n if resource_manager is None:\r\n raise STAPLERerror.STAPLERerror('--priority parameter can be used only if a '\r\n 'resource manager (e.g. SLURM) is specified!')\r\n if resource_manager == 'unix':\r\n raise STAPLERerror.STAPLERerror('--priority parameter cannot be '\r\n 'used in combination with --UNIX '\r\n 'parameter!')\r\n try:\r\n if (args[args.index('--priority')+1]).lower() in ('continuous', 'c'):\r\n auto_split_workflows = False\r\n elif (args[args.index('--priority')+1]).lower() in ('split', 's'):\r\n auto_split_workflows = True\r\n else:\r\n raise STAPLERerror.STAPLERerror('Allowed values for --priority parameter are '\r\n '\"continuous\", \"c\", \"split\" and \"s\"!')\r\n except (TypeError, IndexError):\r\n raise STAPLERerror('--priority parameter requires a value! Allowed values are '\r\n '\"continuous\", \"c\", \"split\" and \"s\" ')\r\n args.pop(args.index('--priority')+1)\r\n args.remove('--priority')\r\n else:\r\n if resource_manager == 'unix':\r\n auto_split_workflows = True\r\n else:\r\n auto_split_workflows = False\r\n compress_run = None\r\n\r\n # Parse workflow compression/decompression parameters\r\n if '--compress' in args:\r\n compress_run = 'compress'\r\n args.remove('--compress')\r\n if '--decompress' in args:\r\n if '--compress' in args:\r\n raise STAPLERerror.STAPLERerror('--compress and --decompress parameters can '\r\n 'not be used simultaneously!')\r\n compress_run = 'decompress'\r\n args.remove('--decompress')\r\n\r\n # Parse workflow validation/fixing/removing parameters\r\n if '--validate_run' in args:\r\n validate_run = True\r\n args.remove('--validate_run')\r\n else:\r\n validate_run = False\r\n if '--fix_run' in args:\r\n fix_run = True\r\n args.remove('--fix_run')\r\n else:\r\n fix_run = False\r\n if '--remove' in args:\r\n rm_workflow = True\r\n args.remove('--remove')\r\n else:\r\n rm_workflow = False\r\n\r\n # Parse path to staplefile. All other valid parameters are now read & removed\r\n # from args.\r\n if len(args) == 1:\r\n if os.path.isfile(args[0]):\r\n staplerfile_path = args[0]\r\n else:\r\n raise STAPLERerror.STAPLERerror('Command line contains an odd value:\\n{0}\\n '\r\n 'This is not an existing path to a staplerfile '\r\n 'or any other recognized parameter!'.format(\r\n args[0]))\r\n elif len(args) == 0:\r\n raise STAPLERerror.STAPLERerror('Command line is missing path to staplerfile!')\r\n elif len(args) > 1:\r\n for a in args:\r\n if os.path.isfile(a):\r\n odd_values = args\r\n args.remove(a)\r\n raise STAPLERerror.STAPLERerror('Command line contains some odd '\r\n 'parameters! The string \"{0}\" is '\r\n 'probably the path to stapler file, '\r\n 'but the following parameters are '\r\n 'unknown:\\n{1}\\nFor more info, '\r\n 'type\\npython STAPLER.py -h'.format(a,\r\n '\\n'.join(odd_values)))\r\n raise STAPLERerror.STAPLERerror('Command line is missing path to staplerfile! 
'\r\n 'Instead, some odd parameters are present:\\n{0}'.format('\\n'.join(args)))\r\n\r\n # Do further validity checks for different parameter combinations\r\n if validate_run and fix_run:\r\n raise STAPLERerror.STAPLERerror('--validate_run and --fix_run cannot be used '\r\n 'in the same command!')\r\n if validate_run and rm_workflow:\r\n raise STAPLERerror.STAPLERerror('--remove_WORKFLOW and --validate_run cannot be '\r\n 'used in the same command!')\r\n if fix_run and rm_workflow:\r\n raise STAPLERerror.STAPLERerror('--fix_run and REMOVE_WORKFLOW cannot be used in '\r\n 'the same command!')\r\n if compress_run is not None:\r\n if validate_run or rm_workflow or fix_run:\r\n raise STAPLERerror.STAPLERerror('--validate_run, --remove_WORKFLOW or --fix_run '\r\n 'parameters cannot be used in the same command '\r\n 'with --COMRESS_RUN!')\r\n if validate_run or rm_workflow:\r\n if resource_manager is not 'unix':\r\n raise STAPLERerror.STAPLERerror('Resource managers cannot be used when '\r\n 'removing workflows!')\r\n\r\n command_line_parameters = Command_line_parameters(\r\n all_parameters=all_parameters,\r\n staplerfile_path=staplerfile_path,\r\n resource_manager=resource_manager,\r\n max_job_count=max_job_count,\r\n auto_split_workflows=auto_split_workflows,\r\n compress_run=compress_run,\r\n validate_run=validate_run,\r\n fix_run=fix_run,\r\n rm_workflow=rm_workflow)\r\n\r\n return command_line_parameters", "def makeJob(kallisto, index, meta, bootstraps, files, single, s=1, l=180): \n cmd = \"%(kallisto)s quant -i %(index)s -o %(meta)s \" % locals()\n for file in files: \n cmd += \" ../%s\" % file \n if single: \n cmd += \" --single -l %(l)i -s %(s)i\" % locals()\n cmd += \" &> %s.log.txt\" % meta\n return cmd", "def form_bowtie_cmd_list(bowtie_fp, index_fp, pe1_fastq, pe2_fastq, u_fastq, output_sam_fp):\n\n # TODO this might not be necessary for now\n if bowtie_fp is '':\n raise ValueError('bowtie2_path is empty')\n if index_fp is '':\n raise ValueError('index_path is empty')\n if not pe1_fastq and not pe2_fastq and not u_fastq:\n raise ValueError('no fastq files specified')\n if output_sam_fp is '':\n raise ValueError('output_file_path is empty')\n\n # required arguments\n call_args_list = [bowtie_fp, '-x', index_fp]\n\n # add comma separated list of fastq files to process\n if pe1_fastq:\n call_args_list.append('-1')\n pe1_fq_str = \",\".join(pe1_fastq)\n call_args_list.append(pe1_fq_str)\n if pe2_fastq:\n call_args_list.append('-2')\n pe2_fq_str = \",\".join(pe2_fastq)\n call_args_list.append(pe2_fq_str)\n if u_fastq:\n call_args_list.append('-U')\n u_fq_str = \",\".join(u_fastq)\n call_args_list.append(u_fq_str)\n\n call_args_list.extend(['-S', output_sam_fp])\n\n return call_args_list", "def call_command_line(string, **kwargs):\n return subprocess.run(string.split(\" \"), **kwargs)" ]
[ "0.6520527", "0.6046256", "0.59337866", "0.5885702", "0.5820681", "0.57094526", "0.56163365", "0.5544981", "0.5541964", "0.5485391", "0.5475854", "0.5465947", "0.5405424", "0.5392933", "0.536621", "0.53590006", "0.5328773", "0.5317938", "0.5304243", "0.5289871", "0.52894807", "0.5284522", "0.5255762", "0.5251174", "0.5250602", "0.52194405", "0.5186839", "0.5177902", "0.5167411", "0.5167305" ]
0.7210597
0
Construct a Bash command to run the given sbatch script. This returns a sequence of command-line arguments that can be passed directly to subprocess.run(). Note that the sbatch-specific options are ignored.
def build_bash_command( script_path: Path, *, # pylint:disable=unused-argument dependencies: Sequence[SlurmJobID] = (), job_name: Optional[str] = None, log_dir: Optional[Path] = None, email: Optional[Email] = None, mail_types: Sequence[MailType] = (), extra_sbatch_args: Sequence[str] = (), # pylint:enable=unused-argument script_args: Sequence[str], ) -> Sequence[str]: result = ["bash", str(script_path)] result.extend(script_args) return result
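A brief usage sketch (illustrative only, not part of the dataset row): assuming build_bash_command above is importable, its return value can be passed straight to subprocess.run(); the script path and arguments below are hypothetical.

    from pathlib import Path
    import subprocess

    # Hypothetical script and arguments, used only to illustrate the call.
    cmd = build_bash_command(
        Path("jobs/train.sh"),
        job_name="train",               # accepted but ignored by the bash runner
        script_args=["--epochs", "10"],
    )
    # cmd == ["bash", "jobs/train.sh", "--epochs", "10"]
    subprocess.run(cmd, check=True)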
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_sbatch_command(\n script_path: Path,\n *,\n dependencies: Sequence[SlurmJobID] = (),\n job_name: Optional[str] = None,\n log_dir: Optional[Path] = None,\n email: Optional[Email] = None,\n mail_types: Sequence[MailType] = (),\n extra_sbatch_args: Sequence[str] = (),\n script_args: Sequence[str],\n) -> Sequence[str]:\n result = [\"sbatch\", \"--parsable\", \"--requeue\"]\n if dependencies:\n result.extend(\n [\n # Don't run this job until the dependencies complete successfully.\n f\"--dependency=afterok:{':'.join(dependencies)}\",\n # If any of the dependencies fail, cancel this job.\n \"--kill-on-invalid-dep=yes\",\n ]\n )\n if job_name:\n result.append(f\"--job-name={job_name}\")\n if log_dir:\n log_dir.mkdir(exist_ok=True, parents=True)\n result.append(f\"--output={str(log_dir.joinpath('R-%x.%j.out'))}\")\n\n result.extend(_build_email_arg(email=email, mail_types=mail_types))\n result.extend(extra_sbatch_args)\n result.append(str(script_path))\n result.extend(script_args)\n\n return result", "def cmd(self):\n settings = self.settings\n\n command = ['ruby', '-S']\n\n if settings.get('use_bundle_exec', False):\n command.extend(['bundle', 'exec'])\n\n command.extend(['slim-lint'])\n\n return command", "def build_sh_cmd(cmd, cwd=None):\n args = cmd.split()\n return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])", "def run_bash(\n command: Sequence[str], *, env: Mapping[str, str], workdir: Path\n) -> Optional[SlurmJobID]:\n run(command, cwd=workdir, env=env, check=True)\n return None", "def submitSlurmScript(commands_list, outputName = None):\n longString = \";\".join(commands_list)\n print(longString.replace(\";\", \"\\n\"))\n if outputName is not None:\n sCommand = 'sbatch -p short -c 1 -t 0-11:59 --mem=60G [email protected] \\\n --output {outputSlurm} --wrap=\"{commandString}\"'.format(commandString = longString, outputSlurm = outputName)\n else: \n sCommand = 'sbatch -p short -c 1 -t 0-11:59 --mem=60G [email protected] \\\n --wrap=\"{0}\"'.format(longString)\n os.system(sCommand)", "def _cmd_builder(self, test_config):\n arg_str = ''\n for key, value in sorted(test_config['args'].items()):\n arg_str += '--{} {} '.format(key, value)\n return test_config['pycmd'].format(arg_str)", "def createScript_sbatch(self):\n tools_createScript_sbatch(\n sbatch_script_file_name = self.sbatchFile_addMEM,\n executable = self.executable_addMEM,\n command_line_parameters = self.cfgFiles_addMEM_modified,\n input_file_names = self.inputFiles,\n output_file_names = self.outputFiles,\n script_file_names = self.shFiles_addMEM_modified,\n log_file_names = self.logFiles_addMEM,\n keep_logs = False,\n working_dir = self.workingDir,\n max_num_jobs = 100000000, # it's really silly to limit the number of jobs; use an enormous number as the ,,fix''\n cvmfs_error_log = self.cvmfs_error_log,\n pool_id = self.pool_id,\n use_home = self.use_home,\n validate_outputs = self.check_output_files,\n max_num_submittedJobs = 2000,\n )", "def sbatch(self, cmd, alloc=None, walltime=None, memory=None, nodes=1,\n feature=None, name='reV', stdout_path='./stdout', keep_sh=False,\n conda_env=None, module=None,\n module_root='/shared-projects/rev/modulefiles'):\n\n if len(name) > self.MAX_NAME_LEN:\n msg = ('Cannot submit job with name longer than {} chars: \"{}\"'\n .format(self.MAX_NAME_LEN, name))\n logger.error(msg)\n raise ValueError(msg)\n\n status = self.check_status(job_name=name)\n\n if status is not None:\n logger.info('Not submitting job \"{}\" because it is in '\n 'squeue or has been recently 
submitted'.format(name))\n out = None\n err = 'already_running'\n\n else:\n fname = '{}.sh'.format(name)\n self.make_path(stdout_path)\n\n # make all the sbatch arguments\n sb_a = f'#SBATCH --account={alloc}' if alloc is not None else ''\n walltime = self.format_walltime(walltime)\n sb_t = f'#SBATCH --time={walltime}' if walltime is not None else ''\n sb_jn = f'#SBATCH --job-name={name} # job name'\n sb_no = f'#SBATCH --nodes={nodes} # number of nodes'\n sb_out = f'#SBATCH --output={stdout_path}/{name}_%j.o'\n sb_err = f'#SBATCH --error={stdout_path}/{name}_%j.e'\n\n sbf, sbm, env_str = self._special_cmd_strs(feature, memory, module,\n module_root, conda_env)\n\n script_args = ['#!/bin/bash']\n sb_args = (sb_a, sb_t, sb_jn, sb_no, sb_out, sb_err, sbf, sbm,\n env_str)\n for sb_arg in sb_args:\n if sb_arg:\n script_args.append(sb_arg)\n\n script_args.append('echo Running on: $HOSTNAME, '\n 'Machine Type: $MACHTYPE')\n script_args.append(cmd)\n\n script = '\\n'.join(script_args)\n\n # write the shell script file and submit as qsub job\n self.make_sh(fname, script)\n out, err = self.submit('sbatch {script}'.format(script=fname))\n out = self._job_id_or_out(out)\n\n if not keep_sh:\n self.rm(fname)\n\n if err:\n msg = 'Received a SLURM error or warning: {}'.format(err)\n logger.warning(msg)\n warn(msg, SlurmWarning)\n else:\n job_id = int(out.split(' ', maxsplit=-1)[-1])\n out = str(job_id)\n logger.debug('SLURM job \"{}\" with id #{} submitted '\n 'successfully'.format(name, job_id))\n self._queue[job_id] = {self.QCOL_ID: job_id,\n self.QCOL_NAME: name,\n self.QCOL_STATUS: 'PD'}\n\n return out, err", "def bash(cmd):\n subprocess.run(cmd, shell=True, executable='/bin/bash') # ,", "def execute_sbatch(cmd_submit, stdin, env, cmd_nbscript): # pylint: disable=unused-argument\n p = subprocess.Popen(cmd_submit, stdin=subprocess.PIPE, env=env)\n p.stdin.write(stdin)\n p.stdin.close()\n p.wait()\n return p.returncode", "def prepare_automation_command(automation_script, pickles, work_dir):\n # Get path to script responsible for running automation job\n automation_script_path = os.path.join(os.path.dirname(__file__), 'automators', automation_script)\n\n # Prepare command\n cmd = 'python ' \\\n '{script} ' \\\n '--redmine_instance {redmine_pickle} ' \\\n '--issue {issue_pickle} ' \\\n '--work_dir {work_dir} ' \\\n '--description {description_pickle}'.format(script=automation_script_path,\n redmine_pickle=pickles['redmine_instance'],\n issue_pickle=pickles['issue'],\n description_pickle=pickles['description'],\n work_dir=work_dir)\n return cmd", "def bash_command(cmd):\n subprocess.Popen(['/bin/bash', '-c', cmd])", "def _make_cmdline(self, line):\n if isinstance(line, list):\n parts = line\n else:\n parts = line.split(\" \", 1)\n cmd = parts[0]\n exe = os.path.join(BINDIR, cmd)\n\n python_cmds = [\"samba-tool\",\n \"samba_dnsupdate\",\n \"samba_upgradedns\",\n \"script/traffic_replay\",\n \"script/traffic_learner\"]\n\n if os.path.exists(exe):\n parts[0] = exe\n if cmd in python_cmds and os.getenv(\"PYTHON\", None):\n parts.insert(0, os.environ[\"PYTHON\"])\n\n if not isinstance(line, list):\n line = \" \".join(parts)\n\n return line", "def bash_command(cmd):\n return check_output([\"/bin/bash\",\"-c\",cmd])", "def _shellrun(command_array, cwd=None, capture_ouput=None):\n script = command_array[0]\n if script == 'jinja2_gen.py':\n import jinja2_gen\n with patch('sys.argv', command_array):\n curcwd = os.getcwd()\n os.chdir(cwd)\n try:\n jinja2_gen.main()\n except:\n pass\n os.chdir(curcwd)\n else:\n return 
subprocess.run(command_array, cwd=cwd, capture_ouput=True)", "def call_command_line(string, **kwargs):\n return subprocess.run(string.split(\" \"), **kwargs)", "def run(*, parser, argv=sys.argv, env=os.environ, sp_run=subprocess.run):\n args = parser.parse_args(argv[1:])\n command = args.__gather_command__\n return command(\n args=args,\n env=env,\n run=sp_run,\n )", "def _get_fastq_to_sam_cmd(fwd_reads, sample_name, read_group, rev_reads=None):\n\n cmd = [\n 'java', '-jar', '/picard/picard.jar', 'FastqToSam',\n f'F1={fwd_reads}',\n f'O=/dev/stdout',\n 'QUIET=true',\n f'SM={sample_name}',\n f'RG={read_group}'\n ]\n if rev_reads is not None:\n cmd.append(f'F2={rev_reads}')\n\n return cmd", "def run_from_args(command):\n return Effect(Run.from_args(command))", "def shell_command(self):\n # TODO: fix this naive version by adding quotes where appropriate\n return \" \".join(self.args)", "def sbatch(filename):\n submit = os.popen(\"sbatch %s\"%(filename)).read()\n subId = submit.split()[3].replace(\"\\n\",\"\")\n return subId", "def _construct_crawl_command(self, site_list, crawl_id):\n #path = os.path.realpath(__file__).rpartition('/')[0]\n cmd_line = (\"python {}/spiderrunner.py {} -r host:{},port:{} -m {}\"\n \" -t {} -c {}\").format(\n _spdr_engine_location(), site_list, self.engine_redis_host,\n self.engine_redis_port, self.mappers,\n self.max_pages, crawl_id)\n if self.psuedo_dist:\n cmd_line += \" -d\"\n return cmd_line", "def _define_script_command(command_name,\n parent_shell,\n bootstrap_script,\n container_path,\n scripts_path,\n script):\n script_fragment = \"\\\"{}\\\"\".format(script) if script else \"\"\n parent_shell.define_command(command_name,\n \"python \\\"{bootstrap}\\\" \"\n \"-d \\\"{container}\\\" \"\n \"-r \\\"{scripts}\\\" \"\n \"-s {script}\"\n \"\".format(bootstrap=bootstrap_script,\n container=container_path,\n scripts=scripts_path,\n script=script_fragment))", "def createbash(self,executable,**keywords):\n\t\timport os\n\t\timport stat\n\n\t\toutputname = os.path.join(\"Results\",self.outputfile.replace(\".root\",\"_${SGE_TASK_ID}.root\"))\n\t\t# Extract the input files\n\t\tinputfiles = \"\"\n\t\tfor f in self.inputfiles:\n\t\t\tinputfiles += f+\",\"\n\t\tinputfiles = inputfiles[:-1]\n\n\t\tlines = \"#!/bin/bash\\n\"\n\t\tlines += \"\\n# Script created automatically by skimfiles.py utility\\n\"\n\t\tlines += \"\\nmkdir -p Results\\n\"\n\t\tlines += \"export PATH=$PATH:\"+os.path.join(self.basedir,\"bin\")+\":\"+os.path.join(self.pkgpath,\"bin\")+\"\\n\"\n\t\tlines += \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\"+self.libsdir+\"\\n\"\n\t\tlines += \"\\n\"\n\t\tlines += \"EVENTFILE=\"+self.eventsfile+\"\\n\"\n\t\tlines += \"EVENTS=$(cat $EVENTFILE | head -n $SGE_TASK_ID | tail -n 1)\\n\"\n\t\tlines += executable+\" \"+self.cutid+\" -i \"+inputfiles+\" -c \"+self.cutfile+\\\n\t\t\t\t\" -e $EVENTS -o \"+outputname+\"\\n\"\n\t\n\t\tfilename = self.nameID+\".sh\"\n\t\tf = open(filename,\"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\t\tos.chmod(filename,stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IXOTH)\n\t\t\n\t\treturn filename", "def start_sbatch_job(collection, exp_array, unobserved=False, name=None,\n output_dir_path=\".\", sbatch_options=None, max_simultaneous_jobs=None,\n debug_server=False):\n import pkg_resources\n\n # Set Slurm job array options\n sbatch_options['array'] = f\"0-{len(exp_array) - 1}\"\n if max_simultaneous_jobs is not None:\n sbatch_options['array'] += f\"%{max_simultaneous_jobs}\"\n\n # Set Slurm output parameter\n if 'output' in 
sbatch_options:\n raise ConfigError(f\"Can't set sbatch `output` Parameter explicitly. SEML will do that for you.\")\n elif output_dir_path == \"/dev/null\":\n output_file = output_dir_path\n else:\n output_file = f'{output_dir_path}/{name}_%A_%a.out'\n sbatch_options['output'] = output_file\n\n # Construct sbatch options string\n sbatch_options_str = create_slurm_options_string(sbatch_options, False)\n\n # Construct chunked list with all experiment IDs\n expid_strings = [('\"' + ';'.join([str(exp['_id']) for exp in chunk]) + '\"') for chunk in exp_array]\n\n with_sources = ('source_files' in exp_array[0][0]['seml'])\n use_conda_env = ('conda_environment' in exp_array[0][0]['seml']\n and exp_array[0][0]['seml']['conda_environment'] is not None)\n\n # Construct Slurm script\n template = pkg_resources.resource_string(__name__, \"slurm_template.sh\").decode(\"utf-8\")\n prepare_experiment_script = pkg_resources.resource_string(__name__, \"prepare_experiment.py\").decode(\"utf-8\")\n prepare_experiment_script = prepare_experiment_script.replace(\"'\", \"'\\\\''\")\n if 'working_dir' in exp_array[0][0]['seml']:\n working_dir = exp_array[0][0]['seml']['working_dir']\n else:\n working_dir = \"${{SLURM_SUBMIT_DIR}}\"\n\n variables = {\n 'sbatch_options': sbatch_options_str,\n 'working_dir': working_dir,\n 'use_conda_env': str(use_conda_env).lower(),\n 'conda_env': exp_array[0][0]['seml']['conda_environment'] if use_conda_env else \"\",\n 'exp_ids': ' '.join(expid_strings),\n 'with_sources': str(with_sources).lower(),\n 'prepare_experiment_script': prepare_experiment_script,\n 'db_collection_name': collection.name,\n 'sources_argument': \"--stored-sources-dir $tmpdir\" if with_sources else \"\",\n 'verbose': logging.root.level <= logging.VERBOSE,\n 'unobserved': unobserved,\n 'debug_server': debug_server,\n 'tmp_directory': SETTINGS.TMP_DIRECTORY\n }\n setup_command = SETTINGS.SETUP_COMMAND.format(**variables)\n end_command = SETTINGS.END_COMMAND.format(**variables)\n\n script = template.format(\n setup_command=setup_command,\n end_command=end_command,\n **variables,\n )\n\n path = os.path.join(SETTINGS.TMP_DIRECTORY, f'{uuid.uuid4()}.sh')\n with open(path, \"w\") as f:\n f.write(script)\n\n try:\n output = subprocess.run(f'sbatch {path}', shell=True, check=True, capture_output=True).stdout\n except subprocess.CalledProcessError as e:\n logging.error(f\"Could not start Slurm job via sbatch. 
Here's the sbatch error message:\\n\"\n f\"{e.stderr.decode('utf-8')}\")\n os.remove(path)\n exit(1)\n\n slurm_array_job_id = int(output.split(b' ')[-1])\n for task_id, chunk in enumerate(exp_array):\n for exp in chunk:\n if not unobserved:\n collection.update_one(\n {'_id': exp['_id']},\n {'$set': {\n 'status': States.PENDING[0],\n 'slurm.array_id': slurm_array_job_id,\n 'slurm.task_id': task_id,\n 'slurm.sbatch_options': sbatch_options,\n 'seml.output_file': f\"{output_dir_path}/{name}_{slurm_array_job_id}_{task_id}.out\"}})\n logging.verbose(f\"Started experiment with array job ID {slurm_array_job_id}, task ID {task_id}.\")\n os.remove(path)", "def gen_command_line(self, testcase):\n found_double_at = False\n new_args = []\n\n for arg in self.target_cmdline:\n if arg == '@@':\n found_double_at = True\n new_args.append(testcase)\n else:\n new_args.append(arg)\n\n if found_double_at:\n stdin = None\n else:\n with open(testcase, 'rb') as inf:\n stdin = inf.read()\n\n return new_args, stdin", "def get_cmd_args(\n cmd_args: List[str],\n env_names: Iterable[str],\n kwargs: Dict[str, bool],\n) -> Union[List[str], str]:\n for env_name in env_names:\n env_value = os.environ.get(env_name)\n if env_value is not None:\n settings = f'{env_value}/settings64.sh'\n if os.path.isfile(settings):\n kwargs['shell'] = True\n kwargs['executable'] = 'bash'\n return ' '.join([\n 'source',\n shlex.quote(settings),\n ';',\n 'exec',\n *map(shlex.quote, cmd_args),\n ])\n return cmd_args", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def get_cmd_line_args():\n parser = argparse.ArgumentParser(\n description=DOC, formatter_class=argparse.RawTextHelpFormatter)\n\n # Required arguments\n required = parser.add_argument_group(\"Required arguments\")\n required.add_argument(\n \"--fastq-regex\", required=True, type=is_fastq_regex, metavar=\"<regex>\",\n help=\n \"Snakemake regex used to infer the FASTQ files to process and the \"\n \"related wildcards: {sample} (mandatory), {lane} (optional) and \"\n \"{end} (mandatory if paired-end sequencing), e.g. \"\n \"/path/to/data/{sample}/{sample}_{ignore1}_{lane}_{end}_001.fastq.gz\"\n )\n required.add_argument(\n \"--outdir\", required=True, metavar=\"<dir>\", help=\"Output directory.\"\n )\n required.add_argument(\n \"--ref-build\", required=True, metavar=\"<version>\", help=\n \"Reference genome build, e.g. hg19 or mm10. Assuming the existence of \"\n \"the 3 following files in <--ref-genome-dir>: <ref build>.fa \"\n \"<ref build>.fa.fai and <ref build>.fa.dict\"\n )\n required.add_argument(\n \"--ref-genome-dir\", metavar=\"<dir>\", help=\n \"Bisulfite reference genome directory, including '<ref build>.fa', \"\n \"'<ref build>.fa.fai', '<ref build>.fa.dict' and the \"\n \"'Bisulfite_Genome' directory created by running the \"\n \"'bismark_genome_preparation' script. 
See README.md documentation.\"\n )\n\n\n # Optional general arguments\n optional = parser.add_argument_group(\"Optional\")\n optional.add_argument(\n \"--single-end\", action=\"store_false\", dest=\"paired_end\", help=\n \"By default paired-end sequencing is assumed, for single-end set this \"\n \"flag.\"\n )\n optional.add_argument(\n \"--rrbs\", action=\"store_true\", help=\n \"For Reduced Representation Bisulfite Sequencing (RRBS) set this flag.\"\n )\n optional.add_argument(\n \"--no-deduplication\", action=\"store_false\",\n dest=\"use_bismark_deduplicate\", help=\n \"Set this flag to not apply Bismark BAM deduplication. The deduplica\"\n \"tion removes reads with similar start/end positions on a given chromo\"\n \"some. It is not a valid PCR correction for RRBS or amplicon data. \"\n \"The deduplication is not applied if the --rrbs flag is set.\"\n )\n optional.add_argument(\n \"--non-directional-library\", action=\"store_false\",\n dest=\"directional_library\", help=\n \"By default the library is assumed to be directional, if not set this \"\n \"flag. See Bismark documentation for more information.\"\n )\n optional.add_argument(\n \"--target-bed\", type=is_file, metavar=\"<path>\", help=\n \"For targeted sequencing, the path to the BED file listing the regions\"\n \" targeted. Used only for read coverage computation. If no BED is pro\"\n \"vided the coverage will be computed on the whole reference genome.\"\n )\n optional.add_argument(\n \"--target-kit\", metavar=\"<name>\", help=\n \"For targeted sequencing, the name of the kit used to target to be \"\n \"reported in the preprocessing report. Does not affect processing.\"\n )\n optional.add_argument(\n \"--phred\", type=int, choices={33, 64}, default=DEFAULT_OF[\"phred\"],\n metavar=\"<33|64>\", help=\n \"Base quality encoding of input FASTQ files: 33|64, by default %i.\"\n % DEFAULT_OF[\"phred\"]\n )\n optional.add_argument(\n \"--r1-id\", default=DEFAULT_OF[\"r1_id\"], metavar=\"<ID>\", help=\n \"Case-insensitive ID used to identify R1 (forward) reads in paired-end\"\n \" sequencing, by default '%s'.\" % DEFAULT_OF[\"r1_id\"]\n )\n optional.add_argument(\n \"--r2-id\", default=DEFAULT_OF[\"r2_id\"], metavar=\"<ID>\", help=\n \"Case-insensitive ID used to identify R2 (reverse) reads in paired-end\"\n \" sequencing, by default '%s'.\" % DEFAULT_OF[\"r2_id\"]\n )\n optional.add_argument(\n \"--read-length\", type=int, metavar=\"<int>\", help=\n \"Length of reads (e.g. 150) to write in the HTML report. \"\n \"Does not affect the processing.\"\n )\n\n # Optional FastQC arguments\n fastqc = parser.add_argument_group(\"FastQC optional\")\n optional.add_argument(\n \"--fastqc-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"fastqc\"][\"threads\"], help=\n \"FastQC '--threads' argument, by default %i.\"\n % DEFAULT_OF[\"fastqc\"][\"threads\"]\n )\n\n # Optional Trim Galore arguments\n trim_galore = parser.add_argument_group(\"Trim Galore optional\")\n ADAPTERS_URL = (\n \"https://support.illumina.com/bulletins/2016/12/what-sequences-do-i\"\n \"-use-for-adapter-trimming.html\")\n trim_galore.add_argument(\n \"--adapter-r1\", metavar=\"<sequence>\", help=\n \"Trim Galore '--adapter' argument: adapter sequence to be trimmed off \"\n \"read 1. Common sequences: %s\" % ADAPTERS_URL\n )\n trim_galore.add_argument(\n \"--adapter-r2\", metavar=\"<sequence>\", help=\n \"Trim Galore '--adapter2' argument: adapter sequence to be trimmed \"\n \"off read 2. 
Common sequences: %s\" % ADAPTERS_URL\n )\n trim_galore.add_argument(\n \"--quality\", type=int, default=DEFAULT_OF[\"trim_galore\"][\"quality\"],\n metavar=\"<int>\", help=\n \"Trim Galore '--quality' argument, by default %i.\"\n % DEFAULT_OF[\"trim_galore\"][\"quality\"]\n )\n trim_galore.add_argument(\n \"--stringency\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"trim_galore\"][\"stringency\"], help=\n \"Trim Galore '--stringency' argument: overlap with adapter sequence \"\n \"required to trim, by default %i (very stringent).\"\n % DEFAULT_OF[\"trim_galore\"][\"stringency\"]\n )\n trim_galore.add_argument(\n \"--min-length\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"trim_galore\"][\"min_length\"], help=\n \"Trim Galore '--length' argument: minimum read length after trimming \"\n \"otherwise removed, by default %i.\"\n % DEFAULT_OF[\"trim_galore\"][\"min_length\"]\n )\n trim_galore.add_argument(\n \"--error-rate\", type=float, metavar=\"<float>\",\n default=DEFAULT_OF[\"trim_galore\"][\"error_rate\"], help=\n \"Trim Galore '-e' argument: maximum allowed error rate with the \"\n \"matching region, by default {}\"\n .format(DEFAULT_OF[\"trim_galore\"][\"error_rate\"])\n )\n trim_galore.add_argument(\n \"--max-n\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--max_n' argument: Maximum number of 'N's in a read \"\n \"otherwise removed. By default not applied.\"\n )\n trim_galore.add_argument(\n \"--trim-n\", action=\"store_true\", help=\n \"Trim Galore '--trim-n' argument: remove 'N's from ends of the read.\"\n )\n trim_galore.add_argument(\n \"--clip-r1-5p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--clip_R1' argument: remove basepairs from 5' end of \"\n \"read 1. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r2-5p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--clip_R2' argument: remove basepairs from 5' end of \"\n \"read 2. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r1-3p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--three_prime_clip_R1' argument: remove basepairs from \"\n \"3' end of read 1. Useful if there is a methylation bias at this end.\"\n )\n trim_galore.add_argument(\n \"--clip-r2-3p\", type=int, metavar=\"<int>\", help=\n \"Trim Galore '--three_prime_clip_R2' argument: remove basepairs from \"\n \"3' end of read 2. 
Useful if there is a methylation bias at this end.\"\n )\n\n # Optional Bismark tools arguments\n bismark = parser.add_argument_group(\"Bismark optional\")\n bismark.add_argument(\n \"--seed-mismatch\", type=int, choices=[0, 1], metavar=\"<0|1>\",\n default=DEFAULT_OF[\"bismark_bowtie2\"][\"seed_mismatch\"], help=\n \"Maximum number of mismatch allowed in a seed alignment: 0|1, \"\n \"by default %i.\" % DEFAULT_OF[\"bismark_bowtie2\"][\"seed_mismatch\"]\n )\n bismark.add_argument(\n \"--bowtie2-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"bismark_bowtie2\"][\"threads\"], help=\n \"Bowtie2 '--threads' argument, by default %i\"\n % DEFAULT_OF[\"bismark_bowtie2\"][\"threads\"])\n bismark.add_argument(\n \"--meth-extract-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"bismark_meth_extract\"][\"threads\"], help=\n \"bismark_methylation_extractor '--multicore' argument, by default %i.\"\n % DEFAULT_OF[\"bismark_meth_extract\"][\"threads\"]\n )\n\n # Optional Picard arguments\n picard = parser.add_argument_group(\"Picard optional\")\n picard.add_argument(\n \"--picard-jvm-args\", default=DEFAULT_OF[\"picard\"][\"jvm_args\"],\n metavar=\"<args>\", help=\n \"Java virtual machine arguments, e.g. to control starting and maximum \"\n \"heap size when running Picard, by default '%s'.\"\n % DEFAULT_OF[\"picard\"][\"jvm_args\"]\n )\n\n # Optional Samtools arguments\n samtools = parser.add_argument_group(\"Samtools optional\")\n samtools.add_argument(\n \"--samtools-threads\", type=int, metavar=\"<int>\",\n default=DEFAULT_OF[\"samtools\"][\"threads\"], help=\n \"Samtools '--threads' argument, by default %i\"\n % DEFAULT_OF[\"samtools\"][\"threads\"])\n\n # Parse the command line\n args = parser.parse_args()\n\n # For paired-end sequencing, check that the '{end}' wildcard is provided\n if args.paired_end is True and \"{end}\" not in args.fastq_regex:\n raise ValueError(\n \"The wildcard '{end}' is required in --fastq-regex argument when \"\n \"working with paired-end sequencing.\")\n\n # Set 'use_bismark_deduplicate' to False if RRBS data\n if args.rrbs is True:\n args.use_bismark_deduplicate = False\n\n # Check reference genome directory\n is_ref_genome_dir(args.ref_genome_dir, args.ref_build)\n\n # Convert the argparse object to a dict mapping <arg name> -> <val>\n kwargs = vars(args)\n\n return kwargs", "def bowtie_build_cmd(fasta,ebwt_basename):\n build_index_cmd = Command(\"bowtie-build\",\n \"-f\",fasta,\n ebwt_basename)\n return build_index_cmd" ]
[ "0.65440714", "0.59753895", "0.5942126", "0.5696643", "0.5603739", "0.5468804", "0.5436128", "0.5372883", "0.5347866", "0.5340703", "0.5326278", "0.52864385", "0.5263525", "0.5238756", "0.5226932", "0.5214312", "0.5206341", "0.51938975", "0.51719457", "0.51584595", "0.5135931", "0.51314515", "0.5097176", "0.50924736", "0.50888544", "0.50819737", "0.5071437", "0.5032381", "0.5018861", "0.5002217" ]
0.6852868
0
Echo the given command, returning it unchanged. If passed, save_to should be the path to a file; when it is, the echoed command is also appended to that file.
def echo_command(command: Sequence[str], *, save_to: Optional[Path]) -> Sequence[str]: output = " ".join(shlex.quote(part) for part in command) print(output) if save_to is not None: with save_to.open(mode="a", encoding="utf-8") as save_to_file: print(output, file=save_to_file) return command
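A brief usage sketch (illustrative only, not part of the dataset row): the command and log path below are hypothetical; the function prints the shell-quoted command, appends the same line to save_to when it is given, and returns the command unchanged.

    from pathlib import Path

    # Hypothetical command and log file, used only to illustrate the call.
    cmd = ["sbatch", "--job-name=train", "jobs/train.sh"]
    same_cmd = echo_command(cmd, save_to=Path("submitted_commands.log"))
    # Prints: sbatch --job-name=train jobs/train.sh
    # The same line is appended to submitted_commands.log; same_cmd is cmd, unchanged.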
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_command(cmd, output_file):\n global txt_output_dir\n output_path = os.path.join(txt_output_dir, output_file)\n print \"doing: %s > %s\" % (cmd, output_path)\n output = check_output(cmd.split(\" \"))\n with open(output_path, \"w\") as f:\n f.write(output)", "def help_save(self):\n help_str = \"\"\"Saves command(s) from history to file.\n\n Usage: save [N] [file_path]\n\n optional arguments:\n N - Number of command (from history), or `*` for all commands in history (default: most recent command)\n file_path - location to save script of command(s) to (default: value stored in `default_file_name` parameter)\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))", "def command(self, cmd, arg=None):\n\n if self._has_outdir(cmd) and self._tmpdir:\n arg += f\" -o {self._tmpdir}\"\n\n cmd = [self._tool_path, cmd]\n if arg:\n cmd += arg.split()\n\n return self._command(cmd)", "def do_save(self, arg):\n try:\n args = self.saveparser.parseString(arg)\n except pyparsing.ParseException:\n self.perror('Could not understand save target %s' % arg)\n raise SyntaxError(self.do_save.__doc__)\n fname = args.fname or self.default_file_name\n if args.idx == '*':\n saveme = '\\n\\n'.join(self.history[:])\n elif args.idx:\n saveme = self.history[int(args.idx) - 1]\n else:\n # Since this save command has already been added to history, need to go one more back for previous\n saveme = self.history[-2]\n try:\n f = open(os.path.expanduser(fname), 'w')\n f.write(saveme)\n f.close()\n self.pfeedback('Saved to {}'.format(fname))\n except Exception:\n self.perror('Error saving {}'.format(fname))\n raise", "def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))", "def append(command):\n with open(GlobalVariables.get_instance().get('history_filename'), 'ab') as fin:\n fin.write('{0}\\n'.format(command))\n fin.close()", "def do_save(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.save_to_file(**cmd_args)\n if success:\n self.console_print(\"Yippee! saved successfully!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! 
You can try again tho.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def print_stdout(command):\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()", "def do_save(self, arg):\n try:\n args = self.saveparser.parseString(arg)\n except pyparsing.ParseException:\n self.perror('Could not understand save target %s' % arg, traceback_war=False)\n raise SyntaxError(self.do_save.__doc__)\n\n # If a filename was supplied then use that, otherwise use a temp file\n if args.fname:\n fname = args.fname\n else:\n fd, fname = tempfile.mkstemp(suffix='.txt', text=True)\n os.close(fd)\n\n if args.idx == '*':\n saveme = '\\n\\n'.join(self.history[:])\n elif args.idx:\n saveme = self.history[int(args.idx) - 1]\n else:\n # Wrap in try to deal with case of empty history\n try:\n # Since this save command has already been added to history, need to go one more back for previous\n saveme = self.history[-2]\n except IndexError:\n self.perror('History is empty, nothing to save.', traceback_war=False)\n return\n try:\n f = open(os.path.expanduser(fname), 'w')\n f.write(saveme)\n f.close()\n self.pfeedback('Saved to {}'.format(fname))\n except Exception as e:\n self.perror('Saving {!r} - {}'.format(fname, e), traceback_war=False)", "def OnSim42RunCmdFileDump(self, event):\n path = self.PromptPathOpenCmd()\n if not path: return\n pathOut = self.PromptPathSaveCmd()\n if not pathOut: return\n f = open(pathOut, 'w')\n oldOut = self.sim42interp.cmd.output\n oldOutSys = sys.stdout\n self.sim42interp.cmd.output = f\n sys.stdout = f\n self.IgnoreMessages()\n self.RunCmdFile(path)\n self.UnIgnoreMessages()\n f.close()\n self.sim42interp.cmd.output = oldOut\n sys.stdout = oldOutSys", "def save_cmd(self):\n\n cmd_file = pjoin(self.out_dir,\n 'cmd_issued.visualqc.{}'.format(self.__name__))\n with open(cmd_file, 'w') as cf:\n cf.write('{}\\n'.format(' '.join(sys.argv)))\n\n return", "def execute(cmd, log_cmd=True, also_output_to_file=None):\r\n return CommandUtil._execute_internal(cmd, True, True, log_cmd, also_output_to_file)", "def edit_or_output(output=True, path=None):\n text = None\n\n if output:\n with path.open('rt') as path_handle:\n text = path_handle.read()\n click.echo(''.join(text))\n else:\n text = click.edit(filename=str(path))\n\n return text", "def save():\n click.echo(\"Not implemented yet. In the future, this command will be used for saving.\")\n sys.exit(-2)", "def exec_to_file ( cmd, output_file, cwd = '/tmp/' ):\n\n try:\n dn = open(os.devnull, 'r')\n with open(output_file, 'w') as fo:\n vlog(4, 'Running command: %s > %s from %s '% (cmd, output_file, cwd))\n p = subprocess.Popen(\n cmd, \n stdin=dn,\n stdout=fo, \n stderr=fo, \n cwd=cwd, \n close_fds=True\n )\n\n if p:\n p.wait()\n return p.returncode\n\n except Exception as e:\n vlog(1, 'Command Error: %s'% (str(e)))\n\n vlog(1, 'Failed to run command: %s > %s '% (cmd, output_file))\n return None", "def console_command(command: str, bWriteToLog: bool = False) -> None:\n get_player_controller().ConsoleCommand(command, bWriteToLog)", "def do_echo(self, line):\n print line.replace('$out', self.last_output)", "def run_console_redirect(self, command, exit_if_error=True,\n log_error_as_warning=False, print_to_console=True):\n\n excluded_commands = ['>', 'rm', '[ ! 
-d']\n # If the command already have output redirect or use 'rm' command,\n # uses major method 'run'\n for excluded_command in excluded_commands:\n if excluded_command in command:\n return self.run(command, exit_if_error, log_error_as_warning,\n print_to_console)\n\n # Folder for output/error files on remote host\n rem_tmp_folder = '/tmp'\n # Prefix for output/error files\n tmp_fname_prefix = '{}'.format(str(uuid.uuid4())[0:8])\n # rco = remote command output\n tmp_out_fname = 'rco.{}.out'.format(tmp_fname_prefix)\n tmp_err_fname = 'rco.{}.err'.format(tmp_fname_prefix)\n # Remote files\n rem_tmp_out_fname = os.path.join(rem_tmp_folder, tmp_out_fname)\n rem_tmp_err_fname = os.path.join(rem_tmp_folder, tmp_err_fname)\n # Local files. Use tmp/<process id> folder on local system\n local_tmp_out_fname = '{}/{}'.format(\n TestConfigurationManager.Instance().tmp_path(),\n '{}'.format(tmp_out_fname))\n local_tmp_err_fname = '{}/{}'.format(\n TestConfigurationManager.Instance().tmp_path(),\n '{}'.format(tmp_err_fname))\n\n add_command = '1>{} 2>{}'.format(rem_tmp_out_fname,\n rem_tmp_err_fname)\n if command.endswith('|| true'):\n command = command.replace('|| true',\n '{} || true'.format(add_command))\n else:\n command = '{} {}'.format(command, add_command)\n\n self.run(command, exit_if_error, log_error_as_warning, print_to_console)\n self.download(rem_tmp_out_fname, local_tmp_out_fname, False)\n self.download(rem_tmp_err_fname, local_tmp_err_fname, False)\n\n self.run('rm -rf {} {}'.format(tmp_out_fname, tmp_err_fname),\n exit_if_error, log_error_as_warning, print_to_console=False)\n\n result = (to_list_from_file(local_tmp_out_fname),\n to_list_from_file(local_tmp_err_fname))\n return result", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def send_command(command):\n print(\"Send: >>> \\n\"+command)\n TOFILE.write(command + EOL)\n TOFILE.flush()", "def backup_command(server, output):\n # Stop saving chunks\n server.save_off()\n # Run the external save program\n subprocess.call(CONFIG['backup_command']['script'].split())\n # Start saving chunks again\n server.save_on()\n return", "def view_command(path, verbose):\n job = ReadOnlyJob(path)\n print(job.summary(verbose=verbose))", "def output(*args):\n print(*args, end='', file=file)", "def run(command):\n if arguments['--dry-run']:\n print command\n else:\n subprocess.call(command, shell=True)", "def write(self, command):\n if not command.endswith('\\n'):\n command += '\\n'\n self.rpc.call(MsfRpcMethod.ConsoleWrite, [self.cid, command])", "def save_result(self):\n self.print_to_console()", "def sh_e_out(cls, cmd, **kwargs):\n cmd_kwargs = {\n 'stdout': subprocess.PIPE,\n }\n cmd_kwargs.update(kwargs)\n return cls.sh_e(cmd, **cmd_kwargs)[0]", "def exec_cmd(command):\r\n global _verbose\r\n debug(\"Executing command: %s\" % command)\r\n if not _verbose:\r\n command = \"%s > /dev/null 2>&1\" % command\r\n resp = os.system(command)\r\n if resp != 0:\r\n exit(\"Command [%s] failed\" % command, resp)", "def call(self, *args):\n self.formula.to_file(self.output_file)", "def run_command(opts, cmd):\n print(cmd)\n if not opts.dryrun:\n print(check_output(cmd, shell=True))" ]
[ "0.6414438", "0.5620966", "0.5580521", "0.55630916", "0.54647315", "0.54493", "0.54357255", "0.5429927", "0.541318", "0.5368313", "0.5360812", "0.5334565", "0.5324181", "0.5257833", "0.525267", "0.5240965", "0.52371734", "0.5206752", "0.51941127", "0.51866156", "0.5181104", "0.5096643", "0.5087754", "0.507494", "0.5069031", "0.5058159", "0.5033387", "0.50011885", "0.49953923", "0.4993734" ]
0.78671086
0